test/grsecurity-2.9.1-3.4.1-201206091540.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index b4a898f..6c0106a 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,9 +51,11 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 @@ -69,6 +74,7 @@ Image
38 Module.markers
39 Module.symvers
40 PENDING
41 +PERF*
42 SCCS
43 System.map*
44 TAGS
45 @@ -92,19 +98,24 @@ bounds.h
46 bsetup
47 btfixupprep
48 build
49 +builtin-policy.h
50 bvmlinux
51 bzImage*
52 capability_names.h
53 capflags.c
54 classlist.h*
55 +clut_vga16.c
56 +common-cmds.h
57 comp*.log
58 compile.h*
59 conf
60 config
61 config-*
62 config_data.h*
63 +config.c
64 config.mak
65 config.mak.autogen
66 +config.tmp
67 conmakehash
68 consolemap_deftbl.c*
69 cpustr.h
70 @@ -115,9 +126,11 @@ devlist.h*
71 dnotify_test
72 docproc
73 dslm
74 +dtc-lexer.lex.c
75 elf2ecoff
76 elfconfig.h*
77 evergreen_reg_safe.h
78 +exception_policy.conf
79 fixdep
80 flask.h
81 fore200e_mkfirm
82 @@ -125,12 +138,15 @@ fore200e_pca_fw.c*
83 gconf
84 gconf.glade.h
85 gen-devlist
86 +gen-kdb_cmds.c
87 gen_crc32table
88 gen_init_cpio
89 generated
90 genheaders
91 genksyms
92 *_gray256.c
93 +hash
94 +hid-example
95 hpet_example
96 hugepage-mmap
97 hugepage-shm
98 @@ -145,7 +161,7 @@ int32.c
99 int4.c
100 int8.c
101 kallsyms
102 -kconfig
103 +kern_constants.h
104 keywords.c
105 ksym.c*
106 ksym.h*
107 @@ -153,7 +169,7 @@ kxgettext
108 lkc_defs.h
109 lex.c
110 lex.*.c
111 -linux
112 +lib1funcs.S
113 logo_*.c
114 logo_*_clut224.c
115 logo_*_mono.c
116 @@ -164,14 +180,15 @@ machtypes.h
117 map
118 map_hugetlb
119 maui_boot.h
120 -media
121 mconf
122 +mdp
123 miboot*
124 mk_elfconfig
125 mkboot
126 mkbugboot
127 mkcpustr
128 mkdep
129 +mkpiggy
130 mkprep
131 mkregtable
132 mktables
133 @@ -188,6 +205,7 @@ oui.c*
134 page-types
135 parse.c
136 parse.h
137 +parse-events*
138 patches*
139 pca200e.bin
140 pca200e_ecd.bin2
141 @@ -197,6 +215,7 @@ perf-archive
142 piggyback
143 piggy.gzip
144 piggy.S
145 +pmu-*
146 pnmtologo
147 ppc_defs.h*
148 pss_boot.h
149 @@ -207,6 +226,7 @@ r300_reg_safe.h
150 r420_reg_safe.h
151 r600_reg_safe.h
152 recordmcount
153 +regdb.c
154 relocs
155 rlim_names.h
156 rn50_reg_safe.h
157 @@ -217,6 +237,7 @@ setup
158 setup.bin
159 setup.elf
160 sImage
161 +slabinfo
162 sm_tbl*
163 split-include
164 syscalltab.h
165 @@ -227,6 +248,7 @@ tftpboot.img
166 timeconst.h
167 times.h*
168 trix_boot.h
169 +user_constants.h
170 utsrelease.h*
171 vdso-syms.lds
172 vdso.lds
173 @@ -238,13 +260,17 @@ vdso32.lds
174 vdso32.so.dbg
175 vdso64.lds
176 vdso64.so.dbg
177 +vdsox32.lds
178 +vdsox32-syms.lds
179 version.h*
180 vmImage
181 vmlinux
182 vmlinux-*
183 vmlinux.aout
184 vmlinux.bin.all
185 +vmlinux.bin.bz2
186 vmlinux.lds
187 +vmlinux.relocs
188 vmlinuz
189 voffset.h
190 vsyscall.lds
191 @@ -252,9 +278,11 @@ vsyscall_32.lds
192 wanxlfw.inc
193 uImage
194 unifdef
195 +utsrelease.h
196 wakeup.bin
197 wakeup.elf
198 wakeup.lds
199 zImage*
200 zconf.hash.c
201 +zconf.lex.c
202 zoffset.h
203 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
204 index c1601e5..08557ce 100644
205 --- a/Documentation/kernel-parameters.txt
206 +++ b/Documentation/kernel-parameters.txt
207 @@ -2021,6 +2021,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
208 the specified number of seconds. This is to be used if
209 your oopses keep scrolling off the screen.
210
211 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
212 + virtualization environments that don't cope well with the
213 + expand down segment used by UDEREF on X86-32 or the frequent
214 + page table updates on X86-64.
215 +
216 + pax_softmode= 0/1 to disable/enable PaX softmode at boot.
217 +
218 pcbit= [HW,ISDN]
219
220 pcd. [PARIDE]
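
Both new parameters above are plain kernel command-line options. As a usage illustration (hypothetical bootloader entry, not part of the patch), enabling softmode and disabling UDEREF for a hypervisor that mishandles the expand-down segment would look like:

        linux /boot/vmlinuz-3.4.1-grsec root=/dev/sda1 ro pax_softmode=1 pax_nouderef
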
221 diff --git a/Makefile b/Makefile
222 index 0bd1554..808b0e5 100644
223 --- a/Makefile
224 +++ b/Makefile
225 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
226
227 HOSTCC = gcc
228 HOSTCXX = g++
229 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
230 -HOSTCXXFLAGS = -O2
231 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
232 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
233 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
234
235 # Decide whether to build built-in, modular, or both.
236 # Normally, just do built-in.
237 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
238 # Rules shared between *config targets and build targets
239
240 # Basic helpers built in scripts/
241 -PHONY += scripts_basic
242 -scripts_basic:
243 +PHONY += scripts_basic gcc-plugins
244 +scripts_basic: gcc-plugins
245 $(Q)$(MAKE) $(build)=scripts/basic
246 $(Q)rm -f .tmp_quiet_recordmcount
247
248 @@ -564,6 +565,55 @@ else
249 KBUILD_CFLAGS += -O2
250 endif
251
252 +ifndef DISABLE_PAX_PLUGINS
253 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
254 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
255 +ifndef CONFIG_UML
256 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
257 +endif
258 +endif
259 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
260 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
261 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
262 +endif
263 +ifdef CONFIG_KALLOCSTAT_PLUGIN
264 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
265 +endif
266 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
267 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
268 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
269 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
270 +endif
271 +ifdef CONFIG_CHECKER_PLUGIN
272 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
273 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
274 +endif
275 +endif
276 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
277 +ifdef CONFIG_PAX_SIZE_OVERFLOW
278 +SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
279 +endif
280 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
281 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
282 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
283 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
284 +ifeq ($(KBUILD_EXTMOD),)
285 +gcc-plugins:
286 + $(Q)$(MAKE) $(build)=tools/gcc
287 +else
288 +gcc-plugins: ;
289 +endif
290 +else
291 +gcc-plugins:
292 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
293 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
294 +else
295 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
296 +endif
297 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
298 +endif
299 +endif
300 +
301 include $(srctree)/arch/$(SRCARCH)/Makefile
302
303 ifneq ($(CONFIG_FRAME_WARN),0)
304 @@ -708,7 +758,7 @@ export mod_strip_cmd
305
306
307 ifeq ($(KBUILD_EXTMOD),)
308 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
309 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
310
311 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
312 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
313 @@ -932,6 +982,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
314
315 # The actual objects are generated when descending,
316 # make sure no implicit rule kicks in
317 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
318 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
319 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
320
321 # Handle descending into subdirectories listed in $(vmlinux-dirs)
322 @@ -941,7 +993,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
323 # Error messages still appears in the original language
324
325 PHONY += $(vmlinux-dirs)
326 -$(vmlinux-dirs): prepare scripts
327 +$(vmlinux-dirs): gcc-plugins prepare scripts
328 $(Q)$(MAKE) $(build)=$@
329
330 # Store (new) KERNELRELASE string in include/config/kernel.release
331 @@ -985,6 +1037,7 @@ prepare0: archprepare FORCE
332 $(Q)$(MAKE) $(build)=.
333
334 # All the preparing..
335 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
336 prepare: prepare0
337
338 # Generate some files
339 @@ -1092,6 +1145,8 @@ all: modules
340 # using awk while concatenating to the final file.
341
342 PHONY += modules
343 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
344 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
345 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
346 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
347 @$(kecho) ' Building modules, stage 2.';
348 @@ -1107,7 +1162,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
349
350 # Target to prepare building external modules
351 PHONY += modules_prepare
352 -modules_prepare: prepare scripts
353 +modules_prepare: gcc-plugins prepare scripts
354
355 # Target to install modules
356 PHONY += modules_install
357 @@ -1204,6 +1259,7 @@ distclean: mrproper
358 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
359 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
360 -o -name '.*.rej' \
361 + -o -name '.*.rej' -o -name '*.so' \
362 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
363 -type f -print | xargs rm -f
364
365 @@ -1364,6 +1420,8 @@ PHONY += $(module-dirs) modules
366 $(module-dirs): crmodverdir $(objtree)/Module.symvers
367 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
368
369 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
370 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
371 modules: $(module-dirs)
372 @$(kecho) ' Building modules, stage 2.';
373 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
374 @@ -1490,17 +1548,21 @@ else
375 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
376 endif
377
378 -%.s: %.c prepare scripts FORCE
379 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
380 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
381 +%.s: %.c gcc-plugins prepare scripts FORCE
382 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
383 %.i: %.c prepare scripts FORCE
384 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
385 -%.o: %.c prepare scripts FORCE
386 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
387 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
388 +%.o: %.c gcc-plugins prepare scripts FORCE
389 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
390 %.lst: %.c prepare scripts FORCE
391 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
392 -%.s: %.S prepare scripts FORCE
393 +%.s: %.S gcc-plugins prepare scripts FORCE
394 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
395 -%.o: %.S prepare scripts FORCE
396 +%.o: %.S gcc-plugins prepare scripts FORCE
397 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
398 %.symtypes: %.c prepare scripts FORCE
399 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
400 @@ -1510,11 +1572,15 @@ endif
401 $(cmd_crmodverdir)
402 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
403 $(build)=$(build-dir)
404 -%/: prepare scripts FORCE
405 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
406 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
407 +%/: gcc-plugins prepare scripts FORCE
408 $(cmd_crmodverdir)
409 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
410 $(build)=$(build-dir)
411 -%.ko: prepare scripts FORCE
412 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
413 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
414 +%.ko: gcc-plugins prepare scripts FORCE
415 $(cmd_crmodverdir)
416 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
417 $(build)=$(build-dir) $(@:.ko=.o)
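
The -fplugin= switches added to the Makefile load shared objects built from tools/gcc/ against GCC's plugin API, which is why the build now hard-fails on compilers without plugin support. For orientation only, a minimal plugin has roughly this shape (assuming GCC 4.5+ with the gcc-<ver>-plugin-dev headers installed; this skeleton is not taken from the patch):

        #include "gcc-plugin.h"
        #include "plugin-version.h"

        /* GCC refuses to load plugins that do not declare GPL compatibility. */
        int plugin_is_GPL_compatible;

        /* Called once when GCC loads the .so passed via -fplugin=. */
        int plugin_init(struct plugin_name_args *plugin_info,
                        struct plugin_gcc_version *version)
        {
                if (!plugin_default_version_check(version, &gcc_version))
                        return 1;       /* built against a different GCC */

                /* A real plugin (constify, stackleak, size_overflow, ...)
                 * would call register_callback() here to add passes or
                 * attribute handlers; this skeleton does nothing. */
                return 0;
        }
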
418 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
419 index 3bb7ffe..347a54c 100644
420 --- a/arch/alpha/include/asm/atomic.h
421 +++ b/arch/alpha/include/asm/atomic.h
422 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
423 #define atomic_dec(v) atomic_sub(1,(v))
424 #define atomic64_dec(v) atomic64_sub(1,(v))
425
426 +#define atomic64_read_unchecked(v) atomic64_read(v)
427 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
428 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
429 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
430 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
431 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
432 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
433 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
434 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
435 +
436 #define smp_mb__before_atomic_dec() smp_mb()
437 #define smp_mb__after_atomic_dec() smp_mb()
438 #define smp_mb__before_atomic_inc() smp_mb()
439 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
440 index ad368a9..fbe0f25 100644
441 --- a/arch/alpha/include/asm/cache.h
442 +++ b/arch/alpha/include/asm/cache.h
443 @@ -4,19 +4,19 @@
444 #ifndef __ARCH_ALPHA_CACHE_H
445 #define __ARCH_ALPHA_CACHE_H
446
447 +#include <linux/const.h>
448
449 /* Bytes per L1 (data) cache line. */
450 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
451 -# define L1_CACHE_BYTES 64
452 # define L1_CACHE_SHIFT 6
453 #else
454 /* Both EV4 and EV5 are write-through, read-allocate,
455 direct-mapped, physical.
456 */
457 -# define L1_CACHE_BYTES 32
458 # define L1_CACHE_SHIFT 5
459 #endif
460
461 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
462 #define SMP_CACHE_BYTES L1_CACHE_BYTES
463
464 #endif
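
Several cache.h hunks in this patch follow the same pattern: derive L1_CACHE_BYTES from L1_CACHE_SHIFT via _AC() so one definition serves both C and assembly. A small illustration of what the macro buys (semantics of linux/const.h, not code from the patch):

        #include <linux/const.h>

        #define L1_CACHE_SHIFT 6
        #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

        /* In C (__ASSEMBLY__ undefined) this expands to (1UL << 6), so the
         * constant has unsigned long type in C expressions.  In assembly
         * (__ASSEMBLY__ defined) it expands to (1 << 6), because the
         * assembler does not understand the UL suffix. */
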
465 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
466 index 968d999..d36b2df 100644
467 --- a/arch/alpha/include/asm/elf.h
468 +++ b/arch/alpha/include/asm/elf.h
469 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
470
471 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
472
473 +#ifdef CONFIG_PAX_ASLR
474 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
475 +
476 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
477 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
478 +#endif
479 +
480 /* $0 is set by ld.so to a pointer to a function which might be
481 registered using atexit. This provides a mean for the dynamic
482 linker to call DT_FINI functions for shared libraries that have
483 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
484 index bc2a0da..8ad11ee 100644
485 --- a/arch/alpha/include/asm/pgalloc.h
486 +++ b/arch/alpha/include/asm/pgalloc.h
487 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
488 pgd_set(pgd, pmd);
489 }
490
491 +static inline void
492 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
493 +{
494 + pgd_populate(mm, pgd, pmd);
495 +}
496 +
497 extern pgd_t *pgd_alloc(struct mm_struct *mm);
498
499 static inline void
500 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
501 index 81a4342..348b927 100644
502 --- a/arch/alpha/include/asm/pgtable.h
503 +++ b/arch/alpha/include/asm/pgtable.h
504 @@ -102,6 +102,17 @@ struct vm_area_struct;
505 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
506 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
507 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
508 +
509 +#ifdef CONFIG_PAX_PAGEEXEC
510 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
511 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
512 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
513 +#else
514 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
515 +# define PAGE_COPY_NOEXEC PAGE_COPY
516 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
517 +#endif
518 +
519 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
520
521 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
522 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
523 index 2fd00b7..cfd5069 100644
524 --- a/arch/alpha/kernel/module.c
525 +++ b/arch/alpha/kernel/module.c
526 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
527
528 /* The small sections were sorted to the end of the segment.
529 The following should definitely cover them. */
530 - gp = (u64)me->module_core + me->core_size - 0x8000;
531 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
532 got = sechdrs[me->arch.gotsecindex].sh_addr;
533
534 for (i = 0; i < n; i++) {
535 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
536 index 49ee319..9ee7d14 100644
537 --- a/arch/alpha/kernel/osf_sys.c
538 +++ b/arch/alpha/kernel/osf_sys.c
539 @@ -1146,7 +1146,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
540 /* At this point: (!vma || addr < vma->vm_end). */
541 if (limit - len < addr)
542 return -ENOMEM;
543 - if (!vma || addr + len <= vma->vm_start)
544 + if (check_heap_stack_gap(vma, addr, len))
545 return addr;
546 addr = vma->vm_end;
547 vma = vma->vm_next;
548 @@ -1182,6 +1182,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
549 merely specific addresses, but regions of memory -- perhaps
550 this feature should be incorporated into all ports? */
551
552 +#ifdef CONFIG_PAX_RANDMMAP
553 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
554 +#endif
555 +
556 if (addr) {
557 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
558 if (addr != (unsigned long) -ENOMEM)
559 @@ -1189,8 +1193,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
560 }
561
562 /* Next, try allocating at TASK_UNMAPPED_BASE. */
563 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
564 - len, limit);
565 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
566 +
567 if (addr != (unsigned long) -ENOMEM)
568 return addr;
569
570 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
571 index 5eecab1..609abc0 100644
572 --- a/arch/alpha/mm/fault.c
573 +++ b/arch/alpha/mm/fault.c
574 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
575 __reload_thread(pcb);
576 }
577
578 +#ifdef CONFIG_PAX_PAGEEXEC
579 +/*
580 + * PaX: decide what to do with offenders (regs->pc = fault address)
581 + *
582 + * returns 1 when task should be killed
583 + * 2 when patched PLT trampoline was detected
584 + * 3 when unpatched PLT trampoline was detected
585 + */
586 +static int pax_handle_fetch_fault(struct pt_regs *regs)
587 +{
588 +
589 +#ifdef CONFIG_PAX_EMUPLT
590 + int err;
591 +
592 + do { /* PaX: patched PLT emulation #1 */
593 + unsigned int ldah, ldq, jmp;
594 +
595 + err = get_user(ldah, (unsigned int *)regs->pc);
596 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
597 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
598 +
599 + if (err)
600 + break;
601 +
602 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
603 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
604 + jmp == 0x6BFB0000U)
605 + {
606 + unsigned long r27, addr;
607 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
608 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
609 +
610 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
611 + err = get_user(r27, (unsigned long *)addr);
612 + if (err)
613 + break;
614 +
615 + regs->r27 = r27;
616 + regs->pc = r27;
617 + return 2;
618 + }
619 + } while (0);
620 +
621 + do { /* PaX: patched PLT emulation #2 */
622 + unsigned int ldah, lda, br;
623 +
624 + err = get_user(ldah, (unsigned int *)regs->pc);
625 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
626 + err |= get_user(br, (unsigned int *)(regs->pc+8));
627 +
628 + if (err)
629 + break;
630 +
631 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
632 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
633 + (br & 0xFFE00000U) == 0xC3E00000U)
634 + {
635 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
636 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
637 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
638 +
639 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
640 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
641 + return 2;
642 + }
643 + } while (0);
644 +
645 + do { /* PaX: unpatched PLT emulation */
646 + unsigned int br;
647 +
648 + err = get_user(br, (unsigned int *)regs->pc);
649 +
650 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
651 + unsigned int br2, ldq, nop, jmp;
652 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
653 +
654 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
655 + err = get_user(br2, (unsigned int *)addr);
656 + err |= get_user(ldq, (unsigned int *)(addr+4));
657 + err |= get_user(nop, (unsigned int *)(addr+8));
658 + err |= get_user(jmp, (unsigned int *)(addr+12));
659 + err |= get_user(resolver, (unsigned long *)(addr+16));
660 +
661 + if (err)
662 + break;
663 +
664 + if (br2 == 0xC3600000U &&
665 + ldq == 0xA77B000CU &&
666 + nop == 0x47FF041FU &&
667 + jmp == 0x6B7B0000U)
668 + {
669 + regs->r28 = regs->pc+4;
670 + regs->r27 = addr+16;
671 + regs->pc = resolver;
672 + return 3;
673 + }
674 + }
675 + } while (0);
676 +#endif
677 +
678 + return 1;
679 +}
680 +
681 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
682 +{
683 + unsigned long i;
684 +
685 + printk(KERN_ERR "PAX: bytes at PC: ");
686 + for (i = 0; i < 5; i++) {
687 + unsigned int c;
688 + if (get_user(c, (unsigned int *)pc+i))
689 + printk(KERN_CONT "???????? ");
690 + else
691 + printk(KERN_CONT "%08x ", c);
692 + }
693 + printk("\n");
694 +}
695 +#endif
696
697 /*
698 * This routine handles page faults. It determines the address,
699 @@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
700 good_area:
701 si_code = SEGV_ACCERR;
702 if (cause < 0) {
703 - if (!(vma->vm_flags & VM_EXEC))
704 + if (!(vma->vm_flags & VM_EXEC)) {
705 +
706 +#ifdef CONFIG_PAX_PAGEEXEC
707 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
708 + goto bad_area;
709 +
710 + up_read(&mm->mmap_sem);
711 + switch (pax_handle_fetch_fault(regs)) {
712 +
713 +#ifdef CONFIG_PAX_EMUPLT
714 + case 2:
715 + case 3:
716 + return;
717 +#endif
718 +
719 + }
720 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
721 + do_group_exit(SIGKILL);
722 +#else
723 goto bad_area;
724 +#endif
725 +
726 + }
727 } else if (!cause) {
728 /* Allow reads even for write-only mappings */
729 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
730 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
731 index 68374ba..15c980c 100644
732 --- a/arch/arm/include/asm/atomic.h
733 +++ b/arch/arm/include/asm/atomic.h
734 @@ -17,17 +17,35 @@
735 #include <asm/barrier.h>
736 #include <asm/cmpxchg.h>
737
738 +#ifdef CONFIG_GENERIC_ATOMIC64
739 +#include <asm-generic/atomic64.h>
740 +#endif
741 +
742 #define ATOMIC_INIT(i) { (i) }
743
744 #ifdef __KERNEL__
745
746 +#define _ASM_EXTABLE(from, to) \
747 +" .pushsection __ex_table,\"a\"\n"\
748 +" .align 3\n" \
749 +" .long " #from ", " #to"\n" \
750 +" .popsection"
751 +
752 /*
753 * On ARM, ordinary assignment (str instruction) doesn't clear the local
754 * strex/ldrex monitor on some implementations. The reason we can use it for
755 * atomic_set() is the clrex or dummy strex done on every exception return.
756 */
757 #define atomic_read(v) (*(volatile int *)&(v)->counter)
758 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
759 +{
760 + return v->counter;
761 +}
762 #define atomic_set(v,i) (((v)->counter) = (i))
763 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
764 +{
765 + v->counter = i;
766 +}
767
768 #if __LINUX_ARM_ARCH__ >= 6
769
770 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
771 int result;
772
773 __asm__ __volatile__("@ atomic_add\n"
774 +"1: ldrex %1, [%3]\n"
775 +" adds %0, %1, %4\n"
776 +
777 +#ifdef CONFIG_PAX_REFCOUNT
778 +" bvc 3f\n"
779 +"2: bkpt 0xf103\n"
780 +"3:\n"
781 +#endif
782 +
783 +" strex %1, %0, [%3]\n"
784 +" teq %1, #0\n"
785 +" bne 1b"
786 +
787 +#ifdef CONFIG_PAX_REFCOUNT
788 +"\n4:\n"
789 + _ASM_EXTABLE(2b, 4b)
790 +#endif
791 +
792 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
793 + : "r" (&v->counter), "Ir" (i)
794 + : "cc");
795 +}
796 +
797 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
798 +{
799 + unsigned long tmp;
800 + int result;
801 +
802 + __asm__ __volatile__("@ atomic_add_unchecked\n"
803 "1: ldrex %0, [%3]\n"
804 " add %0, %0, %4\n"
805 " strex %1, %0, [%3]\n"
806 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
807 smp_mb();
808
809 __asm__ __volatile__("@ atomic_add_return\n"
810 +"1: ldrex %1, [%3]\n"
811 +" adds %0, %1, %4\n"
812 +
813 +#ifdef CONFIG_PAX_REFCOUNT
814 +" bvc 3f\n"
815 +" mov %0, %1\n"
816 +"2: bkpt 0xf103\n"
817 +"3:\n"
818 +#endif
819 +
820 +" strex %1, %0, [%3]\n"
821 +" teq %1, #0\n"
822 +" bne 1b"
823 +
824 +#ifdef CONFIG_PAX_REFCOUNT
825 +"\n4:\n"
826 + _ASM_EXTABLE(2b, 4b)
827 +#endif
828 +
829 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
830 + : "r" (&v->counter), "Ir" (i)
831 + : "cc");
832 +
833 + smp_mb();
834 +
835 + return result;
836 +}
837 +
838 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
839 +{
840 + unsigned long tmp;
841 + int result;
842 +
843 + smp_mb();
844 +
845 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
846 "1: ldrex %0, [%3]\n"
847 " add %0, %0, %4\n"
848 " strex %1, %0, [%3]\n"
849 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
850 int result;
851
852 __asm__ __volatile__("@ atomic_sub\n"
853 +"1: ldrex %1, [%3]\n"
854 +" subs %0, %1, %4\n"
855 +
856 +#ifdef CONFIG_PAX_REFCOUNT
857 +" bvc 3f\n"
858 +"2: bkpt 0xf103\n"
859 +"3:\n"
860 +#endif
861 +
862 +" strex %1, %0, [%3]\n"
863 +" teq %1, #0\n"
864 +" bne 1b"
865 +
866 +#ifdef CONFIG_PAX_REFCOUNT
867 +"\n4:\n"
868 + _ASM_EXTABLE(2b, 4b)
869 +#endif
870 +
871 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
872 + : "r" (&v->counter), "Ir" (i)
873 + : "cc");
874 +}
875 +
876 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
877 +{
878 + unsigned long tmp;
879 + int result;
880 +
881 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
882 "1: ldrex %0, [%3]\n"
883 " sub %0, %0, %4\n"
884 " strex %1, %0, [%3]\n"
885 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
886 smp_mb();
887
888 __asm__ __volatile__("@ atomic_sub_return\n"
889 -"1: ldrex %0, [%3]\n"
890 -" sub %0, %0, %4\n"
891 +"1: ldrex %1, [%3]\n"
892 +" sub %0, %1, %4\n"
893 +
894 +#ifdef CONFIG_PAX_REFCOUNT
895 +" bvc 3f\n"
896 +" mov %0, %1\n"
897 +"2: bkpt 0xf103\n"
898 +"3:\n"
899 +#endif
900 +
901 " strex %1, %0, [%3]\n"
902 " teq %1, #0\n"
903 " bne 1b"
904 +
905 +#ifdef CONFIG_PAX_REFCOUNT
906 +"\n4:\n"
907 + _ASM_EXTABLE(2b, 4b)
908 +#endif
909 +
910 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
911 : "r" (&v->counter), "Ir" (i)
912 : "cc");
913 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
914 return oldval;
915 }
916
917 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
918 +{
919 + unsigned long oldval, res;
920 +
921 + smp_mb();
922 +
923 + do {
924 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
925 + "ldrex %1, [%3]\n"
926 + "mov %0, #0\n"
927 + "teq %1, %4\n"
928 + "strexeq %0, %5, [%3]\n"
929 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
930 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
931 + : "cc");
932 + } while (res);
933 +
934 + smp_mb();
935 +
936 + return oldval;
937 +}
938 +
939 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
940 {
941 unsigned long tmp, tmp2;
942 @@ -167,7 +315,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
943
944 return val;
945 }
946 +#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
947 #define atomic_add(i, v) (void) atomic_add_return(i, v)
948 +#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
949
950 static inline int atomic_sub_return(int i, atomic_t *v)
951 {
952 @@ -181,7 +331,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
953
954 return val;
955 }
956 +#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
957 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
958 +#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
959
960 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
961 {
962 @@ -196,6 +348,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
963
964 return ret;
965 }
966 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
967
968 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
969 {
970 @@ -209,6 +362,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
971 #endif /* __LINUX_ARM_ARCH__ */
972
973 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
974 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
975 +{
976 + return xchg(&v->counter, new);
977 +}
978
979 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
980 {
981 @@ -221,11 +378,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
982 }
983
984 #define atomic_inc(v) atomic_add(1, v)
985 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
986 +{
987 + atomic_add_unchecked(1, v);
988 +}
989 #define atomic_dec(v) atomic_sub(1, v)
990 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
991 +{
992 + atomic_sub_unchecked(1, v);
993 +}
994
995 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
996 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
997 +{
998 + return atomic_add_return_unchecked(1, v) == 0;
999 +}
1000 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1001 #define atomic_inc_return(v) (atomic_add_return(1, v))
1002 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1003 +{
1004 + return atomic_add_return_unchecked(1, v);
1005 +}
1006 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1007 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1008
1009 @@ -241,6 +414,14 @@ typedef struct {
1010 u64 __aligned(8) counter;
1011 } atomic64_t;
1012
1013 +#ifdef CONFIG_PAX_REFCOUNT
1014 +typedef struct {
1015 + u64 __aligned(8) counter;
1016 +} atomic64_unchecked_t;
1017 +#else
1018 +typedef atomic64_t atomic64_unchecked_t;
1019 +#endif
1020 +
1021 #define ATOMIC64_INIT(i) { (i) }
1022
1023 static inline u64 atomic64_read(atomic64_t *v)
1024 @@ -256,6 +437,19 @@ static inline u64 atomic64_read(atomic64_t *v)
1025 return result;
1026 }
1027
1028 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1029 +{
1030 + u64 result;
1031 +
1032 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1033 +" ldrexd %0, %H0, [%1]"
1034 + : "=&r" (result)
1035 + : "r" (&v->counter), "Qo" (v->counter)
1036 + );
1037 +
1038 + return result;
1039 +}
1040 +
1041 static inline void atomic64_set(atomic64_t *v, u64 i)
1042 {
1043 u64 tmp;
1044 @@ -270,6 +464,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1045 : "cc");
1046 }
1047
1048 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1049 +{
1050 + u64 tmp;
1051 +
1052 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1053 +"1: ldrexd %0, %H0, [%2]\n"
1054 +" strexd %0, %3, %H3, [%2]\n"
1055 +" teq %0, #0\n"
1056 +" bne 1b"
1057 + : "=&r" (tmp), "=Qo" (v->counter)
1058 + : "r" (&v->counter), "r" (i)
1059 + : "cc");
1060 +}
1061 +
1062 static inline void atomic64_add(u64 i, atomic64_t *v)
1063 {
1064 u64 result;
1065 @@ -278,6 +486,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1066 __asm__ __volatile__("@ atomic64_add\n"
1067 "1: ldrexd %0, %H0, [%3]\n"
1068 " adds %0, %0, %4\n"
1069 +" adcs %H0, %H0, %H4\n"
1070 +
1071 +#ifdef CONFIG_PAX_REFCOUNT
1072 +" bvc 3f\n"
1073 +"2: bkpt 0xf103\n"
1074 +"3:\n"
1075 +#endif
1076 +
1077 +" strexd %1, %0, %H0, [%3]\n"
1078 +" teq %1, #0\n"
1079 +" bne 1b"
1080 +
1081 +#ifdef CONFIG_PAX_REFCOUNT
1082 +"\n4:\n"
1083 + _ASM_EXTABLE(2b, 4b)
1084 +#endif
1085 +
1086 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1087 + : "r" (&v->counter), "r" (i)
1088 + : "cc");
1089 +}
1090 +
1091 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1092 +{
1093 + u64 result;
1094 + unsigned long tmp;
1095 +
1096 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1097 +"1: ldrexd %0, %H0, [%3]\n"
1098 +" adds %0, %0, %4\n"
1099 " adc %H0, %H0, %H4\n"
1100 " strexd %1, %0, %H0, [%3]\n"
1101 " teq %1, #0\n"
1102 @@ -289,12 +527,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1103
1104 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1105 {
1106 - u64 result;
1107 - unsigned long tmp;
1108 + u64 result, tmp;
1109
1110 smp_mb();
1111
1112 __asm__ __volatile__("@ atomic64_add_return\n"
1113 +"1: ldrexd %1, %H1, [%3]\n"
1114 +" adds %0, %1, %4\n"
1115 +" adcs %H0, %H1, %H4\n"
1116 +
1117 +#ifdef CONFIG_PAX_REFCOUNT
1118 +" bvc 3f\n"
1119 +" mov %0, %1\n"
1120 +" mov %H0, %H1\n"
1121 +"2: bkpt 0xf103\n"
1122 +"3:\n"
1123 +#endif
1124 +
1125 +" strexd %1, %0, %H0, [%3]\n"
1126 +" teq %1, #0\n"
1127 +" bne 1b"
1128 +
1129 +#ifdef CONFIG_PAX_REFCOUNT
1130 +"\n4:\n"
1131 + _ASM_EXTABLE(2b, 4b)
1132 +#endif
1133 +
1134 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1135 + : "r" (&v->counter), "r" (i)
1136 + : "cc");
1137 +
1138 + smp_mb();
1139 +
1140 + return result;
1141 +}
1142 +
1143 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1144 +{
1145 + u64 result;
1146 + unsigned long tmp;
1147 +
1148 + smp_mb();
1149 +
1150 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1151 "1: ldrexd %0, %H0, [%3]\n"
1152 " adds %0, %0, %4\n"
1153 " adc %H0, %H0, %H4\n"
1154 @@ -318,6 +593,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1155 __asm__ __volatile__("@ atomic64_sub\n"
1156 "1: ldrexd %0, %H0, [%3]\n"
1157 " subs %0, %0, %4\n"
1158 +" sbcs %H0, %H0, %H4\n"
1159 +
1160 +#ifdef CONFIG_PAX_REFCOUNT
1161 +" bvc 3f\n"
1162 +"2: bkpt 0xf103\n"
1163 +"3:\n"
1164 +#endif
1165 +
1166 +" strexd %1, %0, %H0, [%3]\n"
1167 +" teq %1, #0\n"
1168 +" bne 1b"
1169 +
1170 +#ifdef CONFIG_PAX_REFCOUNT
1171 +"\n4:\n"
1172 + _ASM_EXTABLE(2b, 4b)
1173 +#endif
1174 +
1175 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1176 + : "r" (&v->counter), "r" (i)
1177 + : "cc");
1178 +}
1179 +
1180 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1181 +{
1182 + u64 result;
1183 + unsigned long tmp;
1184 +
1185 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1186 +"1: ldrexd %0, %H0, [%3]\n"
1187 +" subs %0, %0, %4\n"
1188 " sbc %H0, %H0, %H4\n"
1189 " strexd %1, %0, %H0, [%3]\n"
1190 " teq %1, #0\n"
1191 @@ -329,18 +634,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1192
1193 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1194 {
1195 - u64 result;
1196 - unsigned long tmp;
1197 + u64 result, tmp;
1198
1199 smp_mb();
1200
1201 __asm__ __volatile__("@ atomic64_sub_return\n"
1202 -"1: ldrexd %0, %H0, [%3]\n"
1203 -" subs %0, %0, %4\n"
1204 -" sbc %H0, %H0, %H4\n"
1205 +"1: ldrexd %1, %H1, [%3]\n"
1206 +" subs %0, %1, %4\n"
1207 +" sbc %H0, %H1, %H4\n"
1208 +
1209 +#ifdef CONFIG_PAX_REFCOUNT
1210 +" bvc 3f\n"
1211 +" mov %0, %1\n"
1212 +" mov %H0, %H1\n"
1213 +"2: bkpt 0xf103\n"
1214 +"3:\n"
1215 +#endif
1216 +
1217 " strexd %1, %0, %H0, [%3]\n"
1218 " teq %1, #0\n"
1219 " bne 1b"
1220 +
1221 +#ifdef CONFIG_PAX_REFCOUNT
1222 +"\n4:\n"
1223 + _ASM_EXTABLE(2b, 4b)
1224 +#endif
1225 +
1226 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1227 : "r" (&v->counter), "r" (i)
1228 : "cc");
1229 @@ -374,6 +693,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1230 return oldval;
1231 }
1232
1233 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1234 +{
1235 + u64 oldval;
1236 + unsigned long res;
1237 +
1238 + smp_mb();
1239 +
1240 + do {
1241 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1242 + "ldrexd %1, %H1, [%3]\n"
1243 + "mov %0, #0\n"
1244 + "teq %1, %4\n"
1245 + "teqeq %H1, %H4\n"
1246 + "strexdeq %0, %5, %H5, [%3]"
1247 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1248 + : "r" (&ptr->counter), "r" (old), "r" (new)
1249 + : "cc");
1250 + } while (res);
1251 +
1252 + smp_mb();
1253 +
1254 + return oldval;
1255 +}
1256 +
1257 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1258 {
1259 u64 result;
1260 @@ -397,21 +740,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1261
1262 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1263 {
1264 - u64 result;
1265 - unsigned long tmp;
1266 + u64 result, tmp;
1267
1268 smp_mb();
1269
1270 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1271 -"1: ldrexd %0, %H0, [%3]\n"
1272 -" subs %0, %0, #1\n"
1273 -" sbc %H0, %H0, #0\n"
1274 +"1: ldrexd %1, %H1, [%3]\n"
1275 +" subs %0, %1, #1\n"
1276 +" sbc %H0, %H1, #0\n"
1277 +
1278 +#ifdef CONFIG_PAX_REFCOUNT
1279 +" bvc 3f\n"
1280 +" mov %0, %1\n"
1281 +" mov %H0, %H1\n"
1282 +"2: bkpt 0xf103\n"
1283 +"3:\n"
1284 +#endif
1285 +
1286 " teq %H0, #0\n"
1287 -" bmi 2f\n"
1288 +" bmi 4f\n"
1289 " strexd %1, %0, %H0, [%3]\n"
1290 " teq %1, #0\n"
1291 " bne 1b\n"
1292 -"2:"
1293 +"4:\n"
1294 +
1295 +#ifdef CONFIG_PAX_REFCOUNT
1296 + _ASM_EXTABLE(2b, 4b)
1297 +#endif
1298 +
1299 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1300 : "r" (&v->counter)
1301 : "cc");
1302 @@ -434,13 +790,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1303 " teq %0, %5\n"
1304 " teqeq %H0, %H5\n"
1305 " moveq %1, #0\n"
1306 -" beq 2f\n"
1307 +" beq 4f\n"
1308 " adds %0, %0, %6\n"
1309 " adc %H0, %H0, %H6\n"
1310 +
1311 +#ifdef CONFIG_PAX_REFCOUNT
1312 +" bvc 3f\n"
1313 +"2: bkpt 0xf103\n"
1314 +"3:\n"
1315 +#endif
1316 +
1317 " strexd %2, %0, %H0, [%4]\n"
1318 " teq %2, #0\n"
1319 " bne 1b\n"
1320 -"2:"
1321 +"4:\n"
1322 +
1323 +#ifdef CONFIG_PAX_REFCOUNT
1324 + _ASM_EXTABLE(2b, 4b)
1325 +#endif
1326 +
1327 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1328 : "r" (&v->counter), "r" (u), "r" (a)
1329 : "cc");
1330 @@ -453,10 +821,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1331
1332 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1333 #define atomic64_inc(v) atomic64_add(1LL, (v))
1334 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1335 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1336 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1337 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1338 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1339 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1340 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1341 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1342 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1343 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
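
The CONFIG_PAX_REFCOUNT variants above add an overflow check to each signed atomic update: adds/adcs set the overflow flag, bvc branches past the trap when no overflow occurred, and the bkpt 0xf103 with its _ASM_EXTABLE entry hands control to the fault path before strex can publish the overflowed value; the *_unchecked variants and atomic_unchecked_t exist so counters that may legitimately wrap (statistics and the like) can opt out. A plain-C illustration of the policy, not the kernel mechanism (assumes a compiler providing __builtin_add_overflow, e.g. GCC 5+ or Clang):

        #include <limits.h>
        #include <stdio.h>

        /* Detect-and-refuse on signed overflow, roughly the effect the
         * trapped bkpt path has on a protected reference counter. */
        static int refcount_add_checked(int *counter, int delta)
        {
                int result;
                if (__builtin_add_overflow(*counter, delta, &result)) {
                        fprintf(stderr, "refcount overflow detected\n");
                        return *counter;        /* leave the counter unchanged */
                }
                *counter = result;
                return result;
        }

        int main(void)
        {
                int c = INT_MAX - 1;
                refcount_add_checked(&c, 1);    /* ok */
                refcount_add_checked(&c, 1);    /* overflow path triggers */
                return 0;
        }
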
1344 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1345 index 75fe66b..2255c86 100644
1346 --- a/arch/arm/include/asm/cache.h
1347 +++ b/arch/arm/include/asm/cache.h
1348 @@ -4,8 +4,10 @@
1349 #ifndef __ASMARM_CACHE_H
1350 #define __ASMARM_CACHE_H
1351
1352 +#include <linux/const.h>
1353 +
1354 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1355 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1356 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1357
1358 /*
1359 * Memory returned by kmalloc() may be used for DMA, so we must make
1360 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1361 index 1252a26..9dc17b5 100644
1362 --- a/arch/arm/include/asm/cacheflush.h
1363 +++ b/arch/arm/include/asm/cacheflush.h
1364 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1365 void (*dma_unmap_area)(const void *, size_t, int);
1366
1367 void (*dma_flush_range)(const void *, const void *);
1368 -};
1369 +} __no_const;
1370
1371 /*
1372 * Select the calling method
1373 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1374 index d41d7cb..9bea5e0 100644
1375 --- a/arch/arm/include/asm/cmpxchg.h
1376 +++ b/arch/arm/include/asm/cmpxchg.h
1377 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1378
1379 #define xchg(ptr,x) \
1380 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1381 +#define xchg_unchecked(ptr,x) \
1382 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1383
1384 #include <asm-generic/cmpxchg-local.h>
1385
1386 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1387 index 38050b1..9d90e8b 100644
1388 --- a/arch/arm/include/asm/elf.h
1389 +++ b/arch/arm/include/asm/elf.h
1390 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1391 the loader. We need to make sure that it is out of the way of the program
1392 that it will "exec", and that there is sufficient room for the brk. */
1393
1394 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1395 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1396 +
1397 +#ifdef CONFIG_PAX_ASLR
1398 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1399 +
1400 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1401 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1402 +#endif
1403
1404 /* When the program starts, a1 contains a pointer to a function to be
1405 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1406 @@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1407 extern void elf_set_personality(const struct elf32_hdr *);
1408 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1409
1410 -struct mm_struct;
1411 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1412 -#define arch_randomize_brk arch_randomize_brk
1413 -
1414 #endif
1415 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1416 index e51b1e8..32a3113 100644
1417 --- a/arch/arm/include/asm/kmap_types.h
1418 +++ b/arch/arm/include/asm/kmap_types.h
1419 @@ -21,6 +21,7 @@ enum km_type {
1420 KM_L1_CACHE,
1421 KM_L2_CACHE,
1422 KM_KDB,
1423 + KM_CLEARPAGE,
1424 KM_TYPE_NR
1425 };
1426
1427 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1428 index 53426c6..c7baff3 100644
1429 --- a/arch/arm/include/asm/outercache.h
1430 +++ b/arch/arm/include/asm/outercache.h
1431 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1432 #endif
1433 void (*set_debug)(unsigned long);
1434 void (*resume)(void);
1435 -};
1436 +} __no_const;
1437
1438 #ifdef CONFIG_OUTER_CACHE
1439
1440 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1441 index 5838361..da6e813 100644
1442 --- a/arch/arm/include/asm/page.h
1443 +++ b/arch/arm/include/asm/page.h
1444 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1445 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1446 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1447 unsigned long vaddr, struct vm_area_struct *vma);
1448 -};
1449 +} __no_const;
1450
1451 #ifdef MULTI_USER
1452 extern struct cpu_user_fns cpu_user;
1453 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1454 index 943504f..bf8d667 100644
1455 --- a/arch/arm/include/asm/pgalloc.h
1456 +++ b/arch/arm/include/asm/pgalloc.h
1457 @@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1458 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1459 }
1460
1461 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1462 +{
1463 + pud_populate(mm, pud, pmd);
1464 +}
1465 +
1466 #else /* !CONFIG_ARM_LPAE */
1467
1468 /*
1469 @@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1470 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1471 #define pmd_free(mm, pmd) do { } while (0)
1472 #define pud_populate(mm,pmd,pte) BUG()
1473 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1474
1475 #endif /* CONFIG_ARM_LPAE */
1476
1477 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1478 index 0f04d84..2be5648 100644
1479 --- a/arch/arm/include/asm/thread_info.h
1480 +++ b/arch/arm/include/asm/thread_info.h
1481 @@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1482 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1483 #define TIF_SYSCALL_TRACE 8
1484 #define TIF_SYSCALL_AUDIT 9
1485 +
1486 +/* within 8 bits of TIF_SYSCALL_TRACE
1487 + to meet flexible second operand requirements
1488 +*/
1489 +#define TIF_GRSEC_SETXID 10
1490 +
1491 #define TIF_POLLING_NRFLAG 16
1492 #define TIF_USING_IWMMXT 17
1493 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1494 @@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1495 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1496 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1497 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1498 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1499
1500 /* Checks for any syscall work in entry-common.S */
1501 -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1502 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1503 + _TIF_GRSEC_SETXID)
1504
1505 /*
1506 * Change these and you break ASM code in entry-common.S
1507 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1508 index 71f6536..602f279 100644
1509 --- a/arch/arm/include/asm/uaccess.h
1510 +++ b/arch/arm/include/asm/uaccess.h
1511 @@ -22,6 +22,8 @@
1512 #define VERIFY_READ 0
1513 #define VERIFY_WRITE 1
1514
1515 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1516 +
1517 /*
1518 * The exception table consists of pairs of addresses: the first is the
1519 * address of an instruction that is allowed to fault, and the second is
1520 @@ -387,8 +389,23 @@ do { \
1521
1522
1523 #ifdef CONFIG_MMU
1524 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1525 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1526 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1527 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1528 +
1529 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1530 +{
1531 + if (!__builtin_constant_p(n))
1532 + check_object_size(to, n, false);
1533 + return ___copy_from_user(to, from, n);
1534 +}
1535 +
1536 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1537 +{
1538 + if (!__builtin_constant_p(n))
1539 + check_object_size(from, n, true);
1540 + return ___copy_to_user(to, from, n);
1541 +}
1542 +
1543 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1544 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1545 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1546 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1547
1548 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1549 {
1550 + if ((long)n < 0)
1551 + return n;
1552 +
1553 if (access_ok(VERIFY_READ, from, n))
1554 n = __copy_from_user(to, from, n);
1555 else /* security hole - plug it */
1556 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1557
1558 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1559 {
1560 + if ((long)n < 0)
1561 + return n;
1562 +
1563 if (access_ok(VERIFY_WRITE, to, n))
1564 n = __copy_to_user(to, from, n);
1565 return n;
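
The copy_from_user()/copy_to_user() changes above add two layers: __copy_{from,to}_user() now call check_object_size() for non-constant sizes so the usercopy bounds checking can validate the kernel buffer, and the top-level helpers reject any size whose signed interpretation is negative before consulting access_ok(), catching sizes produced by a wrapped unsigned subtraction. A standalone illustration of that sign test (userspace, hypothetical values):

        #include <stdio.h>

        int main(void)
        {
                unsigned long have = 16, want = 24;
                unsigned long n = have - want;  /* wraps to a huge value */

                if ((long)n < 0)                /* same test as the wrappers */
                        printf("rejecting wrapped copy size %lu\n", n);
                return 0;
        }
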
1566 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1567 index b57c75e..ed2d6b2 100644
1568 --- a/arch/arm/kernel/armksyms.c
1569 +++ b/arch/arm/kernel/armksyms.c
1570 @@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1571 #ifdef CONFIG_MMU
1572 EXPORT_SYMBOL(copy_page);
1573
1574 -EXPORT_SYMBOL(__copy_from_user);
1575 -EXPORT_SYMBOL(__copy_to_user);
1576 +EXPORT_SYMBOL(___copy_from_user);
1577 +EXPORT_SYMBOL(___copy_to_user);
1578 EXPORT_SYMBOL(__clear_user);
1579
1580 EXPORT_SYMBOL(__get_user_1);
1581 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1582 index 2b7b017..c380fa2 100644
1583 --- a/arch/arm/kernel/process.c
1584 +++ b/arch/arm/kernel/process.c
1585 @@ -28,7 +28,6 @@
1586 #include <linux/tick.h>
1587 #include <linux/utsname.h>
1588 #include <linux/uaccess.h>
1589 -#include <linux/random.h>
1590 #include <linux/hw_breakpoint.h>
1591 #include <linux/cpuidle.h>
1592
1593 @@ -275,9 +274,10 @@ void machine_power_off(void)
1594 machine_shutdown();
1595 if (pm_power_off)
1596 pm_power_off();
1597 + BUG();
1598 }
1599
1600 -void machine_restart(char *cmd)
1601 +__noreturn void machine_restart(char *cmd)
1602 {
1603 machine_shutdown();
1604
1605 @@ -519,12 +519,6 @@ unsigned long get_wchan(struct task_struct *p)
1606 return 0;
1607 }
1608
1609 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1610 -{
1611 - unsigned long range_end = mm->brk + 0x02000000;
1612 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1613 -}
1614 -
1615 #ifdef CONFIG_MMU
1616 /*
1617 * The vectors page is always readable from user space for the
1618 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1619 index 9650c14..ae30cdd 100644
1620 --- a/arch/arm/kernel/ptrace.c
1621 +++ b/arch/arm/kernel/ptrace.c
1622 @@ -906,10 +906,19 @@ long arch_ptrace(struct task_struct *child, long request,
1623 return ret;
1624 }
1625
1626 +#ifdef CONFIG_GRKERNSEC_SETXID
1627 +extern void gr_delayed_cred_worker(void);
1628 +#endif
1629 +
1630 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1631 {
1632 unsigned long ip;
1633
1634 +#ifdef CONFIG_GRKERNSEC_SETXID
1635 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1636 + gr_delayed_cred_worker();
1637 +#endif
1638 +
1639 if (why)
1640 audit_syscall_exit(regs);
1641 else
1642 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1643 index ebfac78..cbea9c0 100644
1644 --- a/arch/arm/kernel/setup.c
1645 +++ b/arch/arm/kernel/setup.c
1646 @@ -111,13 +111,13 @@ struct processor processor __read_mostly;
1647 struct cpu_tlb_fns cpu_tlb __read_mostly;
1648 #endif
1649 #ifdef MULTI_USER
1650 -struct cpu_user_fns cpu_user __read_mostly;
1651 +struct cpu_user_fns cpu_user __read_only;
1652 #endif
1653 #ifdef MULTI_CACHE
1654 -struct cpu_cache_fns cpu_cache __read_mostly;
1655 +struct cpu_cache_fns cpu_cache __read_only;
1656 #endif
1657 #ifdef CONFIG_OUTER_CACHE
1658 -struct outer_cache_fns outer_cache __read_mostly;
1659 +struct outer_cache_fns outer_cache __read_only;
1660 EXPORT_SYMBOL(outer_cache);
1661 #endif
1662
1663 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1664 index 63d402f..db1d714 100644
1665 --- a/arch/arm/kernel/traps.c
1666 +++ b/arch/arm/kernel/traps.c
1667 @@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1668
1669 static DEFINE_RAW_SPINLOCK(die_lock);
1670
1671 +extern void gr_handle_kernel_exploit(void);
1672 +
1673 /*
1674 * This function is protected against re-entrancy.
1675 */
1676 @@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1677 panic("Fatal exception in interrupt");
1678 if (panic_on_oops)
1679 panic("Fatal exception");
1680 +
1681 + gr_handle_kernel_exploit();
1682 +
1683 if (ret != NOTIFY_STOP)
1684 do_exit(SIGSEGV);
1685 }
1686 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1687 index 66a477a..bee61d3 100644
1688 --- a/arch/arm/lib/copy_from_user.S
1689 +++ b/arch/arm/lib/copy_from_user.S
1690 @@ -16,7 +16,7 @@
1691 /*
1692 * Prototype:
1693 *
1694 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1695 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1696 *
1697 * Purpose:
1698 *
1699 @@ -84,11 +84,11 @@
1700
1701 .text
1702
1703 -ENTRY(__copy_from_user)
1704 +ENTRY(___copy_from_user)
1705
1706 #include "copy_template.S"
1707
1708 -ENDPROC(__copy_from_user)
1709 +ENDPROC(___copy_from_user)
1710
1711 .pushsection .fixup,"ax"
1712 .align 0
1713 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1714 index 6ee2f67..d1cce76 100644
1715 --- a/arch/arm/lib/copy_page.S
1716 +++ b/arch/arm/lib/copy_page.S
1717 @@ -10,6 +10,7 @@
1718 * ASM optimised string functions
1719 */
1720 #include <linux/linkage.h>
1721 +#include <linux/const.h>
1722 #include <asm/assembler.h>
1723 #include <asm/asm-offsets.h>
1724 #include <asm/cache.h>
1725 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1726 index d066df6..df28194 100644
1727 --- a/arch/arm/lib/copy_to_user.S
1728 +++ b/arch/arm/lib/copy_to_user.S
1729 @@ -16,7 +16,7 @@
1730 /*
1731 * Prototype:
1732 *
1733 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1734 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1735 *
1736 * Purpose:
1737 *
1738 @@ -88,11 +88,11 @@
1739 .text
1740
1741 ENTRY(__copy_to_user_std)
1742 -WEAK(__copy_to_user)
1743 +WEAK(___copy_to_user)
1744
1745 #include "copy_template.S"
1746
1747 -ENDPROC(__copy_to_user)
1748 +ENDPROC(___copy_to_user)
1749 ENDPROC(__copy_to_user_std)
1750
1751 .pushsection .fixup,"ax"
1752 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1753 index 5c908b1..e712687 100644
1754 --- a/arch/arm/lib/uaccess.S
1755 +++ b/arch/arm/lib/uaccess.S
1756 @@ -20,7 +20,7 @@
1757
1758 #define PAGE_SHIFT 12
1759
1760 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1761 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1762 * Purpose : copy a block to user memory from kernel memory
1763 * Params : to - user memory
1764 * : from - kernel memory
1765 @@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1766 sub r2, r2, ip
1767 b .Lc2u_dest_aligned
1768
1769 -ENTRY(__copy_to_user)
1770 +ENTRY(___copy_to_user)
1771 stmfd sp!, {r2, r4 - r7, lr}
1772 cmp r2, #4
1773 blt .Lc2u_not_enough
1774 @@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1775 ldrgtb r3, [r1], #0
1776 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1777 b .Lc2u_finished
1778 -ENDPROC(__copy_to_user)
1779 +ENDPROC(___copy_to_user)
1780
1781 .pushsection .fixup,"ax"
1782 .align 0
1783 9001: ldmfd sp!, {r0, r4 - r7, pc}
1784 .popsection
1785
1786 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1787 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1788 * Purpose : copy a block from user memory to kernel memory
1789 * Params : to - kernel memory
1790 * : from - user memory
1791 @@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1792 sub r2, r2, ip
1793 b .Lcfu_dest_aligned
1794
1795 -ENTRY(__copy_from_user)
1796 +ENTRY(___copy_from_user)
1797 stmfd sp!, {r0, r2, r4 - r7, lr}
1798 cmp r2, #4
1799 blt .Lcfu_not_enough
1800 @@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1801 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1802 strgtb r3, [r0], #1
1803 b .Lcfu_finished
1804 -ENDPROC(__copy_from_user)
1805 +ENDPROC(___copy_from_user)
1806
1807 .pushsection .fixup,"ax"
1808 .align 0
1809 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1810 index 025f742..8432b08 100644
1811 --- a/arch/arm/lib/uaccess_with_memcpy.c
1812 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1813 @@ -104,7 +104,7 @@ out:
1814 }
1815
1816 unsigned long
1817 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1818 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1819 {
1820 /*
1821 * This test is stubbed out of the main function above to keep
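Renaming the raw ARM copy routines from __copy_{to,from}_user to ___copy_{to,from}_user (both the assembly entry points and the memcpy-based C variant here) frees the double-underscore names for checking wrappers that the arch/arm/include/asm/uaccess.h part of this patch layers on top; presumably those wrappers reject implausible lengths before handing the copy to the unchecked triple-underscore routine. A standalone model of that shape, with raw_copy() standing in for the assembly routine and the exact check an assumption:

	/* Sketch of a length-checking wrapper in front of an unchecked copy. */
	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	static size_t raw_copy(void *to, const void *from, size_t n)
	{
		memcpy(to, from, n);
		return 0;                       /* 0 bytes left uncopied */
	}

	static size_t checked_copy(void *to, const void *from, size_t n)
	{
		if ((long)n < 0)                /* absurd size: refuse the copy */
			return n;               /* report everything uncopied */
		return raw_copy(to, from, n);
	}

	int main(void)
	{
		char src[8] = "abcdefg", dst[8];

		printf("good copy left %zu bytes\n", checked_copy(dst, src, sizeof(src)));
		printf("bogus size left %zu bytes\n", checked_copy(dst, src, (size_t)-1));
		return 0;
	}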
1822 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1823 index 518091c..eae9a76 100644
1824 --- a/arch/arm/mach-omap2/board-n8x0.c
1825 +++ b/arch/arm/mach-omap2/board-n8x0.c
1826 @@ -596,7 +596,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1827 }
1828 #endif
1829
1830 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1831 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1832 .late_init = n8x0_menelaus_late_init,
1833 };
1834
1835 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1836 index 5bb4835..4760f68 100644
1837 --- a/arch/arm/mm/fault.c
1838 +++ b/arch/arm/mm/fault.c
1839 @@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1840 }
1841 #endif
1842
1843 +#ifdef CONFIG_PAX_PAGEEXEC
1844 + if (fsr & FSR_LNX_PF) {
1845 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1846 + do_group_exit(SIGKILL);
1847 + }
1848 +#endif
1849 +
1850 tsk->thread.address = addr;
1851 tsk->thread.error_code = fsr;
1852 tsk->thread.trap_no = 14;
1853 @@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1854 }
1855 #endif /* CONFIG_MMU */
1856
1857 +#ifdef CONFIG_PAX_PAGEEXEC
1858 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1859 +{
1860 + long i;
1861 +
1862 + printk(KERN_ERR "PAX: bytes at PC: ");
1863 + for (i = 0; i < 20; i++) {
1864 + unsigned char c;
1865 + if (get_user(c, (__force unsigned char __user *)pc+i))
1866 + printk(KERN_CONT "?? ");
1867 + else
1868 + printk(KERN_CONT "%02x ", c);
1869 + }
1870 + printk("\n");
1871 +
1872 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1873 + for (i = -1; i < 20; i++) {
1874 + unsigned long c;
1875 + if (get_user(c, (__force unsigned long __user *)sp+i))
1876 + printk(KERN_CONT "???????? ");
1877 + else
1878 + printk(KERN_CONT "%08lx ", c);
1879 + }
1880 + printk("\n");
1881 +}
1882 +#endif
1883 +
1884 /*
1885 * First Level Translation Fault Handler
1886 *
1887 @@ -577,6 +611,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1888 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1889 struct siginfo info;
1890
1891 +#ifdef CONFIG_PAX_REFCOUNT
1892 + if (fsr_fs(ifsr) == 2) {
1893 + unsigned int bkpt;
1894 +
1895 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1896 + current->thread.error_code = ifsr;
1897 + current->thread.trap_no = 0;
1898 + pax_report_refcount_overflow(regs);
1899 + fixup_exception(regs);
1900 + return;
1901 + }
1902 + }
1903 +#endif
1904 +
1905 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1906 return;
1907
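The PAX_REFCOUNT hunk above turns a very specific prefetch abort into a reference-counter overflow report: on fault status 2 (a debug event), it probes the faulting address and, if the word there is 0xe12f1073 -- which decodes to an ARM BKPT instruction -- it assumes the trap was planted by the REFCOUNT instrumentation after a saturating atomic operation, reports it, and fixes up instead of treating it as a normal fault. A standalone sketch of the probe-and-compare step, with probe_word() standing in for probe_kernel_address():

	/* Sketch: recognise a known trap encoding at a faulting PC. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define REFCOUNT_TRAP_INSN 0xe12f1073u   /* ARM bkpt encoding from the hunk */

	static int probe_word(const void *addr, uint32_t *out)
	{
		memcpy(out, addr, sizeof(*out)); /* the kernel reads this fault-safely */
		return 0;
	}

	static int is_refcount_trap(const void *pc)
	{
		uint32_t insn;

		return probe_word(pc, &insn) == 0 && insn == REFCOUNT_TRAP_INSN;
	}

	int main(void)
	{
		uint32_t text[] = { 0xe1a00000u /* mov r0, r0 */, REFCOUNT_TRAP_INSN };

		printf("word 0: %s\n", is_refcount_trap(&text[0]) ? "trap" : "not a trap");
		printf("word 1: %s\n", is_refcount_trap(&text[1]) ? "trap" : "not a trap");
		return 0;
	}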
1908 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1909 index ce8cb19..3ec539d 100644
1910 --- a/arch/arm/mm/mmap.c
1911 +++ b/arch/arm/mm/mmap.c
1912 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1913 if (len > TASK_SIZE)
1914 return -ENOMEM;
1915
1916 +#ifdef CONFIG_PAX_RANDMMAP
1917 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1918 +#endif
1919 +
1920 if (addr) {
1921 if (do_align)
1922 addr = COLOUR_ALIGN(addr, pgoff);
1923 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1924 addr = PAGE_ALIGN(addr);
1925
1926 vma = find_vma(mm, addr);
1927 - if (TASK_SIZE - len >= addr &&
1928 - (!vma || addr + len <= vma->vm_start))
1929 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1930 return addr;
1931 }
1932 if (len > mm->cached_hole_size) {
1933 - start_addr = addr = mm->free_area_cache;
1934 + start_addr = addr = mm->free_area_cache;
1935 } else {
1936 - start_addr = addr = mm->mmap_base;
1937 - mm->cached_hole_size = 0;
1938 + start_addr = addr = mm->mmap_base;
1939 + mm->cached_hole_size = 0;
1940 }
1941
1942 full_search:
1943 @@ -124,14 +127,14 @@ full_search:
1944 * Start a new search - just in case we missed
1945 * some holes.
1946 */
1947 - if (start_addr != TASK_UNMAPPED_BASE) {
1948 - start_addr = addr = TASK_UNMAPPED_BASE;
1949 + if (start_addr != mm->mmap_base) {
1950 + start_addr = addr = mm->mmap_base;
1951 mm->cached_hole_size = 0;
1952 goto full_search;
1953 }
1954 return -ENOMEM;
1955 }
1956 - if (!vma || addr + len <= vma->vm_start) {
1957 + if (check_heap_stack_gap(vma, addr, len)) {
1958 /*
1959 * Remember the place where we stopped the search:
1960 */
1961 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1962
1963 if (mmap_is_legacy()) {
1964 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1965 +
1966 +#ifdef CONFIG_PAX_RANDMMAP
1967 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1968 + mm->mmap_base += mm->delta_mmap;
1969 +#endif
1970 +
1971 mm->get_unmapped_area = arch_get_unmapped_area;
1972 mm->unmap_area = arch_unmap_area;
1973 } else {
1974 mm->mmap_base = mmap_base(random_factor);
1975 +
1976 +#ifdef CONFIG_PAX_RANDMMAP
1977 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1978 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
1979 +#endif
1980 +
1981 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1982 mm->unmap_area = arch_unmap_area_topdown;
1983 }
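Two recurring PaX changes show up in this mmap hunk and are repeated for the other architectures below. First, every open-coded "!vma || addr + len <= vma->vm_start" test becomes check_heap_stack_gap(vma, addr, len); that helper is added to the common mm headers by this patch (outside this excerpt) and, in addition to the old fit test, is expected to keep a configurable guard gap below a stack that grows down. Second, under PAX_RANDMMAP the caller-supplied hint is ignored and mmap_base is shifted by the per-mm random deltas. A standalone sketch of the gap check only, with the gap size and field names hypothetical:

	/* Sketch of a "fits below the next VMA, minus a stack guard gap" test. */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct vma_model {                  /* minimal stand-in for vm_area_struct */
		uintptr_t vm_start;
		bool      grows_down;       /* VM_GROWSDOWN */
	};

	#define SKETCH_GUARD_GAP (64UL * 1024)   /* hypothetical gap size */

	static bool gap_ok(const struct vma_model *next, uintptr_t addr, size_t len)
	{
		if (!next)                      /* nothing above the candidate */
			return true;
		if (next->grows_down)           /* keep clear of the stack */
			return addr + len + SKETCH_GUARD_GAP <= next->vm_start;
		return addr + len <= next->vm_start;
	}

	int main(void)
	{
		struct vma_model stack = { 0x7f000000u, true };

		printf("%d\n", gap_ok(&stack, 0x7e000000u, 4096)); /* 1: fits with room */
		printf("%d\n", gap_ok(&stack, 0x7eff8000u, 4096)); /* 0: lands in the gap */
		return 0;
	}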
1984 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1985 index 71a6827..e7fbc23 100644
1986 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1987 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1988 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
1989 int (*started)(unsigned ch);
1990 int (*flush)(unsigned ch);
1991 int (*stop)(unsigned ch);
1992 -};
1993 +} __no_const;
1994
1995 extern void *samsung_dmadev_get_ops(void);
1996 extern void *s3c_dma_get_ops(void);
1997 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1998 index 5f28cae..3d23723 100644
1999 --- a/arch/arm/plat-samsung/include/plat/ehci.h
2000 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
2001 @@ -14,7 +14,7 @@
2002 struct s5p_ehci_platdata {
2003 int (*phy_init)(struct platform_device *pdev, int type);
2004 int (*phy_exit)(struct platform_device *pdev, int type);
2005 -};
2006 +} __no_const;
2007
2008 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2009
2010 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2011 index c3a58a1..78fbf54 100644
2012 --- a/arch/avr32/include/asm/cache.h
2013 +++ b/arch/avr32/include/asm/cache.h
2014 @@ -1,8 +1,10 @@
2015 #ifndef __ASM_AVR32_CACHE_H
2016 #define __ASM_AVR32_CACHE_H
2017
2018 +#include <linux/const.h>
2019 +
2020 #define L1_CACHE_SHIFT 5
2021 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2022 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2023
2024 /*
2025 * Memory returned by kmalloc() may be used for DMA, so we must make
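This cache.h change (and the long series of identical ones below) replaces a plain "1 << L1_CACHE_SHIFT" with "_AC(1,UL) << L1_CACHE_SHIFT", which is why <linux/const.h> is now included. _AC() pastes the UL suffix onto the constant when compiling C, so L1_CACHE_BYTES becomes an unsigned long and later comparisons against it are done unsigned, while under __ASSEMBLY__ it expands to the bare number so the header stays usable from assembler. Condensed illustration of the mechanism (same idea as linux/const.h, trimmed):

	/* How _AC() yields a typed constant in C but a bare one in assembly. */
	#ifdef __ASSEMBLY__
	#define _AC(X, Y)  X                  /* assembler: plain constant */
	#else
	#define __AC(X, Y) (X##Y)             /* C: paste the type suffix on */
	#define _AC(X, Y)  __AC(X, Y)
	#endif

	#define L1_CACHE_SHIFT 5
	#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)   /* 32, as unsigned long */

	#include <stdio.h>

	int main(void)
	{
		printf("%lu-byte cache lines\n", L1_CACHE_BYTES);
		return 0;
	}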
2026 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2027 index 3b3159b..425ea94 100644
2028 --- a/arch/avr32/include/asm/elf.h
2029 +++ b/arch/avr32/include/asm/elf.h
2030 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2031 the loader. We need to make sure that it is out of the way of the program
2032 that it will "exec", and that there is sufficient room for the brk. */
2033
2034 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2035 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2036
2037 +#ifdef CONFIG_PAX_ASLR
2038 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2039 +
2040 +#define PAX_DELTA_MMAP_LEN 15
2041 +#define PAX_DELTA_STACK_LEN 15
2042 +#endif
2043
2044 /* This yields a mask that user programs can use to figure out what
2045 instruction set this CPU supports. This could be done in user space,
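PAX_ELF_ET_DYN_BASE fixes where a PIE binary is loaded when ASLR is active, and PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN give the number of random bits this architecture contributes to the mmap and stack bases; the ELF-loader side of the patch (not in this excerpt) turns those bit counts into page-aligned deltas. Assuming the usual "LEN random bits shifted up by PAGE_SHIFT" derivation, 15 bits on a 4 KiB-page machine spans up to 128 MiB:

	/* Worked example of the randomisation range implied by LEN = 15. */
	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT          12
	#define PAX_DELTA_MMAP_LEN  15

	int main(void)
	{
		unsigned long mask  = (1UL << PAX_DELTA_MMAP_LEN) - 1;  /* 15 random bits */
		unsigned long delta = ((unsigned long)rand() & mask) << PAGE_SHIFT;

		printf("span: %lu MiB, this run's delta: %#lx\n",
		       ((mask + 1) << PAGE_SHIFT) >> 20, delta);
		return 0;
	}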
2046 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2047 index b7f5c68..556135c 100644
2048 --- a/arch/avr32/include/asm/kmap_types.h
2049 +++ b/arch/avr32/include/asm/kmap_types.h
2050 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2051 D(11) KM_IRQ1,
2052 D(12) KM_SOFTIRQ0,
2053 D(13) KM_SOFTIRQ1,
2054 -D(14) KM_TYPE_NR
2055 +D(14) KM_CLEARPAGE,
2056 +D(15) KM_TYPE_NR
2057 };
2058
2059 #undef D
2060 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2061 index f7040a1..db9f300 100644
2062 --- a/arch/avr32/mm/fault.c
2063 +++ b/arch/avr32/mm/fault.c
2064 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2065
2066 int exception_trace = 1;
2067
2068 +#ifdef CONFIG_PAX_PAGEEXEC
2069 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2070 +{
2071 + unsigned long i;
2072 +
2073 + printk(KERN_ERR "PAX: bytes at PC: ");
2074 + for (i = 0; i < 20; i++) {
2075 + unsigned char c;
2076 + if (get_user(c, (unsigned char *)pc+i))
2077 + printk(KERN_CONT "???????? ");
2078 + else
2079 + printk(KERN_CONT "%02x ", c);
2080 + }
2081 + printk("\n");
2082 +}
2083 +#endif
2084 +
2085 /*
2086 * This routine handles page faults. It determines the address and the
2087 * problem, and then passes it off to one of the appropriate routines.
2088 @@ -156,6 +173,16 @@ bad_area:
2089 up_read(&mm->mmap_sem);
2090
2091 if (user_mode(regs)) {
2092 +
2093 +#ifdef CONFIG_PAX_PAGEEXEC
2094 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2095 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2096 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2097 + do_group_exit(SIGKILL);
2098 + }
2099 + }
2100 +#endif
2101 +
2102 if (exception_trace && printk_ratelimit())
2103 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2104 "sp %08lx ecr %lu\n",
2105 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2106 index 568885a..f8008df 100644
2107 --- a/arch/blackfin/include/asm/cache.h
2108 +++ b/arch/blackfin/include/asm/cache.h
2109 @@ -7,6 +7,7 @@
2110 #ifndef __ARCH_BLACKFIN_CACHE_H
2111 #define __ARCH_BLACKFIN_CACHE_H
2112
2113 +#include <linux/const.h>
2114 #include <linux/linkage.h> /* for asmlinkage */
2115
2116 /*
2117 @@ -14,7 +15,7 @@
2118 * Blackfin loads 32 bytes for cache
2119 */
2120 #define L1_CACHE_SHIFT 5
2121 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2122 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2123 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2124
2125 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2126 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2127 index aea2718..3639a60 100644
2128 --- a/arch/cris/include/arch-v10/arch/cache.h
2129 +++ b/arch/cris/include/arch-v10/arch/cache.h
2130 @@ -1,8 +1,9 @@
2131 #ifndef _ASM_ARCH_CACHE_H
2132 #define _ASM_ARCH_CACHE_H
2133
2134 +#include <linux/const.h>
2135 /* Etrax 100LX have 32-byte cache-lines. */
2136 -#define L1_CACHE_BYTES 32
2137 #define L1_CACHE_SHIFT 5
2138 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2139
2140 #endif /* _ASM_ARCH_CACHE_H */
2141 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2142 index 1de779f..336fad3 100644
2143 --- a/arch/cris/include/arch-v32/arch/cache.h
2144 +++ b/arch/cris/include/arch-v32/arch/cache.h
2145 @@ -1,11 +1,12 @@
2146 #ifndef _ASM_CRIS_ARCH_CACHE_H
2147 #define _ASM_CRIS_ARCH_CACHE_H
2148
2149 +#include <linux/const.h>
2150 #include <arch/hwregs/dma.h>
2151
2152 /* A cache-line is 32 bytes. */
2153 -#define L1_CACHE_BYTES 32
2154 #define L1_CACHE_SHIFT 5
2155 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2156
2157 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2158
2159 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2160 index b86329d..6709906 100644
2161 --- a/arch/frv/include/asm/atomic.h
2162 +++ b/arch/frv/include/asm/atomic.h
2163 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2164 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2165 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2166
2167 +#define atomic64_read_unchecked(v) atomic64_read(v)
2168 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2169 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2170 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2171 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2172 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2173 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2174 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2175 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2176 +
2177 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2178 {
2179 int c, old;
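The *_unchecked aliases exist because PAX_REFCOUNT turns the ordinary atomic operations into overflow-checked ones on the architectures that implement it; call sites whose counters are allowed to wrap (statistics and the like) are converted to the _unchecked names throughout the rest of the patch. On frv, and on the other architectures given the same block below, there is no instrumentation, so the aliases simply map back to the plain operations and the common code keeps building. A standalone usage sketch of the convention:

	/* A wrap-tolerant statistic keeps using the _unchecked form; here the
	 * alias maps straight to a normal atomic add, as on frv. */
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define atomic64_inc_unchecked(v) atomic_fetch_add((v), 1)

	static _Atomic int64_t tx_packets;      /* wrapping would be harmless */

	int main(void)
	{
		for (int i = 0; i < 1000; i++)
			atomic64_inc_unchecked(&tx_packets);
		printf("tx_packets = %lld\n", (long long)atomic_load(&tx_packets));
		return 0;
	}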
2180 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2181 index 2797163..c2a401d 100644
2182 --- a/arch/frv/include/asm/cache.h
2183 +++ b/arch/frv/include/asm/cache.h
2184 @@ -12,10 +12,11 @@
2185 #ifndef __ASM_CACHE_H
2186 #define __ASM_CACHE_H
2187
2188 +#include <linux/const.h>
2189
2190 /* bytes per L1 cache line */
2191 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2192 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2193 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2194
2195 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2196 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2197 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2198 index f8e16b2..c73ff79 100644
2199 --- a/arch/frv/include/asm/kmap_types.h
2200 +++ b/arch/frv/include/asm/kmap_types.h
2201 @@ -23,6 +23,7 @@ enum km_type {
2202 KM_IRQ1,
2203 KM_SOFTIRQ0,
2204 KM_SOFTIRQ1,
2205 + KM_CLEARPAGE,
2206 KM_TYPE_NR
2207 };
2208
2209 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2210 index 385fd30..6c3d97e 100644
2211 --- a/arch/frv/mm/elf-fdpic.c
2212 +++ b/arch/frv/mm/elf-fdpic.c
2213 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2214 if (addr) {
2215 addr = PAGE_ALIGN(addr);
2216 vma = find_vma(current->mm, addr);
2217 - if (TASK_SIZE - len >= addr &&
2218 - (!vma || addr + len <= vma->vm_start))
2219 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2220 goto success;
2221 }
2222
2223 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2224 for (; vma; vma = vma->vm_next) {
2225 if (addr > limit)
2226 break;
2227 - if (addr + len <= vma->vm_start)
2228 + if (check_heap_stack_gap(vma, addr, len))
2229 goto success;
2230 addr = vma->vm_end;
2231 }
2232 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2233 for (; vma; vma = vma->vm_next) {
2234 if (addr > limit)
2235 break;
2236 - if (addr + len <= vma->vm_start)
2237 + if (check_heap_stack_gap(vma, addr, len))
2238 goto success;
2239 addr = vma->vm_end;
2240 }
2241 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2242 index c635028..6d9445a 100644
2243 --- a/arch/h8300/include/asm/cache.h
2244 +++ b/arch/h8300/include/asm/cache.h
2245 @@ -1,8 +1,10 @@
2246 #ifndef __ARCH_H8300_CACHE_H
2247 #define __ARCH_H8300_CACHE_H
2248
2249 +#include <linux/const.h>
2250 +
2251 /* bytes per L1 cache line */
2252 -#define L1_CACHE_BYTES 4
2253 +#define L1_CACHE_BYTES _AC(4,UL)
2254
2255 /* m68k-elf-gcc 2.95.2 doesn't like these */
2256
2257 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2258 index 0f01de2..d37d309 100644
2259 --- a/arch/hexagon/include/asm/cache.h
2260 +++ b/arch/hexagon/include/asm/cache.h
2261 @@ -21,9 +21,11 @@
2262 #ifndef __ASM_CACHE_H
2263 #define __ASM_CACHE_H
2264
2265 +#include <linux/const.h>
2266 +
2267 /* Bytes per L1 cache line */
2268 -#define L1_CACHE_SHIFT (5)
2269 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2270 +#define L1_CACHE_SHIFT 5
2271 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2272
2273 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2274 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2275 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2276 index 7d91166..88ab87e 100644
2277 --- a/arch/ia64/include/asm/atomic.h
2278 +++ b/arch/ia64/include/asm/atomic.h
2279 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2280 #define atomic64_inc(v) atomic64_add(1, (v))
2281 #define atomic64_dec(v) atomic64_sub(1, (v))
2282
2283 +#define atomic64_read_unchecked(v) atomic64_read(v)
2284 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2285 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2286 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2287 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2288 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2289 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2290 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2291 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2292 +
2293 /* Atomic operations are already serializing */
2294 #define smp_mb__before_atomic_dec() barrier()
2295 #define smp_mb__after_atomic_dec() barrier()
2296 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2297 index 988254a..e1ee885 100644
2298 --- a/arch/ia64/include/asm/cache.h
2299 +++ b/arch/ia64/include/asm/cache.h
2300 @@ -1,6 +1,7 @@
2301 #ifndef _ASM_IA64_CACHE_H
2302 #define _ASM_IA64_CACHE_H
2303
2304 +#include <linux/const.h>
2305
2306 /*
2307 * Copyright (C) 1998-2000 Hewlett-Packard Co
2308 @@ -9,7 +10,7 @@
2309
2310 /* Bytes per L1 (data) cache line. */
2311 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2312 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2313 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2314
2315 #ifdef CONFIG_SMP
2316 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2317 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2318 index b5298eb..67c6e62 100644
2319 --- a/arch/ia64/include/asm/elf.h
2320 +++ b/arch/ia64/include/asm/elf.h
2321 @@ -42,6 +42,13 @@
2322 */
2323 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2324
2325 +#ifdef CONFIG_PAX_ASLR
2326 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2327 +
2328 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2329 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2330 +#endif
2331 +
2332 #define PT_IA_64_UNWIND 0x70000001
2333
2334 /* IA-64 relocations: */
2335 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2336 index 96a8d92..617a1cf 100644
2337 --- a/arch/ia64/include/asm/pgalloc.h
2338 +++ b/arch/ia64/include/asm/pgalloc.h
2339 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2340 pgd_val(*pgd_entry) = __pa(pud);
2341 }
2342
2343 +static inline void
2344 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2345 +{
2346 + pgd_populate(mm, pgd_entry, pud);
2347 +}
2348 +
2349 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2350 {
2351 return quicklist_alloc(0, GFP_KERNEL, NULL);
2352 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2353 pud_val(*pud_entry) = __pa(pmd);
2354 }
2355
2356 +static inline void
2357 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2358 +{
2359 + pud_populate(mm, pud_entry, pmd);
2360 +}
2361 +
2362 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2363 {
2364 return quicklist_alloc(0, GFP_KERNEL, NULL);
2365 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2366 index 815810c..d60bd4c 100644
2367 --- a/arch/ia64/include/asm/pgtable.h
2368 +++ b/arch/ia64/include/asm/pgtable.h
2369 @@ -12,7 +12,7 @@
2370 * David Mosberger-Tang <davidm@hpl.hp.com>
2371 */
2372
2373 -
2374 +#include <linux/const.h>
2375 #include <asm/mman.h>
2376 #include <asm/page.h>
2377 #include <asm/processor.h>
2378 @@ -142,6 +142,17 @@
2379 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2380 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2381 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2382 +
2383 +#ifdef CONFIG_PAX_PAGEEXEC
2384 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2385 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2386 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2387 +#else
2388 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2389 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2390 +# define PAGE_COPY_NOEXEC PAGE_COPY
2391 +#endif
2392 +
2393 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2394 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2395 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2396 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2397 index 54ff557..70c88b7 100644
2398 --- a/arch/ia64/include/asm/spinlock.h
2399 +++ b/arch/ia64/include/asm/spinlock.h
2400 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2401 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2402
2403 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2404 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2405 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2406 }
2407
2408 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
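ACCESS_ONCE_RW() is introduced by the include/linux/compiler.h part of this patch: with the constification changes, plain ACCESS_ONCE() is made to read through a const-qualified pointer, so the few places that legitimately store through it, like this ticket unlock, switch to the RW variant. Assuming the definitions follow the usual volatile-cast idiom, the split looks like this:

	/* Read-only vs read-write forced access (standard ACCESS_ONCE idiom). */
	#include <stdio.h>

	#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x)) /* read */
	#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))       /* write */

	int main(void)
	{
		unsigned short ticket = 4;

		/* ACCESS_ONCE(ticket) = 6;  -- rejected: const-qualified lvalue */
		ACCESS_ONCE_RW(ticket) = (ACCESS_ONCE(ticket) + 2) & ~1;
		printf("%u\n", (unsigned)ticket);      /* 6 */
		return 0;
	}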
2409 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2410 index 449c8c0..432a3d2 100644
2411 --- a/arch/ia64/include/asm/uaccess.h
2412 +++ b/arch/ia64/include/asm/uaccess.h
2413 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2414 const void *__cu_from = (from); \
2415 long __cu_len = (n); \
2416 \
2417 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2418 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2419 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2420 __cu_len; \
2421 })
2422 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2423 long __cu_len = (n); \
2424 \
2425 __chk_user_ptr(__cu_from); \
2426 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2427 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2428 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2429 __cu_len; \
2430 })
2431 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2432 index 24603be..948052d 100644
2433 --- a/arch/ia64/kernel/module.c
2434 +++ b/arch/ia64/kernel/module.c
2435 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2436 void
2437 module_free (struct module *mod, void *module_region)
2438 {
2439 - if (mod && mod->arch.init_unw_table &&
2440 - module_region == mod->module_init) {
2441 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2442 unw_remove_unwind_table(mod->arch.init_unw_table);
2443 mod->arch.init_unw_table = NULL;
2444 }
2445 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2446 }
2447
2448 static inline int
2449 +in_init_rx (const struct module *mod, uint64_t addr)
2450 +{
2451 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2452 +}
2453 +
2454 +static inline int
2455 +in_init_rw (const struct module *mod, uint64_t addr)
2456 +{
2457 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2458 +}
2459 +
2460 +static inline int
2461 in_init (const struct module *mod, uint64_t addr)
2462 {
2463 - return addr - (uint64_t) mod->module_init < mod->init_size;
2464 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2465 +}
2466 +
2467 +static inline int
2468 +in_core_rx (const struct module *mod, uint64_t addr)
2469 +{
2470 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2471 +}
2472 +
2473 +static inline int
2474 +in_core_rw (const struct module *mod, uint64_t addr)
2475 +{
2476 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2477 }
2478
2479 static inline int
2480 in_core (const struct module *mod, uint64_t addr)
2481 {
2482 - return addr - (uint64_t) mod->module_core < mod->core_size;
2483 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2484 }
2485
2486 static inline int
2487 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2488 break;
2489
2490 case RV_BDREL:
2491 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2492 + if (in_init_rx(mod, val))
2493 + val -= (uint64_t) mod->module_init_rx;
2494 + else if (in_init_rw(mod, val))
2495 + val -= (uint64_t) mod->module_init_rw;
2496 + else if (in_core_rx(mod, val))
2497 + val -= (uint64_t) mod->module_core_rx;
2498 + else if (in_core_rw(mod, val))
2499 + val -= (uint64_t) mod->module_core_rw;
2500 break;
2501
2502 case RV_LTV:
2503 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2504 * addresses have been selected...
2505 */
2506 uint64_t gp;
2507 - if (mod->core_size > MAX_LTOFF)
2508 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2509 /*
2510 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2511 * at the end of the module.
2512 */
2513 - gp = mod->core_size - MAX_LTOFF / 2;
2514 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2515 else
2516 - gp = mod->core_size / 2;
2517 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2518 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2519 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2520 mod->arch.gp = gp;
2521 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2522 }
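The module-loader hunks above are part of the rx/rw split: each module is laid out as a read-only executable region (module_core_rx / core_size_rx) and a writable non-executable one (module_core_rw / core_size_rw), so every "is this address inside the module?" helper has to test both halves, and the GOT/fdesc/gp bookkeeping moves to the rw side. Note the form of the range test, "addr - (uint64_t)base < size": with unsigned arithmetic one subtraction-and-compare rejects addresses below base (the subtraction wraps to a huge value) as well as addresses past the end. Worked example of that idiom:

	/* The unsigned "addr - base < size" range test used by in_init_rx() etc. */
	#include <stdint.h>
	#include <stdio.h>

	static int in_region(uint64_t addr, uint64_t base, uint64_t size)
	{
		return addr - base < size;     /* below base wraps and fails too */
	}

	int main(void)
	{
		uint64_t base = 0xa000000100000000ull, size = 0x4000;

		printf("%d\n", in_region(base + 0x100, base, size)); /* 1: inside */
		printf("%d\n", in_region(base + size,  base, size)); /* 0: one past end */
		printf("%d\n", in_region(base - 8,     base, size)); /* 0: below base */
		return 0;
	}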
2523 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2524 index 609d500..7dde2a8 100644
2525 --- a/arch/ia64/kernel/sys_ia64.c
2526 +++ b/arch/ia64/kernel/sys_ia64.c
2527 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2528 if (REGION_NUMBER(addr) == RGN_HPAGE)
2529 addr = 0;
2530 #endif
2531 +
2532 +#ifdef CONFIG_PAX_RANDMMAP
2533 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2534 + addr = mm->free_area_cache;
2535 + else
2536 +#endif
2537 +
2538 if (!addr)
2539 addr = mm->free_area_cache;
2540
2541 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2542 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2543 /* At this point: (!vma || addr < vma->vm_end). */
2544 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2545 - if (start_addr != TASK_UNMAPPED_BASE) {
2546 + if (start_addr != mm->mmap_base) {
2547 /* Start a new search --- just in case we missed some holes. */
2548 - addr = TASK_UNMAPPED_BASE;
2549 + addr = mm->mmap_base;
2550 goto full_search;
2551 }
2552 return -ENOMEM;
2553 }
2554 - if (!vma || addr + len <= vma->vm_start) {
2555 + if (check_heap_stack_gap(vma, addr, len)) {
2556 /* Remember the address where we stopped this search: */
2557 mm->free_area_cache = addr + len;
2558 return addr;
2559 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2560 index 0ccb28f..8992469 100644
2561 --- a/arch/ia64/kernel/vmlinux.lds.S
2562 +++ b/arch/ia64/kernel/vmlinux.lds.S
2563 @@ -198,7 +198,7 @@ SECTIONS {
2564 /* Per-cpu data: */
2565 . = ALIGN(PERCPU_PAGE_SIZE);
2566 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2567 - __phys_per_cpu_start = __per_cpu_load;
2568 + __phys_per_cpu_start = per_cpu_load;
2569 /*
2570 * ensure percpu data fits
2571 * into percpu page size
2572 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2573 index 02d29c2..ea893df 100644
2574 --- a/arch/ia64/mm/fault.c
2575 +++ b/arch/ia64/mm/fault.c
2576 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2577 return pte_present(pte);
2578 }
2579
2580 +#ifdef CONFIG_PAX_PAGEEXEC
2581 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2582 +{
2583 + unsigned long i;
2584 +
2585 + printk(KERN_ERR "PAX: bytes at PC: ");
2586 + for (i = 0; i < 8; i++) {
2587 + unsigned int c;
2588 + if (get_user(c, (unsigned int *)pc+i))
2589 + printk(KERN_CONT "???????? ");
2590 + else
2591 + printk(KERN_CONT "%08x ", c);
2592 + }
2593 + printk("\n");
2594 +}
2595 +#endif
2596 +
2597 void __kprobes
2598 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2599 {
2600 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2601 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2602 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2603
2604 - if ((vma->vm_flags & mask) != mask)
2605 + if ((vma->vm_flags & mask) != mask) {
2606 +
2607 +#ifdef CONFIG_PAX_PAGEEXEC
2608 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2609 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2610 + goto bad_area;
2611 +
2612 + up_read(&mm->mmap_sem);
2613 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2614 + do_group_exit(SIGKILL);
2615 + }
2616 +#endif
2617 +
2618 goto bad_area;
2619
2620 + }
2621 +
2622 /*
2623 * If for any reason at all we couldn't handle the fault, make
2624 * sure we exit gracefully rather than endlessly redo the
2625 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2626 index 5ca674b..e0e1b70 100644
2627 --- a/arch/ia64/mm/hugetlbpage.c
2628 +++ b/arch/ia64/mm/hugetlbpage.c
2629 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2630 /* At this point: (!vmm || addr < vmm->vm_end). */
2631 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2632 return -ENOMEM;
2633 - if (!vmm || (addr + len) <= vmm->vm_start)
2634 + if (check_heap_stack_gap(vmm, addr, len))
2635 return addr;
2636 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2637 }
2638 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2639 index 0eab454..bd794f2 100644
2640 --- a/arch/ia64/mm/init.c
2641 +++ b/arch/ia64/mm/init.c
2642 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2643 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2644 vma->vm_end = vma->vm_start + PAGE_SIZE;
2645 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2646 +
2647 +#ifdef CONFIG_PAX_PAGEEXEC
2648 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2649 + vma->vm_flags &= ~VM_EXEC;
2650 +
2651 +#ifdef CONFIG_PAX_MPROTECT
2652 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2653 + vma->vm_flags &= ~VM_MAYEXEC;
2654 +#endif
2655 +
2656 + }
2657 +#endif
2658 +
2659 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2660 down_write(&current->mm->mmap_sem);
2661 if (insert_vm_struct(current->mm, vma)) {
2662 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2663 index 40b3ee9..8c2c112 100644
2664 --- a/arch/m32r/include/asm/cache.h
2665 +++ b/arch/m32r/include/asm/cache.h
2666 @@ -1,8 +1,10 @@
2667 #ifndef _ASM_M32R_CACHE_H
2668 #define _ASM_M32R_CACHE_H
2669
2670 +#include <linux/const.h>
2671 +
2672 /* L1 cache line size */
2673 #define L1_CACHE_SHIFT 4
2674 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2675 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2676
2677 #endif /* _ASM_M32R_CACHE_H */
2678 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2679 index 82abd15..d95ae5d 100644
2680 --- a/arch/m32r/lib/usercopy.c
2681 +++ b/arch/m32r/lib/usercopy.c
2682 @@ -14,6 +14,9 @@
2683 unsigned long
2684 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2685 {
2686 + if ((long)n < 0)
2687 + return n;
2688 +
2689 prefetch(from);
2690 if (access_ok(VERIFY_WRITE, to, n))
2691 __copy_user(to,from,n);
2692 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2693 unsigned long
2694 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2695 {
2696 + if ((long)n < 0)
2697 + return n;
2698 +
2699 prefetchw(to);
2700 if (access_ok(VERIFY_READ, from, n))
2701 __copy_user_zeroing(to,from,n);
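The added "(long)n < 0" guard in both copy helpers rejects lengths with the top bit set before access_ok() and the copy loop ever see them; such lengths usually come from a signed size that went negative (an unchecked subtraction, a bad ioctl argument) and, reinterpreted as unsigned, would let the copy run far past the intended buffer. Returning n follows the copy_*_user convention of "n bytes were left uncopied", i.e. the whole copy is reported as failed. A short illustration of the failure mode being screened out:

	/* How a negative signed length becomes a huge unsigned copy size. */
	#include <stdio.h>

	int main(void)
	{
		int have = 16, want = 24;
		unsigned long n = (unsigned long)(have - want);  /* -8, reinterpreted */

		printf("n = %lu, (long)n < 0 is %d\n", n, (long)n < 0);
		return 0;
	}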
2702 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2703 index 0395c51..5f26031 100644
2704 --- a/arch/m68k/include/asm/cache.h
2705 +++ b/arch/m68k/include/asm/cache.h
2706 @@ -4,9 +4,11 @@
2707 #ifndef __ARCH_M68K_CACHE_H
2708 #define __ARCH_M68K_CACHE_H
2709
2710 +#include <linux/const.h>
2711 +
2712 /* bytes per L1 cache line */
2713 #define L1_CACHE_SHIFT 4
2714 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2715 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2716
2717 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2718
2719 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2720 index 4efe96a..60e8699 100644
2721 --- a/arch/microblaze/include/asm/cache.h
2722 +++ b/arch/microblaze/include/asm/cache.h
2723 @@ -13,11 +13,12 @@
2724 #ifndef _ASM_MICROBLAZE_CACHE_H
2725 #define _ASM_MICROBLAZE_CACHE_H
2726
2727 +#include <linux/const.h>
2728 #include <asm/registers.h>
2729
2730 #define L1_CACHE_SHIFT 5
2731 /* word-granular cache in microblaze */
2732 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2733 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2734
2735 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2736
2737 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2738 index 3f4c5cb..3439c6e 100644
2739 --- a/arch/mips/include/asm/atomic.h
2740 +++ b/arch/mips/include/asm/atomic.h
2741 @@ -21,6 +21,10 @@
2742 #include <asm/cmpxchg.h>
2743 #include <asm/war.h>
2744
2745 +#ifdef CONFIG_GENERIC_ATOMIC64
2746 +#include <asm-generic/atomic64.h>
2747 +#endif
2748 +
2749 #define ATOMIC_INIT(i) { (i) }
2750
2751 /*
2752 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2753 */
2754 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2755
2756 +#define atomic64_read_unchecked(v) atomic64_read(v)
2757 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2758 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2759 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2760 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2761 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2762 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2763 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2764 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2765 +
2766 #endif /* CONFIG_64BIT */
2767
2768 /*
2769 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2770 index b4db69f..8f3b093 100644
2771 --- a/arch/mips/include/asm/cache.h
2772 +++ b/arch/mips/include/asm/cache.h
2773 @@ -9,10 +9,11 @@
2774 #ifndef _ASM_CACHE_H
2775 #define _ASM_CACHE_H
2776
2777 +#include <linux/const.h>
2778 #include <kmalloc.h>
2779
2780 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2781 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2782 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2783
2784 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2785 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2786 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2787 index 455c0ac..ad65fbe 100644
2788 --- a/arch/mips/include/asm/elf.h
2789 +++ b/arch/mips/include/asm/elf.h
2790 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2791 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2792 #endif
2793
2794 +#ifdef CONFIG_PAX_ASLR
2795 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2796 +
2797 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2798 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2799 +#endif
2800 +
2801 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2802 struct linux_binprm;
2803 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2804 int uses_interp);
2805
2806 -struct mm_struct;
2807 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2808 -#define arch_randomize_brk arch_randomize_brk
2809 -
2810 #endif /* _ASM_ELF_H */
2811 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2812 index c1f6afa..38cc6e9 100644
2813 --- a/arch/mips/include/asm/exec.h
2814 +++ b/arch/mips/include/asm/exec.h
2815 @@ -12,6 +12,6 @@
2816 #ifndef _ASM_EXEC_H
2817 #define _ASM_EXEC_H
2818
2819 -extern unsigned long arch_align_stack(unsigned long sp);
2820 +#define arch_align_stack(x) ((x) & ~0xfUL)
2821
2822 #endif /* _ASM_EXEC_H */
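Turning arch_align_stack() into the macro "((x) & ~0xfUL)" keeps only the 16-byte alignment of the starting stack pointer; the randomising implementation removed from arch/mips/kernel/process.c further down used to subtract a sub-page random amount here, and under this patch stack randomisation is left to the PaX side instead. The masking arithmetic:

	/* Rounding the stack pointer down to a 16-byte boundary. */
	#include <stdio.h>

	#define arch_align_stack(x) ((x) & ~0xfUL)

	int main(void)
	{
		unsigned long sp = 0x7fff1234UL;

		printf("%#lx -> %#lx\n", sp, arch_align_stack(sp)); /* ... -> 0x7fff1230 */
		return 0;
	}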
2823 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2824 index da9bd7d..91aa7ab 100644
2825 --- a/arch/mips/include/asm/page.h
2826 +++ b/arch/mips/include/asm/page.h
2827 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2828 #ifdef CONFIG_CPU_MIPS32
2829 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2830 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2831 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2832 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2833 #else
2834 typedef struct { unsigned long long pte; } pte_t;
2835 #define pte_val(x) ((x).pte)
2836 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2837 index 881d18b..cea38bc 100644
2838 --- a/arch/mips/include/asm/pgalloc.h
2839 +++ b/arch/mips/include/asm/pgalloc.h
2840 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2841 {
2842 set_pud(pud, __pud((unsigned long)pmd));
2843 }
2844 +
2845 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2846 +{
2847 + pud_populate(mm, pud, pmd);
2848 +}
2849 #endif
2850
2851 /*
2852 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2853 index 0d85d8e..ec71487 100644
2854 --- a/arch/mips/include/asm/thread_info.h
2855 +++ b/arch/mips/include/asm/thread_info.h
2856 @@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2857 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2858 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2859 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2860 +/* li takes a 32bit immediate */
2861 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2862 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2863
2864 #ifdef CONFIG_MIPS32_O32
2865 @@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2866 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2867 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2868 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2869 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2870 +
2871 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2872
2873 /* work to do in syscall_trace_leave() */
2874 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2875 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2876
2877 /* work to do on interrupt/exception return */
2878 #define _TIF_WORK_MASK (0x0000ffef & \
2879 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2880 /* work to do on any return to u-space */
2881 -#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2882 +#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2883
2884 #endif /* __KERNEL__ */
2885
2886 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2887 index 9fdd8bc..4bd7f1a 100644
2888 --- a/arch/mips/kernel/binfmt_elfn32.c
2889 +++ b/arch/mips/kernel/binfmt_elfn32.c
2890 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2891 #undef ELF_ET_DYN_BASE
2892 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2893
2894 +#ifdef CONFIG_PAX_ASLR
2895 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2896 +
2897 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2898 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2899 +#endif
2900 +
2901 #include <asm/processor.h>
2902 #include <linux/module.h>
2903 #include <linux/elfcore.h>
2904 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2905 index ff44823..97f8906 100644
2906 --- a/arch/mips/kernel/binfmt_elfo32.c
2907 +++ b/arch/mips/kernel/binfmt_elfo32.c
2908 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2909 #undef ELF_ET_DYN_BASE
2910 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2911
2912 +#ifdef CONFIG_PAX_ASLR
2913 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2914 +
2915 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2916 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2917 +#endif
2918 +
2919 #include <asm/processor.h>
2920
2921 /*
2922 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2923 index e9a5fd7..378809a 100644
2924 --- a/arch/mips/kernel/process.c
2925 +++ b/arch/mips/kernel/process.c
2926 @@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2927 out:
2928 return pc;
2929 }
2930 -
2931 -/*
2932 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2933 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2934 - */
2935 -unsigned long arch_align_stack(unsigned long sp)
2936 -{
2937 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2938 - sp -= get_random_int() & ~PAGE_MASK;
2939 -
2940 - return sp & ALMASK;
2941 -}
2942 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
2943 index 7c24c29..e2f1981 100644
2944 --- a/arch/mips/kernel/ptrace.c
2945 +++ b/arch/mips/kernel/ptrace.c
2946 @@ -528,6 +528,10 @@ static inline int audit_arch(void)
2947 return arch;
2948 }
2949
2950 +#ifdef CONFIG_GRKERNSEC_SETXID
2951 +extern void gr_delayed_cred_worker(void);
2952 +#endif
2953 +
2954 /*
2955 * Notification of system call entry/exit
2956 * - triggered by current->work.syscall_trace
2957 @@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
2958 /* do the secure computing check first */
2959 secure_computing(regs->regs[2]);
2960
2961 +#ifdef CONFIG_GRKERNSEC_SETXID
2962 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2963 + gr_delayed_cred_worker();
2964 +#endif
2965 +
2966 if (!(current->ptrace & PT_PTRACED))
2967 goto out;
2968
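TIF_GRSEC_SETXID ("update credentials on syscall entry/exit", per the flag's comment above) together with the gr_delayed_cred_worker() call here implements delayed credential application: when one thread of a process drops privileges, grsecurity flags the sibling threads, and each of them picks up the new credentials at its next syscall boundary rather than at an arbitrary instant; the widened _TIF_SYSCALL_WORK masks in the thread_info.h and scall*.S hunks are what push flagged threads onto the slow syscall path so the worker actually runs. The grsecurity worker itself is not in this excerpt; the standalone sketch below only shows the "flag now, apply at the next boundary" pattern, with every name hypothetical:

	/* Illustrative pattern only: a pending change applied at a safe point. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	static atomic_int pending_uid = -1;         /* -1: nothing pending */
	static _Thread_local int thread_uid = 1000;

	static void syscall_boundary(void)          /* stands in for syscall entry */
	{
		int uid = atomic_load(&pending_uid);

		if (uid >= 0 && thread_uid != uid) {
			thread_uid = uid;           /* apply the delayed change */
			printf("worker thread switched to uid %d\n", uid);
		}
	}

	static void *worker(void *arg)
	{
		(void)arg;
		for (int i = 0; i < 3; i++) {
			syscall_boundary();
			usleep(1000);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		atomic_store(&pending_uid, 500);    /* "setuid()" flags the change */
		pthread_create(&t, NULL, worker, NULL);
		pthread_join(t, NULL);
		return 0;
	}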
2969 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
2970 index a632bc1..0b77c7c 100644
2971 --- a/arch/mips/kernel/scall32-o32.S
2972 +++ b/arch/mips/kernel/scall32-o32.S
2973 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
2974
2975 stack_done:
2976 lw t0, TI_FLAGS($28) # syscall tracing enabled?
2977 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
2978 + li t1, _TIF_SYSCALL_WORK
2979 and t0, t1
2980 bnez t0, syscall_trace_entry # -> yes
2981
2982 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
2983 index 3b5a5e9..e1ee86d 100644
2984 --- a/arch/mips/kernel/scall64-64.S
2985 +++ b/arch/mips/kernel/scall64-64.S
2986 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
2987
2988 sd a3, PT_R26(sp) # save a3 for syscall restarting
2989
2990 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
2991 + li t1, _TIF_SYSCALL_WORK
2992 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
2993 and t0, t1, t0
2994 bnez t0, syscall_trace_entry
2995 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
2996 index 6be6f70..1859577 100644
2997 --- a/arch/mips/kernel/scall64-n32.S
2998 +++ b/arch/mips/kernel/scall64-n32.S
2999 @@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3000
3001 sd a3, PT_R26(sp) # save a3 for syscall restarting
3002
3003 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3004 + li t1, _TIF_SYSCALL_WORK
3005 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3006 and t0, t1, t0
3007 bnez t0, n32_syscall_trace_entry
3008 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3009 index 5422855..74e63a3 100644
3010 --- a/arch/mips/kernel/scall64-o32.S
3011 +++ b/arch/mips/kernel/scall64-o32.S
3012 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3013 PTR 4b, bad_stack
3014 .previous
3015
3016 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3017 + li t1, _TIF_SYSCALL_WORK
3018 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3019 and t0, t1, t0
3020 bnez t0, trace_a_syscall
3021 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3022 index c14f6df..537e729 100644
3023 --- a/arch/mips/mm/fault.c
3024 +++ b/arch/mips/mm/fault.c
3025 @@ -27,6 +27,23 @@
3026 #include <asm/highmem.h> /* For VMALLOC_END */
3027 #include <linux/kdebug.h>
3028
3029 +#ifdef CONFIG_PAX_PAGEEXEC
3030 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3031 +{
3032 + unsigned long i;
3033 +
3034 + printk(KERN_ERR "PAX: bytes at PC: ");
3035 + for (i = 0; i < 5; i++) {
3036 + unsigned int c;
3037 + if (get_user(c, (unsigned int *)pc+i))
3038 + printk(KERN_CONT "???????? ");
3039 + else
3040 + printk(KERN_CONT "%08x ", c);
3041 + }
3042 + printk("\n");
3043 +}
3044 +#endif
3045 +
3046 /*
3047 * This routine handles page faults. It determines the address,
3048 * and the problem, and then passes it off to one of the appropriate
3049 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3050 index 302d779..7d35bf8 100644
3051 --- a/arch/mips/mm/mmap.c
3052 +++ b/arch/mips/mm/mmap.c
3053 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3054 do_color_align = 1;
3055
3056 /* requesting a specific address */
3057 +
3058 +#ifdef CONFIG_PAX_RANDMMAP
3059 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3060 +#endif
3061 +
3062 if (addr) {
3063 if (do_color_align)
3064 addr = COLOUR_ALIGN(addr, pgoff);
3065 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3066 addr = PAGE_ALIGN(addr);
3067
3068 vma = find_vma(mm, addr);
3069 - if (TASK_SIZE - len >= addr &&
3070 - (!vma || addr + len <= vma->vm_start))
3071 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3072 return addr;
3073 }
3074
3075 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3076 /* At this point: (!vma || addr < vma->vm_end). */
3077 if (TASK_SIZE - len < addr)
3078 return -ENOMEM;
3079 - if (!vma || addr + len <= vma->vm_start)
3080 + if (check_heap_stack_gap(vma, addr, len))
3081 return addr;
3082 addr = vma->vm_end;
3083 if (do_color_align)
3084 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3085 /* make sure it can fit in the remaining address space */
3086 if (likely(addr > len)) {
3087 vma = find_vma(mm, addr - len);
3088 - if (!vma || addr <= vma->vm_start) {
3089 + if (check_heap_stack_gap(vma, addr - len, len)) {
3090 /* cache the address as a hint for next time */
3091 return mm->free_area_cache = addr - len;
3092 }
3093 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3094 * return with success:
3095 */
3096 vma = find_vma(mm, addr);
3097 - if (likely(!vma || addr + len <= vma->vm_start)) {
3098 + if (check_heap_stack_gap(vma, addr, len)) {
3099 /* cache the address as a hint for next time */
3100 return mm->free_area_cache = addr;
3101 }
3102 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3103 mm->unmap_area = arch_unmap_area_topdown;
3104 }
3105 }
3106 -
3107 -static inline unsigned long brk_rnd(void)
3108 -{
3109 - unsigned long rnd = get_random_int();
3110 -
3111 - rnd = rnd << PAGE_SHIFT;
3112 - /* 8MB for 32bit, 256MB for 64bit */
3113 - if (TASK_IS_32BIT_ADDR)
3114 - rnd = rnd & 0x7ffffful;
3115 - else
3116 - rnd = rnd & 0xffffffful;
3117 -
3118 - return rnd;
3119 -}
3120 -
3121 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3122 -{
3123 - unsigned long base = mm->brk;
3124 - unsigned long ret;
3125 -
3126 - ret = PAGE_ALIGN(base + brk_rnd());
3127 -
3128 - if (ret < mm->brk)
3129 - return mm->brk;
3130 -
3131 - return ret;
3132 -}
3133 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3134 index 967d144..db12197 100644
3135 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3136 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3137 @@ -11,12 +11,14 @@
3138 #ifndef _ASM_PROC_CACHE_H
3139 #define _ASM_PROC_CACHE_H
3140
3141 +#include <linux/const.h>
3142 +
3143 /* L1 cache */
3144
3145 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3146 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3147 -#define L1_CACHE_BYTES 16 /* bytes per entry */
3148 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3149 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3150 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3151
3152 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3153 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3154 index bcb5df2..84fabd2 100644
3155 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3156 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3157 @@ -16,13 +16,15 @@
3158 #ifndef _ASM_PROC_CACHE_H
3159 #define _ASM_PROC_CACHE_H
3160
3161 +#include <linux/const.h>
3162 +
3163 /*
3164 * L1 cache
3165 */
3166 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3167 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3168 -#define L1_CACHE_BYTES 32 /* bytes per entry */
3169 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3170 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3171 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3172
3173 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3174 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3175 index 4ce7a01..449202a 100644
3176 --- a/arch/openrisc/include/asm/cache.h
3177 +++ b/arch/openrisc/include/asm/cache.h
3178 @@ -19,11 +19,13 @@
3179 #ifndef __ASM_OPENRISC_CACHE_H
3180 #define __ASM_OPENRISC_CACHE_H
3181
3182 +#include <linux/const.h>
3183 +
3184 /* FIXME: How can we replace these with values from the CPU...
3185 * they shouldn't be hard-coded!
3186 */
3187
3188 -#define L1_CACHE_BYTES 16
3189 #define L1_CACHE_SHIFT 4
3190 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3191
3192 #endif /* __ASM_OPENRISC_CACHE_H */
3193 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3194 index 6c6defc..d30653d 100644
3195 --- a/arch/parisc/include/asm/atomic.h
3196 +++ b/arch/parisc/include/asm/atomic.h
3197 @@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3198
3199 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3200
3201 +#define atomic64_read_unchecked(v) atomic64_read(v)
3202 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3203 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3204 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3205 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3206 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3207 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3208 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3209 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3210 +
3211 #endif /* !CONFIG_64BIT */
3212
3213
3214 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3215 index 47f11c7..3420df2 100644
3216 --- a/arch/parisc/include/asm/cache.h
3217 +++ b/arch/parisc/include/asm/cache.h
3218 @@ -5,6 +5,7 @@
3219 #ifndef __ARCH_PARISC_CACHE_H
3220 #define __ARCH_PARISC_CACHE_H
3221
3222 +#include <linux/const.h>
3223
3224 /*
3225 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3226 @@ -15,13 +16,13 @@
3227 * just ruin performance.
3228 */
3229 #ifdef CONFIG_PA20
3230 -#define L1_CACHE_BYTES 64
3231 #define L1_CACHE_SHIFT 6
3232 #else
3233 -#define L1_CACHE_BYTES 32
3234 #define L1_CACHE_SHIFT 5
3235 #endif
3236
3237 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3238 +
3239 #ifndef __ASSEMBLY__
3240
3241 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3242 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3243 index 19f6cb1..6c78cf2 100644
3244 --- a/arch/parisc/include/asm/elf.h
3245 +++ b/arch/parisc/include/asm/elf.h
3246 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3247
3248 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3249
3250 +#ifdef CONFIG_PAX_ASLR
3251 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3252 +
3253 +#define PAX_DELTA_MMAP_LEN 16
3254 +#define PAX_DELTA_STACK_LEN 16
3255 +#endif
3256 +
3257 /* This yields a mask that user programs can use to figure out what
3258 instruction set this CPU supports. This could be done in user space,
3259 but it's not easy, and we've already done it here. */
3260 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3261 index fc987a1..6e068ef 100644
3262 --- a/arch/parisc/include/asm/pgalloc.h
3263 +++ b/arch/parisc/include/asm/pgalloc.h
3264 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3265 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3266 }
3267
3268 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3269 +{
3270 + pgd_populate(mm, pgd, pmd);
3271 +}
3272 +
3273 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3274 {
3275 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3276 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3277 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3278 #define pmd_free(mm, x) do { } while (0)
3279 #define pgd_populate(mm, pmd, pte) BUG()
3280 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
3281
3282 #endif
3283
3284 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3285 index ee99f23..802b0a1 100644
3286 --- a/arch/parisc/include/asm/pgtable.h
3287 +++ b/arch/parisc/include/asm/pgtable.h
3288 @@ -212,6 +212,17 @@ struct vm_area_struct;
3289 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3290 #define PAGE_COPY PAGE_EXECREAD
3291 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3292 +
3293 +#ifdef CONFIG_PAX_PAGEEXEC
3294 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3295 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3296 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3297 +#else
3298 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3299 +# define PAGE_COPY_NOEXEC PAGE_COPY
3300 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3301 +#endif
3302 +
3303 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3304 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3305 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3306 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3307 index 5e34ccf..672bc9c 100644
3308 --- a/arch/parisc/kernel/module.c
3309 +++ b/arch/parisc/kernel/module.c
3310 @@ -98,16 +98,38 @@
3311
3312 /* three functions to determine where in the module core
3313 * or init pieces the location is */
3314 +static inline int in_init_rx(struct module *me, void *loc)
3315 +{
3316 + return (loc >= me->module_init_rx &&
3317 + loc < (me->module_init_rx + me->init_size_rx));
3318 +}
3319 +
3320 +static inline int in_init_rw(struct module *me, void *loc)
3321 +{
3322 + return (loc >= me->module_init_rw &&
3323 + loc < (me->module_init_rw + me->init_size_rw));
3324 +}
3325 +
3326 static inline int in_init(struct module *me, void *loc)
3327 {
3328 - return (loc >= me->module_init &&
3329 - loc <= (me->module_init + me->init_size));
3330 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3331 +}
3332 +
3333 +static inline int in_core_rx(struct module *me, void *loc)
3334 +{
3335 + return (loc >= me->module_core_rx &&
3336 + loc < (me->module_core_rx + me->core_size_rx));
3337 +}
3338 +
3339 +static inline int in_core_rw(struct module *me, void *loc)
3340 +{
3341 + return (loc >= me->module_core_rw &&
3342 + loc < (me->module_core_rw + me->core_size_rw));
3343 }
3344
3345 static inline int in_core(struct module *me, void *loc)
3346 {
3347 - return (loc >= me->module_core &&
3348 - loc <= (me->module_core + me->core_size));
3349 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3350 }
3351
3352 static inline int in_local(struct module *me, void *loc)
3353 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3354 }
3355
3356 /* align things a bit */
3357 - me->core_size = ALIGN(me->core_size, 16);
3358 - me->arch.got_offset = me->core_size;
3359 - me->core_size += gots * sizeof(struct got_entry);
3360 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3361 + me->arch.got_offset = me->core_size_rw;
3362 + me->core_size_rw += gots * sizeof(struct got_entry);
3363
3364 - me->core_size = ALIGN(me->core_size, 16);
3365 - me->arch.fdesc_offset = me->core_size;
3366 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3367 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3368 + me->arch.fdesc_offset = me->core_size_rw;
3369 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3370
3371 me->arch.got_max = gots;
3372 me->arch.fdesc_max = fdescs;
3373 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3374
3375 BUG_ON(value == 0);
3376
3377 - got = me->module_core + me->arch.got_offset;
3378 + got = me->module_core_rw + me->arch.got_offset;
3379 for (i = 0; got[i].addr; i++)
3380 if (got[i].addr == value)
3381 goto out;
3382 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3383 #ifdef CONFIG_64BIT
3384 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3385 {
3386 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3387 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3388
3389 if (!value) {
3390 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3391 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3392
3393 /* Create new one */
3394 fdesc->addr = value;
3395 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3396 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3397 return (Elf_Addr)fdesc;
3398 }
3399 #endif /* CONFIG_64BIT */
3400 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3401
3402 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3403 end = table + sechdrs[me->arch.unwind_section].sh_size;
3404 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3405 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3406
3407 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3408 me->arch.unwind_section, table, end, gp);
3409 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3410 index c9b9322..02d8940 100644
3411 --- a/arch/parisc/kernel/sys_parisc.c
3412 +++ b/arch/parisc/kernel/sys_parisc.c
3413 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3414 /* At this point: (!vma || addr < vma->vm_end). */
3415 if (TASK_SIZE - len < addr)
3416 return -ENOMEM;
3417 - if (!vma || addr + len <= vma->vm_start)
3418 + if (check_heap_stack_gap(vma, addr, len))
3419 return addr;
3420 addr = vma->vm_end;
3421 }
3422 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3423 /* At this point: (!vma || addr < vma->vm_end). */
3424 if (TASK_SIZE - len < addr)
3425 return -ENOMEM;
3426 - if (!vma || addr + len <= vma->vm_start)
3427 + if (check_heap_stack_gap(vma, addr, len))
3428 return addr;
3429 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3430 if (addr < vma->vm_end) /* handle wraparound */
3431 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3432 if (flags & MAP_FIXED)
3433 return addr;
3434 if (!addr)
3435 - addr = TASK_UNMAPPED_BASE;
3436 + addr = current->mm->mmap_base;
3437
3438 if (filp) {
3439 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3440 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3441 index 45ba99f..8e22c33 100644
3442 --- a/arch/parisc/kernel/traps.c
3443 +++ b/arch/parisc/kernel/traps.c
3444 @@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3445
3446 down_read(&current->mm->mmap_sem);
3447 vma = find_vma(current->mm,regs->iaoq[0]);
3448 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3449 - && (vma->vm_flags & VM_EXEC)) {
3450 -
3451 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3452 fault_address = regs->iaoq[0];
3453 fault_space = regs->iasq[0];
3454
3455 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3456 index 18162ce..94de376 100644
3457 --- a/arch/parisc/mm/fault.c
3458 +++ b/arch/parisc/mm/fault.c
3459 @@ -15,6 +15,7 @@
3460 #include <linux/sched.h>
3461 #include <linux/interrupt.h>
3462 #include <linux/module.h>
3463 +#include <linux/unistd.h>
3464
3465 #include <asm/uaccess.h>
3466 #include <asm/traps.h>
3467 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3468 static unsigned long
3469 parisc_acctyp(unsigned long code, unsigned int inst)
3470 {
3471 - if (code == 6 || code == 16)
3472 + if (code == 6 || code == 7 || code == 16)
3473 return VM_EXEC;
3474
3475 switch (inst & 0xf0000000) {
3476 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3477 }
3478 #endif
3479
3480 +#ifdef CONFIG_PAX_PAGEEXEC
3481 +/*
3482 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3483 + *
3484 + * returns 1 when task should be killed
3485 + * 2 when rt_sigreturn trampoline was detected
3486 + * 3 when unpatched PLT trampoline was detected
3487 + */
3488 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3489 +{
3490 +
3491 +#ifdef CONFIG_PAX_EMUPLT
3492 + int err;
3493 +
3494 + do { /* PaX: unpatched PLT emulation */
3495 + unsigned int bl, depwi;
3496 +
3497 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3498 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3499 +
3500 + if (err)
3501 + break;
3502 +
3503 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3504 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3505 +
3506 + err = get_user(ldw, (unsigned int *)addr);
3507 + err |= get_user(bv, (unsigned int *)(addr+4));
3508 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3509 +
3510 + if (err)
3511 + break;
3512 +
3513 + if (ldw == 0x0E801096U &&
3514 + bv == 0xEAC0C000U &&
3515 + ldw2 == 0x0E881095U)
3516 + {
3517 + unsigned int resolver, map;
3518 +
3519 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3520 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3521 + if (err)
3522 + break;
3523 +
3524 + regs->gr[20] = instruction_pointer(regs)+8;
3525 + regs->gr[21] = map;
3526 + regs->gr[22] = resolver;
3527 + regs->iaoq[0] = resolver | 3UL;
3528 + regs->iaoq[1] = regs->iaoq[0] + 4;
3529 + return 3;
3530 + }
3531 + }
3532 + } while (0);
3533 +#endif
3534 +
3535 +#ifdef CONFIG_PAX_EMUTRAMP
3536 +
3537 +#ifndef CONFIG_PAX_EMUSIGRT
3538 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3539 + return 1;
3540 +#endif
3541 +
3542 + do { /* PaX: rt_sigreturn emulation */
3543 + unsigned int ldi1, ldi2, bel, nop;
3544 +
3545 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3546 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3547 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3548 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3549 +
3550 + if (err)
3551 + break;
3552 +
3553 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3554 + ldi2 == 0x3414015AU &&
3555 + bel == 0xE4008200U &&
3556 + nop == 0x08000240U)
3557 + {
3558 + regs->gr[25] = (ldi1 & 2) >> 1;
3559 + regs->gr[20] = __NR_rt_sigreturn;
3560 + regs->gr[31] = regs->iaoq[1] + 16;
3561 + regs->sr[0] = regs->iasq[1];
3562 + regs->iaoq[0] = 0x100UL;
3563 + regs->iaoq[1] = regs->iaoq[0] + 4;
3564 + regs->iasq[0] = regs->sr[2];
3565 + regs->iasq[1] = regs->sr[2];
3566 + return 2;
3567 + }
3568 + } while (0);
3569 +#endif
3570 +
3571 + return 1;
3572 +}
3573 +
3574 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3575 +{
3576 + unsigned long i;
3577 +
3578 + printk(KERN_ERR "PAX: bytes at PC: ");
3579 + for (i = 0; i < 5; i++) {
3580 + unsigned int c;
3581 + if (get_user(c, (unsigned int *)pc+i))
3582 + printk(KERN_CONT "???????? ");
3583 + else
3584 + printk(KERN_CONT "%08x ", c);
3585 + }
3586 + printk("\n");
3587 +}
3588 +#endif
3589 +
3590 int fixup_exception(struct pt_regs *regs)
3591 {
3592 const struct exception_table_entry *fix;
3593 @@ -192,8 +303,33 @@ good_area:
3594
3595 acc_type = parisc_acctyp(code,regs->iir);
3596
3597 - if ((vma->vm_flags & acc_type) != acc_type)
3598 + if ((vma->vm_flags & acc_type) != acc_type) {
3599 +
3600 +#ifdef CONFIG_PAX_PAGEEXEC
3601 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3602 + (address & ~3UL) == instruction_pointer(regs))
3603 + {
3604 + up_read(&mm->mmap_sem);
3605 + switch (pax_handle_fetch_fault(regs)) {
3606 +
3607 +#ifdef CONFIG_PAX_EMUPLT
3608 + case 3:
3609 + return;
3610 +#endif
3611 +
3612 +#ifdef CONFIG_PAX_EMUTRAMP
3613 + case 2:
3614 + return;
3615 +#endif
3616 +
3617 + }
3618 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3619 + do_group_exit(SIGKILL);
3620 + }
3621 +#endif
3622 +
3623 goto bad_area;
3624 + }
3625
3626 /*
3627 * If for any reason at all we couldn't handle the fault, make
3628 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3629 index da29032..f76c24c 100644
3630 --- a/arch/powerpc/include/asm/atomic.h
3631 +++ b/arch/powerpc/include/asm/atomic.h
3632 @@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3633 return t1;
3634 }
3635
3636 +#define atomic64_read_unchecked(v) atomic64_read(v)
3637 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3638 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3639 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3640 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3641 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3642 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3643 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3644 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3645 +
3646 #endif /* __powerpc64__ */
3647
3648 #endif /* __KERNEL__ */
3649 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3650 index 9e495c9..b6878e5 100644
3651 --- a/arch/powerpc/include/asm/cache.h
3652 +++ b/arch/powerpc/include/asm/cache.h
3653 @@ -3,6 +3,7 @@
3654
3655 #ifdef __KERNEL__
3656
3657 +#include <linux/const.h>
3658
3659 /* bytes per L1 cache line */
3660 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3661 @@ -22,7 +23,7 @@
3662 #define L1_CACHE_SHIFT 7
3663 #endif
3664
3665 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3666 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3667
3668 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3669
3670 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3671 index 3bf9cca..e7457d0 100644
3672 --- a/arch/powerpc/include/asm/elf.h
3673 +++ b/arch/powerpc/include/asm/elf.h
3674 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3675 the loader. We need to make sure that it is out of the way of the program
3676 that it will "exec", and that there is sufficient room for the brk. */
3677
3678 -extern unsigned long randomize_et_dyn(unsigned long base);
3679 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3680 +#define ELF_ET_DYN_BASE (0x20000000)
3681 +
3682 +#ifdef CONFIG_PAX_ASLR
3683 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3684 +
3685 +#ifdef __powerpc64__
3686 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3687 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3688 +#else
3689 +#define PAX_DELTA_MMAP_LEN 15
3690 +#define PAX_DELTA_STACK_LEN 15
3691 +#endif
3692 +#endif
3693
3694 /*
3695 * Our registers are always unsigned longs, whether we're a 32 bit
3696 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3697 (0x7ff >> (PAGE_SHIFT - 12)) : \
3698 (0x3ffff >> (PAGE_SHIFT - 12)))
3699
3700 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3701 -#define arch_randomize_brk arch_randomize_brk
3702 -
3703 #endif /* __KERNEL__ */
3704
3705 /*
3706 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3707 index 8196e9c..d83a9f3 100644
3708 --- a/arch/powerpc/include/asm/exec.h
3709 +++ b/arch/powerpc/include/asm/exec.h
3710 @@ -4,6 +4,6 @@
3711 #ifndef _ASM_POWERPC_EXEC_H
3712 #define _ASM_POWERPC_EXEC_H
3713
3714 -extern unsigned long arch_align_stack(unsigned long sp);
3715 +#define arch_align_stack(x) ((x) & ~0xfUL)
3716
3717 #endif /* _ASM_POWERPC_EXEC_H */
3718 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3719 index bca8fdc..61e9580 100644
3720 --- a/arch/powerpc/include/asm/kmap_types.h
3721 +++ b/arch/powerpc/include/asm/kmap_types.h
3722 @@ -27,6 +27,7 @@ enum km_type {
3723 KM_PPC_SYNC_PAGE,
3724 KM_PPC_SYNC_ICACHE,
3725 KM_KDB,
3726 + KM_CLEARPAGE,
3727 KM_TYPE_NR
3728 };
3729
3730 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3731 index d4a7f64..451de1c 100644
3732 --- a/arch/powerpc/include/asm/mman.h
3733 +++ b/arch/powerpc/include/asm/mman.h
3734 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3735 }
3736 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3737
3738 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3739 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3740 {
3741 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3742 }
3743 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3744 index f072e97..b436dee 100644
3745 --- a/arch/powerpc/include/asm/page.h
3746 +++ b/arch/powerpc/include/asm/page.h
3747 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3748 * and needs to be executable. This means the whole heap ends
3749 * up being executable.
3750 */
3751 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3752 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3753 +#define VM_DATA_DEFAULT_FLAGS32 \
3754 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3755 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3756
3757 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3758 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3759 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3760 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3761 #endif
3762
3763 +#define ktla_ktva(addr) (addr)
3764 +#define ktva_ktla(addr) (addr)
3765 +
3766 /*
3767 * Use the top bit of the higher-level page table entries to indicate whether
3768 * the entries we point to contain hugepages. This works because we know that
3769 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3770 index fed85e6..da5c71b 100644
3771 --- a/arch/powerpc/include/asm/page_64.h
3772 +++ b/arch/powerpc/include/asm/page_64.h
3773 @@ -146,15 +146,18 @@ do { \
3774 * stack by default, so in the absence of a PT_GNU_STACK program header
3775 * we turn execute permission off.
3776 */
3777 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3778 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3779 +#define VM_STACK_DEFAULT_FLAGS32 \
3780 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3781 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3782
3783 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3784 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3785
3786 +#ifndef CONFIG_PAX_PAGEEXEC
3787 #define VM_STACK_DEFAULT_FLAGS \
3788 (is_32bit_task() ? \
3789 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3790 +#endif
3791
3792 #include <asm-generic/getorder.h>
3793
3794 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3795 index 292725c..f87ae14 100644
3796 --- a/arch/powerpc/include/asm/pgalloc-64.h
3797 +++ b/arch/powerpc/include/asm/pgalloc-64.h
3798 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3799 #ifndef CONFIG_PPC_64K_PAGES
3800
3801 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3802 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3803
3804 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3805 {
3806 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3807 pud_set(pud, (unsigned long)pmd);
3808 }
3809
3810 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3811 +{
3812 + pud_populate(mm, pud, pmd);
3813 +}
3814 +
3815 #define pmd_populate(mm, pmd, pte_page) \
3816 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3817 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3818 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3819 #else /* CONFIG_PPC_64K_PAGES */
3820
3821 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3822 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3823
3824 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3825 pte_t *pte)
3826 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3827 index 2e0e411..7899c68 100644
3828 --- a/arch/powerpc/include/asm/pgtable.h
3829 +++ b/arch/powerpc/include/asm/pgtable.h
3830 @@ -2,6 +2,7 @@
3831 #define _ASM_POWERPC_PGTABLE_H
3832 #ifdef __KERNEL__
3833
3834 +#include <linux/const.h>
3835 #ifndef __ASSEMBLY__
3836 #include <asm/processor.h> /* For TASK_SIZE */
3837 #include <asm/mmu.h>
3838 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3839 index 4aad413..85d86bf 100644
3840 --- a/arch/powerpc/include/asm/pte-hash32.h
3841 +++ b/arch/powerpc/include/asm/pte-hash32.h
3842 @@ -21,6 +21,7 @@
3843 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3844 #define _PAGE_USER 0x004 /* usermode access allowed */
3845 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3846 +#define _PAGE_EXEC _PAGE_GUARDED
3847 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3848 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3849 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3850 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3851 index 9d7f0fb..a28fe69 100644
3852 --- a/arch/powerpc/include/asm/reg.h
3853 +++ b/arch/powerpc/include/asm/reg.h
3854 @@ -212,6 +212,7 @@
3855 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3856 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3857 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3858 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3859 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3860 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3861 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3862 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3863 index 4a741c7..c8162227b 100644
3864 --- a/arch/powerpc/include/asm/thread_info.h
3865 +++ b/arch/powerpc/include/asm/thread_info.h
3866 @@ -104,12 +104,14 @@ static inline struct thread_info *current_thread_info(void)
3867 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3868 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3869 #define TIF_SINGLESTEP 8 /* singlestepping active */
3870 -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3871 #define TIF_SECCOMP 10 /* secure computing */
3872 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3873 #define TIF_NOERROR 12 /* Force successful syscall return */
3874 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3875 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3876 +#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
3877 +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
3878 +#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3879
3880 /* as above, but as bit values */
3881 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3882 @@ -127,8 +129,11 @@ static inline struct thread_info *current_thread_info(void)
3883 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3884 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3885 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3886 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3887 +
3888 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3889 - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3890 + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT \
3891 + _TIF_GRSEC_SETXID)
3892
3893 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3894 _TIF_NOTIFY_RESUME)
3895 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3896 index bd0fb84..a42a14b 100644
3897 --- a/arch/powerpc/include/asm/uaccess.h
3898 +++ b/arch/powerpc/include/asm/uaccess.h
3899 @@ -13,6 +13,8 @@
3900 #define VERIFY_READ 0
3901 #define VERIFY_WRITE 1
3902
3903 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3904 +
3905 /*
3906 * The fs value determines whether argument validity checking should be
3907 * performed or not. If get_fs() == USER_DS, checking is performed, with
3908 @@ -327,52 +329,6 @@ do { \
3909 extern unsigned long __copy_tofrom_user(void __user *to,
3910 const void __user *from, unsigned long size);
3911
3912 -#ifndef __powerpc64__
3913 -
3914 -static inline unsigned long copy_from_user(void *to,
3915 - const void __user *from, unsigned long n)
3916 -{
3917 - unsigned long over;
3918 -
3919 - if (access_ok(VERIFY_READ, from, n))
3920 - return __copy_tofrom_user((__force void __user *)to, from, n);
3921 - if ((unsigned long)from < TASK_SIZE) {
3922 - over = (unsigned long)from + n - TASK_SIZE;
3923 - return __copy_tofrom_user((__force void __user *)to, from,
3924 - n - over) + over;
3925 - }
3926 - return n;
3927 -}
3928 -
3929 -static inline unsigned long copy_to_user(void __user *to,
3930 - const void *from, unsigned long n)
3931 -{
3932 - unsigned long over;
3933 -
3934 - if (access_ok(VERIFY_WRITE, to, n))
3935 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3936 - if ((unsigned long)to < TASK_SIZE) {
3937 - over = (unsigned long)to + n - TASK_SIZE;
3938 - return __copy_tofrom_user(to, (__force void __user *)from,
3939 - n - over) + over;
3940 - }
3941 - return n;
3942 -}
3943 -
3944 -#else /* __powerpc64__ */
3945 -
3946 -#define __copy_in_user(to, from, size) \
3947 - __copy_tofrom_user((to), (from), (size))
3948 -
3949 -extern unsigned long copy_from_user(void *to, const void __user *from,
3950 - unsigned long n);
3951 -extern unsigned long copy_to_user(void __user *to, const void *from,
3952 - unsigned long n);
3953 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3954 - unsigned long n);
3955 -
3956 -#endif /* __powerpc64__ */
3957 -
3958 static inline unsigned long __copy_from_user_inatomic(void *to,
3959 const void __user *from, unsigned long n)
3960 {
3961 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3962 if (ret == 0)
3963 return 0;
3964 }
3965 +
3966 + if (!__builtin_constant_p(n))
3967 + check_object_size(to, n, false);
3968 +
3969 return __copy_tofrom_user((__force void __user *)to, from, n);
3970 }
3971
3972 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3973 if (ret == 0)
3974 return 0;
3975 }
3976 +
3977 + if (!__builtin_constant_p(n))
3978 + check_object_size(from, n, true);
3979 +
3980 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3981 }
3982
3983 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3984 return __copy_to_user_inatomic(to, from, size);
3985 }
3986
3987 +#ifndef __powerpc64__
3988 +
3989 +static inline unsigned long __must_check copy_from_user(void *to,
3990 + const void __user *from, unsigned long n)
3991 +{
3992 + unsigned long over;
3993 +
3994 + if ((long)n < 0)
3995 + return n;
3996 +
3997 + if (access_ok(VERIFY_READ, from, n)) {
3998 + if (!__builtin_constant_p(n))
3999 + check_object_size(to, n, false);
4000 + return __copy_tofrom_user((__force void __user *)to, from, n);
4001 + }
4002 + if ((unsigned long)from < TASK_SIZE) {
4003 + over = (unsigned long)from + n - TASK_SIZE;
4004 + if (!__builtin_constant_p(n - over))
4005 + check_object_size(to, n - over, false);
4006 + return __copy_tofrom_user((__force void __user *)to, from,
4007 + n - over) + over;
4008 + }
4009 + return n;
4010 +}
4011 +
4012 +static inline unsigned long __must_check copy_to_user(void __user *to,
4013 + const void *from, unsigned long n)
4014 +{
4015 + unsigned long over;
4016 +
4017 + if ((long)n < 0)
4018 + return n;
4019 +
4020 + if (access_ok(VERIFY_WRITE, to, n)) {
4021 + if (!__builtin_constant_p(n))
4022 + check_object_size(from, n, true);
4023 + return __copy_tofrom_user(to, (__force void __user *)from, n);
4024 + }
4025 + if ((unsigned long)to < TASK_SIZE) {
4026 + over = (unsigned long)to + n - TASK_SIZE;
4027 + if (!__builtin_constant_p(n))
4028 + check_object_size(from, n - over, true);
4029 + return __copy_tofrom_user(to, (__force void __user *)from,
4030 + n - over) + over;
4031 + }
4032 + return n;
4033 +}
4034 +
4035 +#else /* __powerpc64__ */
4036 +
4037 +#define __copy_in_user(to, from, size) \
4038 + __copy_tofrom_user((to), (from), (size))
4039 +
4040 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4041 +{
4042 + if ((long)n < 0 || n > INT_MAX)
4043 + return n;
4044 +
4045 + if (!__builtin_constant_p(n))
4046 + check_object_size(to, n, false);
4047 +
4048 + if (likely(access_ok(VERIFY_READ, from, n)))
4049 + n = __copy_from_user(to, from, n);
4050 + else
4051 + memset(to, 0, n);
4052 + return n;
4053 +}
4054 +
4055 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4056 +{
4057 + if ((long)n < 0 || n > INT_MAX)
4058 + return n;
4059 +
4060 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
4061 + if (!__builtin_constant_p(n))
4062 + check_object_size(from, n, true);
4063 + n = __copy_to_user(to, from, n);
4064 + }
4065 + return n;
4066 +}
4067 +
4068 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
4069 + unsigned long n);
4070 +
4071 +#endif /* __powerpc64__ */
4072 +
4073 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4074
4075 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4076 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4077 index 7215cc2..a9730c1 100644
4078 --- a/arch/powerpc/kernel/exceptions-64e.S
4079 +++ b/arch/powerpc/kernel/exceptions-64e.S
4080 @@ -661,6 +661,7 @@ storage_fault_common:
4081 std r14,_DAR(r1)
4082 std r15,_DSISR(r1)
4083 addi r3,r1,STACK_FRAME_OVERHEAD
4084 + bl .save_nvgprs
4085 mr r4,r14
4086 mr r5,r15
4087 ld r14,PACA_EXGEN+EX_R14(r13)
4088 @@ -669,8 +670,7 @@ storage_fault_common:
4089 cmpdi r3,0
4090 bne- 1f
4091 b .ret_from_except_lite
4092 -1: bl .save_nvgprs
4093 - mr r5,r3
4094 +1: mr r5,r3
4095 addi r3,r1,STACK_FRAME_OVERHEAD
4096 ld r4,_DAR(r1)
4097 bl .bad_page_fault
4098 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4099 index 8f880bc..c5bd2f3 100644
4100 --- a/arch/powerpc/kernel/exceptions-64s.S
4101 +++ b/arch/powerpc/kernel/exceptions-64s.S
4102 @@ -890,10 +890,10 @@ handle_page_fault:
4103 11: ld r4,_DAR(r1)
4104 ld r5,_DSISR(r1)
4105 addi r3,r1,STACK_FRAME_OVERHEAD
4106 + bl .save_nvgprs
4107 bl .do_page_fault
4108 cmpdi r3,0
4109 beq+ 12f
4110 - bl .save_nvgprs
4111 mr r5,r3
4112 addi r3,r1,STACK_FRAME_OVERHEAD
4113 lwz r4,_DAR(r1)
4114 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4115 index 0b6d796..d760ddb 100644
4116 --- a/arch/powerpc/kernel/module_32.c
4117 +++ b/arch/powerpc/kernel/module_32.c
4118 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4119 me->arch.core_plt_section = i;
4120 }
4121 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4122 - printk("Module doesn't contain .plt or .init.plt sections.\n");
4123 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4124 return -ENOEXEC;
4125 }
4126
4127 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4128
4129 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4130 /* Init, or core PLT? */
4131 - if (location >= mod->module_core
4132 - && location < mod->module_core + mod->core_size)
4133 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4134 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4135 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4136 - else
4137 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4138 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4139 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4140 + else {
4141 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4142 + return ~0UL;
4143 + }
4144
4145 /* Find this entry, or if that fails, the next avail. entry */
4146 while (entry->jump[0]) {
4147 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4148 index 4937c96..70714b7 100644
4149 --- a/arch/powerpc/kernel/process.c
4150 +++ b/arch/powerpc/kernel/process.c
4151 @@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
4152 * Lookup NIP late so we have the best change of getting the
4153 * above info out without failing
4154 */
4155 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4156 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4157 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4158 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4159 #endif
4160 show_stack(current, (unsigned long *) regs->gpr[1]);
4161 if (!user_mode(regs))
4162 @@ -1186,10 +1186,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4163 newsp = stack[0];
4164 ip = stack[STACK_FRAME_LR_SAVE];
4165 if (!firstframe || ip != lr) {
4166 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4167 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4168 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4169 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4170 - printk(" (%pS)",
4171 + printk(" (%pA)",
4172 (void *)current->ret_stack[curr_frame].ret);
4173 curr_frame--;
4174 }
4175 @@ -1209,7 +1209,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4176 struct pt_regs *regs = (struct pt_regs *)
4177 (sp + STACK_FRAME_OVERHEAD);
4178 lr = regs->link;
4179 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
4180 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
4181 regs->trap, (void *)regs->nip, (void *)lr);
4182 firstframe = 1;
4183 }
4184 @@ -1282,58 +1282,3 @@ void thread_info_cache_init(void)
4185 }
4186
4187 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4188 -
4189 -unsigned long arch_align_stack(unsigned long sp)
4190 -{
4191 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4192 - sp -= get_random_int() & ~PAGE_MASK;
4193 - return sp & ~0xf;
4194 -}
4195 -
4196 -static inline unsigned long brk_rnd(void)
4197 -{
4198 - unsigned long rnd = 0;
4199 -
4200 - /* 8MB for 32bit, 1GB for 64bit */
4201 - if (is_32bit_task())
4202 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4203 - else
4204 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4205 -
4206 - return rnd << PAGE_SHIFT;
4207 -}
4208 -
4209 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4210 -{
4211 - unsigned long base = mm->brk;
4212 - unsigned long ret;
4213 -
4214 -#ifdef CONFIG_PPC_STD_MMU_64
4215 - /*
4216 - * If we are using 1TB segments and we are allowed to randomise
4217 - * the heap, we can put it above 1TB so it is backed by a 1TB
4218 - * segment. Otherwise the heap will be in the bottom 1TB
4219 - * which always uses 256MB segments and this may result in a
4220 - * performance penalty.
4221 - */
4222 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4223 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4224 -#endif
4225 -
4226 - ret = PAGE_ALIGN(base + brk_rnd());
4227 -
4228 - if (ret < mm->brk)
4229 - return mm->brk;
4230 -
4231 - return ret;
4232 -}
4233 -
4234 -unsigned long randomize_et_dyn(unsigned long base)
4235 -{
4236 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4237 -
4238 - if (ret < base)
4239 - return base;
4240 -
4241 - return ret;
4242 -}
4243 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4244 index 8d8e028..c2aeb50 100644
4245 --- a/arch/powerpc/kernel/ptrace.c
4246 +++ b/arch/powerpc/kernel/ptrace.c
4247 @@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4248 return ret;
4249 }
4250
4251 +#ifdef CONFIG_GRKERNSEC_SETXID
4252 +extern void gr_delayed_cred_worker(void);
4253 +#endif
4254 +
4255 /*
4256 * We must return the syscall number to actually look up in the table.
4257 * This can be -1L to skip running any syscall at all.
4258 @@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4259
4260 secure_computing(regs->gpr[0]);
4261
4262 +#ifdef CONFIG_GRKERNSEC_SETXID
4263 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4264 + gr_delayed_cred_worker();
4265 +#endif
4266 +
4267 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4268 tracehook_report_syscall_entry(regs))
4269 /*
4270 @@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4271 {
4272 int step;
4273
4274 +#ifdef CONFIG_GRKERNSEC_SETXID
4275 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4276 + gr_delayed_cred_worker();
4277 +#endif
4278 +
4279 audit_syscall_exit(regs);
4280
4281 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4282 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4283 index 45eb998..0cb36bc 100644
4284 --- a/arch/powerpc/kernel/signal_32.c
4285 +++ b/arch/powerpc/kernel/signal_32.c
4286 @@ -861,7 +861,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4287 /* Save user registers on the stack */
4288 frame = &rt_sf->uc.uc_mcontext;
4289 addr = frame;
4290 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4291 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4292 if (save_user_regs(regs, frame, 0, 1))
4293 goto badframe;
4294 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4295 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4296 index 2692efd..6673d2e 100644
4297 --- a/arch/powerpc/kernel/signal_64.c
4298 +++ b/arch/powerpc/kernel/signal_64.c
4299 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4300 current->thread.fpscr.val = 0;
4301
4302 /* Set up to return from userspace. */
4303 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4304 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4305 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4306 } else {
4307 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4308 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4309 index 1589723..cefe690 100644
4310 --- a/arch/powerpc/kernel/traps.c
4311 +++ b/arch/powerpc/kernel/traps.c
4312 @@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4313 return flags;
4314 }
4315
4316 +extern void gr_handle_kernel_exploit(void);
4317 +
4318 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4319 int signr)
4320 {
4321 @@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4322 panic("Fatal exception in interrupt");
4323 if (panic_on_oops)
4324 panic("Fatal exception");
4325 +
4326 + gr_handle_kernel_exploit();
4327 +
4328 do_exit(signr);
4329 }
4330
4331 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4332 index 9eb5b9b..e45498a 100644
4333 --- a/arch/powerpc/kernel/vdso.c
4334 +++ b/arch/powerpc/kernel/vdso.c
4335 @@ -34,6 +34,7 @@
4336 #include <asm/firmware.h>
4337 #include <asm/vdso.h>
4338 #include <asm/vdso_datapage.h>
4339 +#include <asm/mman.h>
4340
4341 #include "setup.h"
4342
4343 @@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4344 vdso_base = VDSO32_MBASE;
4345 #endif
4346
4347 - current->mm->context.vdso_base = 0;
4348 + current->mm->context.vdso_base = ~0UL;
4349
4350 /* vDSO has a problem and was disabled, just don't "enable" it for the
4351 * process
4352 @@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4353 vdso_base = get_unmapped_area(NULL, vdso_base,
4354 (vdso_pages << PAGE_SHIFT) +
4355 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4356 - 0, 0);
4357 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
4358 if (IS_ERR_VALUE(vdso_base)) {
4359 rc = vdso_base;
4360 goto fail_mmapsem;
4361 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4362 index 5eea6f3..5d10396 100644
4363 --- a/arch/powerpc/lib/usercopy_64.c
4364 +++ b/arch/powerpc/lib/usercopy_64.c
4365 @@ -9,22 +9,6 @@
4366 #include <linux/module.h>
4367 #include <asm/uaccess.h>
4368
4369 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4370 -{
4371 - if (likely(access_ok(VERIFY_READ, from, n)))
4372 - n = __copy_from_user(to, from, n);
4373 - else
4374 - memset(to, 0, n);
4375 - return n;
4376 -}
4377 -
4378 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4379 -{
4380 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4381 - n = __copy_to_user(to, from, n);
4382 - return n;
4383 -}
4384 -
4385 unsigned long copy_in_user(void __user *to, const void __user *from,
4386 unsigned long n)
4387 {
4388 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4389 return n;
4390 }
4391
4392 -EXPORT_SYMBOL(copy_from_user);
4393 -EXPORT_SYMBOL(copy_to_user);
4394 EXPORT_SYMBOL(copy_in_user);
4395
4396 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4397 index 08ffcf5..a0ab912 100644
4398 --- a/arch/powerpc/mm/fault.c
4399 +++ b/arch/powerpc/mm/fault.c
4400 @@ -32,6 +32,10 @@
4401 #include <linux/perf_event.h>
4402 #include <linux/magic.h>
4403 #include <linux/ratelimit.h>
4404 +#include <linux/slab.h>
4405 +#include <linux/pagemap.h>
4406 +#include <linux/compiler.h>
4407 +#include <linux/unistd.h>
4408
4409 #include <asm/firmware.h>
4410 #include <asm/page.h>
4411 @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4412 }
4413 #endif
4414
4415 +#ifdef CONFIG_PAX_PAGEEXEC
4416 +/*
4417 + * PaX: decide what to do with offenders (regs->nip = fault address)
4418 + *
4419 + * returns 1 when task should be killed
4420 + */
4421 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4422 +{
4423 + return 1;
4424 +}
4425 +
4426 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4427 +{
4428 + unsigned long i;
4429 +
4430 + printk(KERN_ERR "PAX: bytes at PC: ");
4431 + for (i = 0; i < 5; i++) {
4432 + unsigned int c;
4433 + if (get_user(c, (unsigned int __user *)pc+i))
4434 + printk(KERN_CONT "???????? ");
4435 + else
4436 + printk(KERN_CONT "%08x ", c);
4437 + }
4438 + printk("\n");
4439 +}
4440 +#endif
4441 +
4442 /*
4443 * Check whether the instruction at regs->nip is a store using
4444 * an update addressing form which will update r1.
4445 @@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4446 * indicate errors in DSISR but can validly be set in SRR1.
4447 */
4448 if (trap == 0x400)
4449 - error_code &= 0x48200000;
4450 + error_code &= 0x58200000;
4451 else
4452 is_write = error_code & DSISR_ISSTORE;
4453 #else
4454 @@ -366,7 +397,7 @@ good_area:
4455 * "undefined". Of those that can be set, this is the only
4456 * one which seems bad.
4457 */
4458 - if (error_code & 0x10000000)
4459 + if (error_code & DSISR_GUARDED)
4460 /* Guarded storage error. */
4461 goto bad_area;
4462 #endif /* CONFIG_8xx */
4463 @@ -381,7 +412,7 @@ good_area:
4464 * processors use the same I/D cache coherency mechanism
4465 * as embedded.
4466 */
4467 - if (error_code & DSISR_PROTFAULT)
4468 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4469 goto bad_area;
4470 #endif /* CONFIG_PPC_STD_MMU */
4471
4472 @@ -463,6 +494,23 @@ bad_area:
4473 bad_area_nosemaphore:
4474 /* User mode accesses cause a SIGSEGV */
4475 if (user_mode(regs)) {
4476 +
4477 +#ifdef CONFIG_PAX_PAGEEXEC
4478 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4479 +#ifdef CONFIG_PPC_STD_MMU
4480 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4481 +#else
4482 + if (is_exec && regs->nip == address) {
4483 +#endif
4484 + switch (pax_handle_fetch_fault(regs)) {
4485 + }
4486 +
4487 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4488 + do_group_exit(SIGKILL);
4489 + }
4490 + }
4491 +#endif
4492 +
4493 _exception(SIGSEGV, regs, code, address);
4494 return 0;
4495 }
4496 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4497 index 67a42ed..1c7210c 100644
4498 --- a/arch/powerpc/mm/mmap_64.c
4499 +++ b/arch/powerpc/mm/mmap_64.c
4500 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4501 */
4502 if (mmap_is_legacy()) {
4503 mm->mmap_base = TASK_UNMAPPED_BASE;
4504 +
4505 +#ifdef CONFIG_PAX_RANDMMAP
4506 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4507 + mm->mmap_base += mm->delta_mmap;
4508 +#endif
4509 +
4510 mm->get_unmapped_area = arch_get_unmapped_area;
4511 mm->unmap_area = arch_unmap_area;
4512 } else {
4513 mm->mmap_base = mmap_base();
4514 +
4515 +#ifdef CONFIG_PAX_RANDMMAP
4516 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4517 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4518 +#endif
4519 +
4520 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4521 mm->unmap_area = arch_unmap_area_topdown;
4522 }
4523 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4524 index 73709f7..6b90313 100644
4525 --- a/arch/powerpc/mm/slice.c
4526 +++ b/arch/powerpc/mm/slice.c
4527 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4528 if ((mm->task_size - len) < addr)
4529 return 0;
4530 vma = find_vma(mm, addr);
4531 - return (!vma || (addr + len) <= vma->vm_start);
4532 + return check_heap_stack_gap(vma, addr, len);
4533 }
4534
4535 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4536 @@ -256,7 +256,7 @@ full_search:
4537 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4538 continue;
4539 }
4540 - if (!vma || addr + len <= vma->vm_start) {
4541 + if (check_heap_stack_gap(vma, addr, len)) {
4542 /*
4543 * Remember the place where we stopped the search:
4544 */
4545 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4546 }
4547 }
4548
4549 - addr = mm->mmap_base;
4550 - while (addr > len) {
4551 + if (mm->mmap_base < len)
4552 + addr = -ENOMEM;
4553 + else
4554 + addr = mm->mmap_base - len;
4555 +
4556 + while (!IS_ERR_VALUE(addr)) {
4557 /* Go down by chunk size */
4558 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4559 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4560
4561 /* Check for hit with different page size */
4562 mask = slice_range_to_mask(addr, len);
4563 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4564 * return with success:
4565 */
4566 vma = find_vma(mm, addr);
4567 - if (!vma || (addr + len) <= vma->vm_start) {
4568 + if (check_heap_stack_gap(vma, addr, len)) {
4569 /* remember the address as a hint for next time */
4570 if (use_cache)
4571 mm->free_area_cache = addr;
4572 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4573 mm->cached_hole_size = vma->vm_start - addr;
4574
4575 /* try just below the current vma->vm_start */
4576 - addr = vma->vm_start;
4577 + addr = skip_heap_stack_gap(vma, len);
4578 }
4579
4580 /*
4581 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4582 if (fixed && addr > (mm->task_size - len))
4583 return -EINVAL;
4584
4585 +#ifdef CONFIG_PAX_RANDMMAP
4586 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4587 + addr = 0;
4588 +#endif
4589 +
4590 /* If hint, make sure it matches our alignment restrictions */
4591 if (!fixed && addr) {
4592 addr = _ALIGN_UP(addr, 1ul << pshift);
4593 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4594 index 748347b..81bc6c7 100644
4595 --- a/arch/s390/include/asm/atomic.h
4596 +++ b/arch/s390/include/asm/atomic.h
4597 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4598 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4599 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4600
4601 +#define atomic64_read_unchecked(v) atomic64_read(v)
4602 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4603 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4604 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4605 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4606 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4607 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4608 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4609 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4610 +
4611 #define smp_mb__before_atomic_dec() smp_mb()
4612 #define smp_mb__after_atomic_dec() smp_mb()
4613 #define smp_mb__before_atomic_inc() smp_mb()
4614 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4615 index 2a30d5a..5e5586f 100644
4616 --- a/arch/s390/include/asm/cache.h
4617 +++ b/arch/s390/include/asm/cache.h
4618 @@ -11,8 +11,10 @@
4619 #ifndef __ARCH_S390_CACHE_H
4620 #define __ARCH_S390_CACHE_H
4621
4622 -#define L1_CACHE_BYTES 256
4623 +#include <linux/const.h>
4624 +
4625 #define L1_CACHE_SHIFT 8
4626 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4627 #define NET_SKB_PAD 32
4628
4629 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4630 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4631 index c4ee39f..352881b 100644
4632 --- a/arch/s390/include/asm/elf.h
4633 +++ b/arch/s390/include/asm/elf.h
4634 @@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
4635 the loader. We need to make sure that it is out of the way of the program
4636 that it will "exec", and that there is sufficient room for the brk. */
4637
4638 -extern unsigned long randomize_et_dyn(unsigned long base);
4639 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4640 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4641 +
4642 +#ifdef CONFIG_PAX_ASLR
4643 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4644 +
4645 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4646 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4647 +#endif
4648
4649 /* This yields a mask that user programs can use to figure out what
4650 instruction set this CPU supports. */
4651 @@ -210,7 +216,4 @@ struct linux_binprm;
4652 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4653 int arch_setup_additional_pages(struct linux_binprm *, int);
4654
4655 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4656 -#define arch_randomize_brk arch_randomize_brk
4657 -
4658 #endif
4659 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4660 index c4a93d6..4d2a9b4 100644
4661 --- a/arch/s390/include/asm/exec.h
4662 +++ b/arch/s390/include/asm/exec.h
4663 @@ -7,6 +7,6 @@
4664 #ifndef __ASM_EXEC_H
4665 #define __ASM_EXEC_H
4666
4667 -extern unsigned long arch_align_stack(unsigned long sp);
4668 +#define arch_align_stack(x) ((x) & ~0xfUL)
4669
4670 #endif /* __ASM_EXEC_H */
4671 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4672 index 8f2cada..1cddd55 100644
4673 --- a/arch/s390/include/asm/uaccess.h
4674 +++ b/arch/s390/include/asm/uaccess.h
4675 @@ -236,6 +236,10 @@ static inline unsigned long __must_check
4676 copy_to_user(void __user *to, const void *from, unsigned long n)
4677 {
4678 might_fault();
4679 +
4680 + if ((long)n < 0)
4681 + return n;
4682 +
4683 if (access_ok(VERIFY_WRITE, to, n))
4684 n = __copy_to_user(to, from, n);
4685 return n;
4686 @@ -261,6 +265,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4687 static inline unsigned long __must_check
4688 __copy_from_user(void *to, const void __user *from, unsigned long n)
4689 {
4690 + if ((long)n < 0)
4691 + return n;
4692 +
4693 if (__builtin_constant_p(n) && (n <= 256))
4694 return uaccess.copy_from_user_small(n, from, to);
4695 else
4696 @@ -295,6 +302,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4697 unsigned int sz = __compiletime_object_size(to);
4698
4699 might_fault();
4700 +
4701 + if ((long)n < 0)
4702 + return n;
4703 +
4704 if (unlikely(sz != -1 && sz < n)) {
4705 copy_from_user_overflow();
4706 return n;
4707 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4708 index dfcb343..eda788a 100644
4709 --- a/arch/s390/kernel/module.c
4710 +++ b/arch/s390/kernel/module.c
4711 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4712
4713 /* Increase core size by size of got & plt and set start
4714 offsets for got and plt. */
4715 - me->core_size = ALIGN(me->core_size, 4);
4716 - me->arch.got_offset = me->core_size;
4717 - me->core_size += me->arch.got_size;
4718 - me->arch.plt_offset = me->core_size;
4719 - me->core_size += me->arch.plt_size;
4720 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4721 + me->arch.got_offset = me->core_size_rw;
4722 + me->core_size_rw += me->arch.got_size;
4723 + me->arch.plt_offset = me->core_size_rx;
4724 + me->core_size_rx += me->arch.plt_size;
4725 return 0;
4726 }
4727
4728 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4729 if (info->got_initialized == 0) {
4730 Elf_Addr *gotent;
4731
4732 - gotent = me->module_core + me->arch.got_offset +
4733 + gotent = me->module_core_rw + me->arch.got_offset +
4734 info->got_offset;
4735 *gotent = val;
4736 info->got_initialized = 1;
4737 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4738 else if (r_type == R_390_GOTENT ||
4739 r_type == R_390_GOTPLTENT)
4740 *(unsigned int *) loc =
4741 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4742 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4743 else if (r_type == R_390_GOT64 ||
4744 r_type == R_390_GOTPLT64)
4745 *(unsigned long *) loc = val;
4746 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4747 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4748 if (info->plt_initialized == 0) {
4749 unsigned int *ip;
4750 - ip = me->module_core + me->arch.plt_offset +
4751 + ip = me->module_core_rx + me->arch.plt_offset +
4752 info->plt_offset;
4753 #ifndef CONFIG_64BIT
4754 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4755 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4756 val - loc + 0xffffUL < 0x1ffffeUL) ||
4757 (r_type == R_390_PLT32DBL &&
4758 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4759 - val = (Elf_Addr) me->module_core +
4760 + val = (Elf_Addr) me->module_core_rx +
4761 me->arch.plt_offset +
4762 info->plt_offset;
4763 val += rela->r_addend - loc;
4764 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4765 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4766 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4767 val = val + rela->r_addend -
4768 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4769 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4770 if (r_type == R_390_GOTOFF16)
4771 *(unsigned short *) loc = val;
4772 else if (r_type == R_390_GOTOFF32)
4773 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4774 break;
4775 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4776 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4777 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4778 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4779 rela->r_addend - loc;
4780 if (r_type == R_390_GOTPC)
4781 *(unsigned int *) loc = val;
4782 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4783 index 60055ce..ee4b252 100644
4784 --- a/arch/s390/kernel/process.c
4785 +++ b/arch/s390/kernel/process.c
4786 @@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
4787 }
4788 return 0;
4789 }
4790 -
4791 -unsigned long arch_align_stack(unsigned long sp)
4792 -{
4793 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4794 - sp -= get_random_int() & ~PAGE_MASK;
4795 - return sp & ~0xf;
4796 -}
4797 -
4798 -static inline unsigned long brk_rnd(void)
4799 -{
4800 - /* 8MB for 32bit, 1GB for 64bit */
4801 - if (is_32bit_task())
4802 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4803 - else
4804 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4805 -}
4806 -
4807 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4808 -{
4809 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4810 -
4811 - if (ret < mm->brk)
4812 - return mm->brk;
4813 - return ret;
4814 -}
4815 -
4816 -unsigned long randomize_et_dyn(unsigned long base)
4817 -{
4818 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4819 -
4820 - if (!(current->flags & PF_RANDOMIZE))
4821 - return base;
4822 - if (ret < base)
4823 - return base;
4824 - return ret;
4825 -}
4826 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4827 index 2857c48..d047481 100644
4828 --- a/arch/s390/mm/mmap.c
4829 +++ b/arch/s390/mm/mmap.c
4830 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4831 */
4832 if (mmap_is_legacy()) {
4833 mm->mmap_base = TASK_UNMAPPED_BASE;
4834 +
4835 +#ifdef CONFIG_PAX_RANDMMAP
4836 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4837 + mm->mmap_base += mm->delta_mmap;
4838 +#endif
4839 +
4840 mm->get_unmapped_area = arch_get_unmapped_area;
4841 mm->unmap_area = arch_unmap_area;
4842 } else {
4843 mm->mmap_base = mmap_base();
4844 +
4845 +#ifdef CONFIG_PAX_RANDMMAP
4846 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4847 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4848 +#endif
4849 +
4850 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4851 mm->unmap_area = arch_unmap_area_topdown;
4852 }
4853 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4854 */
4855 if (mmap_is_legacy()) {
4856 mm->mmap_base = TASK_UNMAPPED_BASE;
4857 +
4858 +#ifdef CONFIG_PAX_RANDMMAP
4859 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4860 + mm->mmap_base += mm->delta_mmap;
4861 +#endif
4862 +
4863 mm->get_unmapped_area = s390_get_unmapped_area;
4864 mm->unmap_area = arch_unmap_area;
4865 } else {
4866 mm->mmap_base = mmap_base();
4867 +
4868 +#ifdef CONFIG_PAX_RANDMMAP
4869 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4870 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4871 +#endif
4872 +
4873 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4874 mm->unmap_area = arch_unmap_area_topdown;
4875 }
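With PAX_RANDMMAP enabled, the legacy (bottom-up) layout pushes mmap_base up by delta_mmap while the top-down layout pulls it down by delta_mmap + delta_stack, so the whole mmap region is rebased by a per-process random amount in the direction it grows. A compact sketch of that adjustment; mmap_base, pax_flags, delta_mmap and delta_stack are the fields used by the hunks, everything else is a simplified assumption:

/* Sketch of the base adjustment applied in the hunks above.  The flag value
 * and the struct are simplified assumptions for illustration. */
#define MF_PAX_RANDMMAP  0x01UL         /* assumed flag value */

struct mm_sketch {
        unsigned long mmap_base;
        unsigned long pax_flags;
        unsigned long delta_mmap;       /* random, page-aligned offset */
        unsigned long delta_stack;      /* random, page-aligned offset */
};

static void pick_mmap_base(struct mm_sketch *mm, unsigned long legacy_base,
                           unsigned long topdown_base, int legacy)
{
        if (legacy) {
                mm->mmap_base = legacy_base;
                if (mm->pax_flags & MF_PAX_RANDMMAP)
                        mm->mmap_base += mm->delta_mmap;                    /* shift upward   */
        } else {
                mm->mmap_base = topdown_base;
                if (mm->pax_flags & MF_PAX_RANDMMAP)
                        mm->mmap_base -= mm->delta_mmap + mm->delta_stack;  /* shift downward */
        }
}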
4876 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4877 index ae3d59f..f65f075 100644
4878 --- a/arch/score/include/asm/cache.h
4879 +++ b/arch/score/include/asm/cache.h
4880 @@ -1,7 +1,9 @@
4881 #ifndef _ASM_SCORE_CACHE_H
4882 #define _ASM_SCORE_CACHE_H
4883
4884 +#include <linux/const.h>
4885 +
4886 #define L1_CACHE_SHIFT 4
4887 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4888 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4889
4890 #endif /* _ASM_SCORE_CACHE_H */
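Defining L1_CACHE_BYTES with _AC(1,UL) (here and in the sh and sparc headers below) makes the constant an unsigned long in C, avoiding sign-conversion surprises in size arithmetic, while still expanding to a bare number when the header is pulled into assembly. A sketch of the _AC() machinery from <linux/const.h>, written from memory, so treat the exact spelling as an assumption:

/* Sketch of the _AC() helper from <linux/const.h> (from memory; check the
 * header in your tree for the authoritative version). */
#ifdef __ASSEMBLY__
#define _AC(X, Y)       X               /* assembler: bare constant       */
#else
#define __AC(X, Y)      (X##Y)          /* C: paste the suffix, e.g. 1UL  */
#define _AC(X, Y)       __AC(X, Y)
#endif

#define L1_CACHE_SHIFT  4
#define L1_CACHE_BYTES  (_AC(1, UL) << L1_CACHE_SHIFT)  /* 16UL, not a plain int 16 */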
4891 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4892 index f9f3cd5..58ff438 100644
4893 --- a/arch/score/include/asm/exec.h
4894 +++ b/arch/score/include/asm/exec.h
4895 @@ -1,6 +1,6 @@
4896 #ifndef _ASM_SCORE_EXEC_H
4897 #define _ASM_SCORE_EXEC_H
4898
4899 -extern unsigned long arch_align_stack(unsigned long sp);
4900 +#define arch_align_stack(x) (x)
4901
4902 #endif /* _ASM_SCORE_EXEC_H */
4903 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4904 index 2707023..1c2a3b7 100644
4905 --- a/arch/score/kernel/process.c
4906 +++ b/arch/score/kernel/process.c
4907 @@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
4908
4909 return task_pt_regs(task)->cp0_epc;
4910 }
4911 -
4912 -unsigned long arch_align_stack(unsigned long sp)
4913 -{
4914 - return sp;
4915 -}
4916 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4917 index ef9e555..331bd29 100644
4918 --- a/arch/sh/include/asm/cache.h
4919 +++ b/arch/sh/include/asm/cache.h
4920 @@ -9,10 +9,11 @@
4921 #define __ASM_SH_CACHE_H
4922 #ifdef __KERNEL__
4923
4924 +#include <linux/const.h>
4925 #include <linux/init.h>
4926 #include <cpu/cache.h>
4927
4928 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4929 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4930
4931 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4932
4933 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4934 index afeb710..d1d1289 100644
4935 --- a/arch/sh/mm/mmap.c
4936 +++ b/arch/sh/mm/mmap.c
4937 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4938 addr = PAGE_ALIGN(addr);
4939
4940 vma = find_vma(mm, addr);
4941 - if (TASK_SIZE - len >= addr &&
4942 - (!vma || addr + len <= vma->vm_start))
4943 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4944 return addr;
4945 }
4946
4947 @@ -106,7 +105,7 @@ full_search:
4948 }
4949 return -ENOMEM;
4950 }
4951 - if (likely(!vma || addr + len <= vma->vm_start)) {
4952 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4953 /*
4954 * Remember the place where we stopped the search:
4955 */
4956 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4957 addr = PAGE_ALIGN(addr);
4958
4959 vma = find_vma(mm, addr);
4960 - if (TASK_SIZE - len >= addr &&
4961 - (!vma || addr + len <= vma->vm_start))
4962 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4963 return addr;
4964 }
4965
4966 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4967 /* make sure it can fit in the remaining address space */
4968 if (likely(addr > len)) {
4969 vma = find_vma(mm, addr-len);
4970 - if (!vma || addr <= vma->vm_start) {
4971 + if (check_heap_stack_gap(vma, addr - len, len)) {
4972 /* remember the address as a hint for next time */
4973 return (mm->free_area_cache = addr-len);
4974 }
4975 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4976 if (unlikely(mm->mmap_base < len))
4977 goto bottomup;
4978
4979 - addr = mm->mmap_base-len;
4980 - if (do_colour_align)
4981 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4982 + addr = mm->mmap_base - len;
4983
4984 do {
4985 + if (do_colour_align)
4986 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4987 /*
4988 * Lookup failure means no vma is above this address,
4989 * else if new region fits below vma->vm_start,
4990 * return with success:
4991 */
4992 vma = find_vma(mm, addr);
4993 - if (likely(!vma || addr+len <= vma->vm_start)) {
4994 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4995 /* remember the address as a hint for next time */
4996 return (mm->free_area_cache = addr);
4997 }
4998 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4999 mm->cached_hole_size = vma->vm_start - addr;
5000
5001 /* try just below the current vma->vm_start */
5002 - addr = vma->vm_start-len;
5003 - if (do_colour_align)
5004 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5005 - } while (likely(len < vma->vm_start));
5006 + addr = skip_heap_stack_gap(vma, len);
5007 + } while (!IS_ERR_VALUE(addr));
5008
5009 bottomup:
5010 /*
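The open-coded `!vma || addr + len <= vma->vm_start` tests are replaced by check_heap_stack_gap(), which, in addition to the original fit test, is presumably meant to keep a new mapping a configurable number of pages away from any stack VMA growing down toward it; the top-down loop now steps with skip_heap_stack_gap(), which yields the next candidate address or an error value once the search space is exhausted. A simplified sketch of the gap check, not the patch's exact implementation:

/* Simplified sketch of the gap check used above; the real helpers live in
 * mm/mmap.c in the patched tree and honour the grsecurity heap_stack_gap
 * setting.  The gap size and constants here are assumptions. */
#define VM_GROWSDOWN     0x0100UL
#define SKETCH_GAP_PAGES 64UL           /* assumed gap, in pages */
#define SKETCH_PAGE_SIZE 4096UL

struct vma_sketch {
        unsigned long vm_start;
        unsigned long vm_flags;
};

static int check_heap_stack_gap_sketch(const struct vma_sketch *vma,
                                       unsigned long addr, unsigned long len)
{
        unsigned long gap = SKETCH_GAP_PAGES * SKETCH_PAGE_SIZE;

        if (!vma)                               /* nothing above: fits */
                return 1;
        if (vma->vm_flags & VM_GROWSDOWN)       /* keep clear of a stack segment */
                return addr + len + gap <= vma->vm_start;
        return addr + len <= vma->vm_start;     /* the original check */
}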
5011 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5012 index eddcfb3..b117d90 100644
5013 --- a/arch/sparc/Makefile
5014 +++ b/arch/sparc/Makefile
5015 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5016 # Export what is needed by arch/sparc/boot/Makefile
5017 export VMLINUX_INIT VMLINUX_MAIN
5018 VMLINUX_INIT := $(head-y) $(init-y)
5019 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5020 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5021 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5022 VMLINUX_MAIN += $(drivers-y) $(net-y)
5023
5024 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5025 index ce35a1c..2e7b8f9 100644
5026 --- a/arch/sparc/include/asm/atomic_64.h
5027 +++ b/arch/sparc/include/asm/atomic_64.h
5028 @@ -14,18 +14,40 @@
5029 #define ATOMIC64_INIT(i) { (i) }
5030
5031 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5032 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5033 +{
5034 + return v->counter;
5035 +}
5036 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5037 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5038 +{
5039 + return v->counter;
5040 +}
5041
5042 #define atomic_set(v, i) (((v)->counter) = i)
5043 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5044 +{
5045 + v->counter = i;
5046 +}
5047 #define atomic64_set(v, i) (((v)->counter) = i)
5048 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5049 +{
5050 + v->counter = i;
5051 +}
5052
5053 extern void atomic_add(int, atomic_t *);
5054 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5055 extern void atomic64_add(long, atomic64_t *);
5056 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5057 extern void atomic_sub(int, atomic_t *);
5058 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5059 extern void atomic64_sub(long, atomic64_t *);
5060 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5061
5062 extern int atomic_add_ret(int, atomic_t *);
5063 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5064 extern long atomic64_add_ret(long, atomic64_t *);
5065 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5066 extern int atomic_sub_ret(int, atomic_t *);
5067 extern long atomic64_sub_ret(long, atomic64_t *);
5068
5069 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5070 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5071
5072 #define atomic_inc_return(v) atomic_add_ret(1, v)
5073 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5074 +{
5075 + return atomic_add_ret_unchecked(1, v);
5076 +}
5077 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5078 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5079 +{
5080 + return atomic64_add_ret_unchecked(1, v);
5081 +}
5082
5083 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5084 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5085
5086 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5087 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5088 +{
5089 + return atomic_add_ret_unchecked(i, v);
5090 +}
5091 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5092 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5093 +{
5094 + return atomic64_add_ret_unchecked(i, v);
5095 +}
5096
5097 /*
5098 * atomic_inc_and_test - increment and test
5099 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5100 * other cases.
5101 */
5102 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5103 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5104 +{
5105 + return atomic_inc_return_unchecked(v) == 0;
5106 +}
5107 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5108
5109 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5110 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5111 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5112
5113 #define atomic_inc(v) atomic_add(1, v)
5114 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5115 +{
5116 + atomic_add_unchecked(1, v);
5117 +}
5118 #define atomic64_inc(v) atomic64_add(1, v)
5119 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5120 +{
5121 + atomic64_add_unchecked(1, v);
5122 +}
5123
5124 #define atomic_dec(v) atomic_sub(1, v)
5125 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5126 +{
5127 + atomic_sub_unchecked(1, v);
5128 +}
5129 #define atomic64_dec(v) atomic64_sub(1, v)
5130 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5131 +{
5132 + atomic64_sub_unchecked(1, v);
5133 +}
5134
5135 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5136 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5137
5138 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5139 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5140 +{
5141 + return cmpxchg(&v->counter, old, new);
5142 +}
5143 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5144 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5145 +{
5146 + return xchg(&v->counter, new);
5147 +}
5148
5149 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5150 {
5151 - int c, old;
5152 + int c, old, new;
5153 c = atomic_read(v);
5154 for (;;) {
5155 - if (unlikely(c == (u)))
5156 + if (unlikely(c == u))
5157 break;
5158 - old = atomic_cmpxchg((v), c, c + (a));
5159 +
5160 + asm volatile("addcc %2, %0, %0\n"
5161 +
5162 +#ifdef CONFIG_PAX_REFCOUNT
5163 + "tvs %%icc, 6\n"
5164 +#endif
5165 +
5166 + : "=r" (new)
5167 + : "0" (c), "ir" (a)
5168 + : "cc");
5169 +
5170 + old = atomic_cmpxchg(v, c, new);
5171 if (likely(old == c))
5172 break;
5173 c = old;
5174 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5175 #define atomic64_cmpxchg(v, o, n) \
5176 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5177 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5178 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5179 +{
5180 + return xchg(&v->counter, new);
5181 +}
5182
5183 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5184 {
5185 - long c, old;
5186 + long c, old, new;
5187 c = atomic64_read(v);
5188 for (;;) {
5189 - if (unlikely(c == (u)))
5190 + if (unlikely(c == u))
5191 break;
5192 - old = atomic64_cmpxchg((v), c, c + (a));
5193 +
5194 + asm volatile("addcc %2, %0, %0\n"
5195 +
5196 +#ifdef CONFIG_PAX_REFCOUNT
5197 + "tvs %%xcc, 6\n"
5198 +#endif
5199 +
5200 + : "=r" (new)
5201 + : "0" (c), "ir" (a)
5202 + : "cc");
5203 +
5204 + old = atomic64_cmpxchg(v, c, new);
5205 if (likely(old == c))
5206 break;
5207 c = old;
5208 }
5209 - return c != (u);
5210 + return c != u;
5211 }
5212
5213 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
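The PAX_REFCOUNT changes swap add for addcc, which updates the condition codes, and follow it with `tvs %icc, 6` (`%xcc` in the 64-bit variant), a conditional trap that raises software trap 6 whenever the overflow flag is set. A wrapped reference count is therefore caught at the exact instruction that wrapped it, while the new *_unchecked variants keep plain wrapping semantics for counters where overflow is harmless. A portable C analogy of that checked/unchecked split, using compiler builtins instead of SPARC traps:

/* C-level analogy of the checked vs. unchecked atomics above.  This is not
 * the patch's implementation (that is SPARC assembly plus a trap handler);
 * it only mirrors the semantics: trap on signed overflow vs. plain wrap. */
#include <stdlib.h>

static int refcount_add_checked(int *counter, int a)
{
        int new;

        if (__builtin_add_overflow(*counter, a, &new))
                abort();                /* analogue of "tvs %icc, 6" firing  */
        *counter = new;
        return new;
}

static int counter_add_unchecked(int *counter, int a)
{
        /* wraps modulo 2^32, like addcc without the trailing tvs */
        *counter = (int)((unsigned int)*counter + (unsigned int)a);
        return *counter;
}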
5214 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5215 index 69358b5..9d0d492 100644
5216 --- a/arch/sparc/include/asm/cache.h
5217 +++ b/arch/sparc/include/asm/cache.h
5218 @@ -7,10 +7,12 @@
5219 #ifndef _SPARC_CACHE_H
5220 #define _SPARC_CACHE_H
5221
5222 +#include <linux/const.h>
5223 +
5224 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5225
5226 #define L1_CACHE_SHIFT 5
5227 -#define L1_CACHE_BYTES 32
5228 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5229
5230 #ifdef CONFIG_SPARC32
5231 #define SMP_CACHE_BYTES_SHIFT 5
5232 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5233 index 4269ca6..e3da77f 100644
5234 --- a/arch/sparc/include/asm/elf_32.h
5235 +++ b/arch/sparc/include/asm/elf_32.h
5236 @@ -114,6 +114,13 @@ typedef struct {
5237
5238 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5239
5240 +#ifdef CONFIG_PAX_ASLR
5241 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5242 +
5243 +#define PAX_DELTA_MMAP_LEN 16
5244 +#define PAX_DELTA_STACK_LEN 16
5245 +#endif
5246 +
5247 /* This yields a mask that user programs can use to figure out what
5248 instruction set this cpu supports. This can NOT be done in userspace
5249 on Sparc. */
5250 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5251 index 7df8b7f..4946269 100644
5252 --- a/arch/sparc/include/asm/elf_64.h
5253 +++ b/arch/sparc/include/asm/elf_64.h
5254 @@ -180,6 +180,13 @@ typedef struct {
5255 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5256 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5257
5258 +#ifdef CONFIG_PAX_ASLR
5259 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5260 +
5261 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5262 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5263 +#endif
5264 +
5265 extern unsigned long sparc64_elf_hwcap;
5266 #define ELF_HWCAP sparc64_elf_hwcap
5267
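PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits in the mmap and stack deltas. Assuming, as in other PaX ports, that each delta is a LEN-bit random page count, and that sparc64 uses 8 KB pages, the spans work out to roughly 128 MiB of mmap randomization for a 32-bit task and about 2 TiB for a 64-bit one. The rough calculation:

#include <stdio.h>

/* Rough entropy estimate for the PAX_ASLR deltas defined above, under the
 * assumption that each delta is a PAX_DELTA_*_LEN-bit random page count and
 * that PAGE_SHIFT is 13 (8 KB pages) on sparc64. */
int main(void)
{
        unsigned int page_shift = 13;
        unsigned int mmap_bits_32 = 14, mmap_bits_64 = 28;

        printf("32-bit mmap randomization span: %llu MiB\n",
               (1ULL << (mmap_bits_32 + page_shift)) >> 20);    /* 128 MiB  */
        printf("64-bit mmap randomization span: %llu GiB\n",
               (1ULL << (mmap_bits_64 + page_shift)) >> 30);    /* 2048 GiB */
        return 0;
}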
5268 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5269 index ca2b344..c6084f89 100644
5270 --- a/arch/sparc/include/asm/pgalloc_32.h
5271 +++ b/arch/sparc/include/asm/pgalloc_32.h
5272 @@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5273 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5274 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5275 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5276 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5277
5278 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5279 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5280 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5281 index 40b2d7a..22a665b 100644
5282 --- a/arch/sparc/include/asm/pgalloc_64.h
5283 +++ b/arch/sparc/include/asm/pgalloc_64.h
5284 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5285 }
5286
5287 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5288 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5289
5290 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5291 {
5292 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5293 index 3d71018..48a11c5 100644
5294 --- a/arch/sparc/include/asm/pgtable_32.h
5295 +++ b/arch/sparc/include/asm/pgtable_32.h
5296 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5297 BTFIXUPDEF_INT(page_none)
5298 BTFIXUPDEF_INT(page_copy)
5299 BTFIXUPDEF_INT(page_readonly)
5300 +
5301 +#ifdef CONFIG_PAX_PAGEEXEC
5302 +BTFIXUPDEF_INT(page_shared_noexec)
5303 +BTFIXUPDEF_INT(page_copy_noexec)
5304 +BTFIXUPDEF_INT(page_readonly_noexec)
5305 +#endif
5306 +
5307 BTFIXUPDEF_INT(page_kernel)
5308
5309 #define PMD_SHIFT SUN4C_PMD_SHIFT
5310 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5311 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5312 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5313
5314 +#ifdef CONFIG_PAX_PAGEEXEC
5315 +extern pgprot_t PAGE_SHARED_NOEXEC;
5316 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5317 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5318 +#else
5319 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5320 +# define PAGE_COPY_NOEXEC PAGE_COPY
5321 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5322 +#endif
5323 +
5324 extern unsigned long page_kernel;
5325
5326 #ifdef MODULE
5327 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5328 index f6ae2b2..b03ffc7 100644
5329 --- a/arch/sparc/include/asm/pgtsrmmu.h
5330 +++ b/arch/sparc/include/asm/pgtsrmmu.h
5331 @@ -115,6 +115,13 @@
5332 SRMMU_EXEC | SRMMU_REF)
5333 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5334 SRMMU_EXEC | SRMMU_REF)
5335 +
5336 +#ifdef CONFIG_PAX_PAGEEXEC
5337 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5338 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5339 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5340 +#endif
5341 +
5342 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5343 SRMMU_DIRTY | SRMMU_REF)
5344
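The new SRMMU_PAGE_*_NOEXEC protections are the existing ones with the SRMMU_EXEC bit left out, and the PAGE_*_NOEXEC macros added to pgtable_32.h above fall back to the ordinary protections when PAX_PAGEEXEC is disabled, so callers can use the NOEXEC names unconditionally. An illustration of how an exec or no-exec protection ends up being selected; in the kernel this choice is actually driven by the protection_map[] table indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits:

/* Illustration only: selecting an executable vs. non-executable protection
 * for a mapping.  Flag values and the pgprot stand-in type are assumptions. */
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

typedef unsigned long pgprot_sketch_t;

extern pgprot_sketch_t PAGE_SHARED, PAGE_SHARED_NOEXEC;
extern pgprot_sketch_t PAGE_COPY,   PAGE_COPY_NOEXEC;

static pgprot_sketch_t vm_prot_sketch(unsigned long vm_flags)
{
        if (vm_flags & VM_SHARED)
                return (vm_flags & VM_EXEC) ? PAGE_SHARED : PAGE_SHARED_NOEXEC;
        return (vm_flags & VM_EXEC) ? PAGE_COPY : PAGE_COPY_NOEXEC;
}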
5345 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5346 index 9689176..63c18ea 100644
5347 --- a/arch/sparc/include/asm/spinlock_64.h
5348 +++ b/arch/sparc/include/asm/spinlock_64.h
5349 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5350
5351 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5352
5353 -static void inline arch_read_lock(arch_rwlock_t *lock)
5354 +static inline void arch_read_lock(arch_rwlock_t *lock)
5355 {
5356 unsigned long tmp1, tmp2;
5357
5358 __asm__ __volatile__ (
5359 "1: ldsw [%2], %0\n"
5360 " brlz,pn %0, 2f\n"
5361 -"4: add %0, 1, %1\n"
5362 +"4: addcc %0, 1, %1\n"
5363 +
5364 +#ifdef CONFIG_PAX_REFCOUNT
5365 +" tvs %%icc, 6\n"
5366 +#endif
5367 +
5368 " cas [%2], %0, %1\n"
5369 " cmp %0, %1\n"
5370 " bne,pn %%icc, 1b\n"
5371 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5372 " .previous"
5373 : "=&r" (tmp1), "=&r" (tmp2)
5374 : "r" (lock)
5375 - : "memory");
5376 + : "memory", "cc");
5377 }
5378
5379 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5380 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5381 {
5382 int tmp1, tmp2;
5383
5384 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5385 "1: ldsw [%2], %0\n"
5386 " brlz,a,pn %0, 2f\n"
5387 " mov 0, %0\n"
5388 -" add %0, 1, %1\n"
5389 +" addcc %0, 1, %1\n"
5390 +
5391 +#ifdef CONFIG_PAX_REFCOUNT
5392 +" tvs %%icc, 6\n"
5393 +#endif
5394 +
5395 " cas [%2], %0, %1\n"
5396 " cmp %0, %1\n"
5397 " bne,pn %%icc, 1b\n"
5398 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5399 return tmp1;
5400 }
5401
5402 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5403 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5404 {
5405 unsigned long tmp1, tmp2;
5406
5407 __asm__ __volatile__(
5408 "1: lduw [%2], %0\n"
5409 -" sub %0, 1, %1\n"
5410 +" subcc %0, 1, %1\n"
5411 +
5412 +#ifdef CONFIG_PAX_REFCOUNT
5413 +" tvs %%icc, 6\n"
5414 +#endif
5415 +
5416 " cas [%2], %0, %1\n"
5417 " cmp %0, %1\n"
5418 " bne,pn %%xcc, 1b\n"
5419 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5420 : "memory");
5421 }
5422
5423 -static void inline arch_write_lock(arch_rwlock_t *lock)
5424 +static inline void arch_write_lock(arch_rwlock_t *lock)
5425 {
5426 unsigned long mask, tmp1, tmp2;
5427
5428 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5429 : "memory");
5430 }
5431
5432 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5433 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5434 {
5435 __asm__ __volatile__(
5436 " stw %%g0, [%0]"
5437 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5438 : "memory");
5439 }
5440
5441 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5442 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5443 {
5444 unsigned long mask, tmp1, tmp2, result;
5445
5446 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5447 index c2a1080..21ed218 100644
5448 --- a/arch/sparc/include/asm/thread_info_32.h
5449 +++ b/arch/sparc/include/asm/thread_info_32.h
5450 @@ -50,6 +50,8 @@ struct thread_info {
5451 unsigned long w_saved;
5452
5453 struct restart_block restart_block;
5454 +
5455 + unsigned long lowest_stack;
5456 };
5457
5458 /*
5459 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5460 index 01d057f..13a7d2f 100644
5461 --- a/arch/sparc/include/asm/thread_info_64.h
5462 +++ b/arch/sparc/include/asm/thread_info_64.h
5463 @@ -63,6 +63,8 @@ struct thread_info {
5464 struct pt_regs *kern_una_regs;
5465 unsigned int kern_una_insn;
5466
5467 + unsigned long lowest_stack;
5468 +
5469 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5470 };
5471
5472 @@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5473 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5474 /* flag bit 6 is available */
5475 #define TIF_32BIT 7 /* 32-bit binary */
5476 -/* flag bit 8 is available */
5477 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5478 #define TIF_SECCOMP 9 /* secure computing */
5479 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5480 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5481 +
5482 /* NOTE: Thread flags >= 12 should be ones we have no interest
5483 * in using in assembly, else we can't use the mask as
5484 * an immediate value in instructions such as andcc.
5485 @@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5486 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5487 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5488 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5489 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5490
5491 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5492 _TIF_DO_NOTIFY_RESUME_MASK | \
5493 _TIF_NEED_RESCHED)
5494 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5495
5496 +#define _TIF_WORK_SYSCALL \
5497 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5498 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5499 +
5500 +
5501 /*
5502 * Thread-synchronous status.
5503 *
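The hunk above collects the syscall-tracing flags plus the new TIF_GRSEC_SETXID into a single _TIF_WORK_SYSCALL mask, which the assembly in syscalls.S further down tests with one andcc. The comment's restriction that flags >= 12 must stay out of assembly exists because SPARC ALU immediates are 13-bit sign-extended (simm13), so the largest positive immediate is 0xfff, i.e. bits 0 through 11; every bit combined here (GRSEC_SETXID=8, SECCOMP=9, SYSCALL_AUDIT=10, SYSCALL_TRACEPOINT=11, plus TIF_SYSCALL_TRACE, which is also below 12) satisfies that. A trivial check:

#include <stdio.h>

/* Checks that the high flag bits combined into _TIF_WORK_SYSCALL above still
 * fit in a 13-bit sign-extended SPARC immediate (largest positive value 0xfff). */
int main(void)
{
        unsigned long mask = (1UL << 8) | (1UL << 9) | (1UL << 10) | (1UL << 11);

        printf("mask 0x%lx %s as an andcc immediate\n", mask,
               mask <= 0xfffUL ? "fits" : "does not fit");      /* prints: fits */
        return 0;
}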
5504 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5505 index e88fbe5..96b0ce5 100644
5506 --- a/arch/sparc/include/asm/uaccess.h
5507 +++ b/arch/sparc/include/asm/uaccess.h
5508 @@ -1,5 +1,13 @@
5509 #ifndef ___ASM_SPARC_UACCESS_H
5510 #define ___ASM_SPARC_UACCESS_H
5511 +
5512 +#ifdef __KERNEL__
5513 +#ifndef __ASSEMBLY__
5514 +#include <linux/types.h>
5515 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5516 +#endif
5517 +#endif
5518 +
5519 #if defined(__sparc__) && defined(__arch64__)
5520 #include <asm/uaccess_64.h>
5521 #else
5522 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5523 index 8303ac4..07f333d 100644
5524 --- a/arch/sparc/include/asm/uaccess_32.h
5525 +++ b/arch/sparc/include/asm/uaccess_32.h
5526 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5527
5528 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5529 {
5530 - if (n && __access_ok((unsigned long) to, n))
5531 + if ((long)n < 0)
5532 + return n;
5533 +
5534 + if (n && __access_ok((unsigned long) to, n)) {
5535 + if (!__builtin_constant_p(n))
5536 + check_object_size(from, n, true);
5537 return __copy_user(to, (__force void __user *) from, n);
5538 - else
5539 + } else
5540 return n;
5541 }
5542
5543 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5544 {
5545 + if ((long)n < 0)
5546 + return n;
5547 +
5548 + if (!__builtin_constant_p(n))
5549 + check_object_size(from, n, true);
5550 +
5551 return __copy_user(to, (__force void __user *) from, n);
5552 }
5553
5554 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5555 {
5556 - if (n && __access_ok((unsigned long) from, n))
5557 + if ((long)n < 0)
5558 + return n;
5559 +
5560 + if (n && __access_ok((unsigned long) from, n)) {
5561 + if (!__builtin_constant_p(n))
5562 + check_object_size(to, n, false);
5563 return __copy_user((__force void __user *) to, from, n);
5564 - else
5565 + } else
5566 return n;
5567 }
5568
5569 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5570 {
5571 + if ((long)n < 0)
5572 + return n;
5573 +
5574 return __copy_user((__force void __user *) to, from, n);
5575 }
5576
5577 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5578 index a1091afb..380228e 100644
5579 --- a/arch/sparc/include/asm/uaccess_64.h
5580 +++ b/arch/sparc/include/asm/uaccess_64.h
5581 @@ -10,6 +10,7 @@
5582 #include <linux/compiler.h>
5583 #include <linux/string.h>
5584 #include <linux/thread_info.h>
5585 +#include <linux/kernel.h>
5586 #include <asm/asi.h>
5587 #include <asm/spitfire.h>
5588 #include <asm-generic/uaccess-unaligned.h>
5589 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5590 static inline unsigned long __must_check
5591 copy_from_user(void *to, const void __user *from, unsigned long size)
5592 {
5593 - unsigned long ret = ___copy_from_user(to, from, size);
5594 + unsigned long ret;
5595
5596 + if ((long)size < 0 || size > INT_MAX)
5597 + return size;
5598 +
5599 + if (!__builtin_constant_p(size))
5600 + check_object_size(to, size, false);
5601 +
5602 + ret = ___copy_from_user(to, from, size);
5603 if (unlikely(ret))
5604 ret = copy_from_user_fixup(to, from, size);
5605
5606 @@ -229,8 +237,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5607 static inline unsigned long __must_check
5608 copy_to_user(void __user *to, const void *from, unsigned long size)
5609 {
5610 - unsigned long ret = ___copy_to_user(to, from, size);
5611 + unsigned long ret;
5612
5613 + if ((long)size < 0 || size > INT_MAX)
5614 + return size;
5615 +
5616 + if (!__builtin_constant_p(size))
5617 + check_object_size(from, size, true);
5618 +
5619 + ret = ___copy_to_user(to, from, size);
5620 if (unlikely(ret))
5621 ret = copy_to_user_fixup(to, from, size);
5622 return ret;
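Both copy directions gain the same two guards before any bytes move: a size sanity check that treats a length which is negative when read as signed (or larger than INT_MAX) as the likely result of an underflowed size calculation and simply reports the whole length as uncopied, and a check_object_size() call, the USERCOPY hook declared in uaccess.h above, which validates runtime-sized copies against the bounds of the kernel object involved. A condensed sketch of the pattern:

/* Condensed version of the hardened copy pattern added above.
 * check_object_size() is the hook declared in uaccess.h by this patch;
 * do_raw_copy() stands in for ___copy_from_user()/___copy_to_user(). */
#include <limits.h>
#include <stdbool.h>

extern void check_object_size(const void *ptr, unsigned long n, bool to_user);
extern unsigned long do_raw_copy(void *dst, const void *src, unsigned long n);

static unsigned long hardened_copy(void *dst, const void *src, unsigned long n,
                                   bool to_user, bool n_is_constant)
{
        /* A length that is negative as a signed value, or absurdly large,
         * almost certainly comes from an underflowed size calculation:
         * refuse the copy and report "n bytes not copied". */
        if ((long)n < 0 || n > INT_MAX)
                return n;

        /* Only runtime-sized copies are checked; the kernel-side buffer is
         * the source for copy_to_user and the destination for copy_from_user. */
        if (!n_is_constant)
                check_object_size(to_user ? src : dst, n, to_user);

        return do_raw_copy(dst, src, n);
}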
5623 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5624 index cb85458..e063f17 100644
5625 --- a/arch/sparc/kernel/Makefile
5626 +++ b/arch/sparc/kernel/Makefile
5627 @@ -3,7 +3,7 @@
5628 #
5629
5630 asflags-y := -ansi
5631 -ccflags-y := -Werror
5632 +#ccflags-y := -Werror
5633
5634 extra-y := head_$(BITS).o
5635 extra-y += init_task.o
5636 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5637 index efa0754..74b03fe 100644
5638 --- a/arch/sparc/kernel/process_32.c
5639 +++ b/arch/sparc/kernel/process_32.c
5640 @@ -200,7 +200,7 @@ void __show_backtrace(unsigned long fp)
5641 rw->ins[4], rw->ins[5],
5642 rw->ins[6],
5643 rw->ins[7]);
5644 - printk("%pS\n", (void *) rw->ins[7]);
5645 + printk("%pA\n", (void *) rw->ins[7]);
5646 rw = (struct reg_window32 *) rw->ins[6];
5647 }
5648 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5649 @@ -267,14 +267,14 @@ void show_regs(struct pt_regs *r)
5650
5651 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5652 r->psr, r->pc, r->npc, r->y, print_tainted());
5653 - printk("PC: <%pS>\n", (void *) r->pc);
5654 + printk("PC: <%pA>\n", (void *) r->pc);
5655 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5656 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5657 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5658 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5659 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5660 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5661 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5662 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5663
5664 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5665 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5666 @@ -309,7 +309,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5667 rw = (struct reg_window32 *) fp;
5668 pc = rw->ins[7];
5669 printk("[%08lx : ", pc);
5670 - printk("%pS ] ", (void *) pc);
5671 + printk("%pA ] ", (void *) pc);
5672 fp = rw->ins[6];
5673 } while (++count < 16);
5674 printk("\n");
5675 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5676 index aff0c72..9067b39 100644
5677 --- a/arch/sparc/kernel/process_64.c
5678 +++ b/arch/sparc/kernel/process_64.c
5679 @@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
5680 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5681 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5682 if (regs->tstate & TSTATE_PRIV)
5683 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5684 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5685 }
5686
5687 void show_regs(struct pt_regs *regs)
5688 {
5689 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5690 regs->tpc, regs->tnpc, regs->y, print_tainted());
5691 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5692 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5693 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5694 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5695 regs->u_regs[3]);
5696 @@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
5697 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5698 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5699 regs->u_regs[15]);
5700 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5701 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5702 show_regwindow(regs);
5703 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5704 }
5705 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5706 ((tp && tp->task) ? tp->task->pid : -1));
5707
5708 if (gp->tstate & TSTATE_PRIV) {
5709 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5710 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5711 (void *) gp->tpc,
5712 (void *) gp->o7,
5713 (void *) gp->i7,
5714 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5715 index 6f97c07..b1300ec 100644
5716 --- a/arch/sparc/kernel/ptrace_64.c
5717 +++ b/arch/sparc/kernel/ptrace_64.c
5718 @@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5719 return ret;
5720 }
5721
5722 +#ifdef CONFIG_GRKERNSEC_SETXID
5723 +extern void gr_delayed_cred_worker(void);
5724 +#endif
5725 +
5726 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5727 {
5728 int ret = 0;
5729 @@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5730 /* do the secure computing check first */
5731 secure_computing(regs->u_regs[UREG_G1]);
5732
5733 +#ifdef CONFIG_GRKERNSEC_SETXID
5734 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5735 + gr_delayed_cred_worker();
5736 +#endif
5737 +
5738 if (test_thread_flag(TIF_SYSCALL_TRACE))
5739 ret = tracehook_report_syscall_entry(regs);
5740
5741 @@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5742
5743 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5744 {
5745 +#ifdef CONFIG_GRKERNSEC_SETXID
5746 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5747 + gr_delayed_cred_worker();
5748 +#endif
5749 +
5750 audit_syscall_exit(regs);
5751
5752 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
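Both syscall_trace_enter() and syscall_trace_leave() now test-and-clear TIF_GRSEC_SETXID and, if it was set, run gr_delayed_cred_worker(). The GRKERNSEC_SETXID feature presumably works by having a setuid()-style call in one thread flag its sibling threads, each of which then applies the new credentials at its next syscall boundary. A sketch of that pattern; apart from gr_delayed_cred_worker() the helper names are illustrative:

/* Sketch of the deferred-credential pattern wired in above.  The flag value
 * matches the thread_info_64.h hunk; test_and_clear_thread_flag_sketch()
 * stands in for the kernel's test_and_clear_thread_flag(). */
#define TIF_GRSEC_SETXID 8

extern int  test_and_clear_thread_flag_sketch(int flag);
extern void gr_delayed_cred_worker(void);

static void syscall_edge_hook(void)
{
        /* Runs on both syscall entry and exit, so a flagged thread picks up
         * the pending credential change at its next kernel crossing. */
        if (test_and_clear_thread_flag_sketch(TIF_GRSEC_SETXID))
                gr_delayed_cred_worker();
}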
5753 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5754 index 42b282f..28ce9f2 100644
5755 --- a/arch/sparc/kernel/sys_sparc_32.c
5756 +++ b/arch/sparc/kernel/sys_sparc_32.c
5757 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5758 if (ARCH_SUN4C && len > 0x20000000)
5759 return -ENOMEM;
5760 if (!addr)
5761 - addr = TASK_UNMAPPED_BASE;
5762 + addr = current->mm->mmap_base;
5763
5764 if (flags & MAP_SHARED)
5765 addr = COLOUR_ALIGN(addr);
5766 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5767 }
5768 if (TASK_SIZE - PAGE_SIZE - len < addr)
5769 return -ENOMEM;
5770 - if (!vmm || addr + len <= vmm->vm_start)
5771 + if (check_heap_stack_gap(vmm, addr, len))
5772 return addr;
5773 addr = vmm->vm_end;
5774 if (flags & MAP_SHARED)
5775 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5776 index 3ee51f1..2ba4913 100644
5777 --- a/arch/sparc/kernel/sys_sparc_64.c
5778 +++ b/arch/sparc/kernel/sys_sparc_64.c
5779 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5780 /* We do not accept a shared mapping if it would violate
5781 * cache aliasing constraints.
5782 */
5783 - if ((flags & MAP_SHARED) &&
5784 + if ((filp || (flags & MAP_SHARED)) &&
5785 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5786 return -EINVAL;
5787 return addr;
5788 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5789 if (filp || (flags & MAP_SHARED))
5790 do_color_align = 1;
5791
5792 +#ifdef CONFIG_PAX_RANDMMAP
5793 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5794 +#endif
5795 +
5796 if (addr) {
5797 if (do_color_align)
5798 addr = COLOUR_ALIGN(addr, pgoff);
5799 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5800 addr = PAGE_ALIGN(addr);
5801
5802 vma = find_vma(mm, addr);
5803 - if (task_size - len >= addr &&
5804 - (!vma || addr + len <= vma->vm_start))
5805 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5806 return addr;
5807 }
5808
5809 if (len > mm->cached_hole_size) {
5810 - start_addr = addr = mm->free_area_cache;
5811 + start_addr = addr = mm->free_area_cache;
5812 } else {
5813 - start_addr = addr = TASK_UNMAPPED_BASE;
5814 + start_addr = addr = mm->mmap_base;
5815 mm->cached_hole_size = 0;
5816 }
5817
5818 @@ -174,14 +177,14 @@ full_search:
5819 vma = find_vma(mm, VA_EXCLUDE_END);
5820 }
5821 if (unlikely(task_size < addr)) {
5822 - if (start_addr != TASK_UNMAPPED_BASE) {
5823 - start_addr = addr = TASK_UNMAPPED_BASE;
5824 + if (start_addr != mm->mmap_base) {
5825 + start_addr = addr = mm->mmap_base;
5826 mm->cached_hole_size = 0;
5827 goto full_search;
5828 }
5829 return -ENOMEM;
5830 }
5831 - if (likely(!vma || addr + len <= vma->vm_start)) {
5832 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5833 /*
5834 * Remember the place where we stopped the search:
5835 */
5836 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5837 /* We do not accept a shared mapping if it would violate
5838 * cache aliasing constraints.
5839 */
5840 - if ((flags & MAP_SHARED) &&
5841 + if ((filp || (flags & MAP_SHARED)) &&
5842 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5843 return -EINVAL;
5844 return addr;
5845 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5846 addr = PAGE_ALIGN(addr);
5847
5848 vma = find_vma(mm, addr);
5849 - if (task_size - len >= addr &&
5850 - (!vma || addr + len <= vma->vm_start))
5851 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5852 return addr;
5853 }
5854
5855 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5856 /* make sure it can fit in the remaining address space */
5857 if (likely(addr > len)) {
5858 vma = find_vma(mm, addr-len);
5859 - if (!vma || addr <= vma->vm_start) {
5860 + if (check_heap_stack_gap(vma, addr - len, len)) {
5861 /* remember the address as a hint for next time */
5862 return (mm->free_area_cache = addr-len);
5863 }
5864 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5865 if (unlikely(mm->mmap_base < len))
5866 goto bottomup;
5867
5868 - addr = mm->mmap_base-len;
5869 - if (do_color_align)
5870 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5871 + addr = mm->mmap_base - len;
5872
5873 do {
5874 + if (do_color_align)
5875 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5876 /*
5877 * Lookup failure means no vma is above this address,
5878 * else if new region fits below vma->vm_start,
5879 * return with success:
5880 */
5881 vma = find_vma(mm, addr);
5882 - if (likely(!vma || addr+len <= vma->vm_start)) {
5883 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5884 /* remember the address as a hint for next time */
5885 return (mm->free_area_cache = addr);
5886 }
5887 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5888 mm->cached_hole_size = vma->vm_start - addr;
5889
5890 /* try just below the current vma->vm_start */
5891 - addr = vma->vm_start-len;
5892 - if (do_color_align)
5893 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5894 - } while (likely(len < vma->vm_start));
5895 + addr = skip_heap_stack_gap(vma, len);
5896 + } while (!IS_ERR_VALUE(addr));
5897
5898 bottomup:
5899 /*
5900 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5901 gap == RLIM_INFINITY ||
5902 sysctl_legacy_va_layout) {
5903 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5904 +
5905 +#ifdef CONFIG_PAX_RANDMMAP
5906 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5907 + mm->mmap_base += mm->delta_mmap;
5908 +#endif
5909 +
5910 mm->get_unmapped_area = arch_get_unmapped_area;
5911 mm->unmap_area = arch_unmap_area;
5912 } else {
5913 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5914 gap = (task_size / 6 * 5);
5915
5916 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5917 +
5918 +#ifdef CONFIG_PAX_RANDMMAP
5919 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5920 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5921 +#endif
5922 +
5923 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5924 mm->unmap_area = arch_unmap_area_topdown;
5925 }
5926 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
5927 index 1d7e274..b39c527 100644
5928 --- a/arch/sparc/kernel/syscalls.S
5929 +++ b/arch/sparc/kernel/syscalls.S
5930 @@ -62,7 +62,7 @@ sys32_rt_sigreturn:
5931 #endif
5932 .align 32
5933 1: ldx [%g6 + TI_FLAGS], %l5
5934 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5935 + andcc %l5, _TIF_WORK_SYSCALL, %g0
5936 be,pt %icc, rtrap
5937 nop
5938 call syscall_trace_leave
5939 @@ -179,7 +179,7 @@ linux_sparc_syscall32:
5940
5941 srl %i5, 0, %o5 ! IEU1
5942 srl %i2, 0, %o2 ! IEU0 Group
5943 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5944 + andcc %l0, _TIF_WORK_SYSCALL, %g0
5945 bne,pn %icc, linux_syscall_trace32 ! CTI
5946 mov %i0, %l5 ! IEU1
5947 call %l7 ! CTI Group brk forced
5948 @@ -202,7 +202,7 @@ linux_sparc_syscall:
5949
5950 mov %i3, %o3 ! IEU1
5951 mov %i4, %o4 ! IEU0 Group
5952 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5953 + andcc %l0, _TIF_WORK_SYSCALL, %g0
5954 bne,pn %icc, linux_syscall_trace ! CTI Group
5955 mov %i0, %l5 ! IEU0
5956 2: call %l7 ! CTI Group brk forced
5957 @@ -226,7 +226,7 @@ ret_sys_call:
5958
5959 cmp %o0, -ERESTART_RESTARTBLOCK
5960 bgeu,pn %xcc, 1f
5961 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
5962 + andcc %l0, _TIF_WORK_SYSCALL, %l6
5963 80:
5964 /* System call success, clear Carry condition code. */
5965 andn %g3, %g2, %g3
5966 @@ -241,7 +241,7 @@ ret_sys_call:
5967 /* System call failure, set Carry condition code.
5968 * Also, get abs(errno) to return to the process.
5969 */
5970 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
5971 + andcc %l0, _TIF_WORK_SYSCALL, %l6
5972 sub %g0, %o0, %o0
5973 or %g3, %g2, %g3
5974 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
5975 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5976 index d2de213..6b22bc3 100644
5977 --- a/arch/sparc/kernel/traps_32.c
5978 +++ b/arch/sparc/kernel/traps_32.c
5979 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5980 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5981 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5982
5983 +extern void gr_handle_kernel_exploit(void);
5984 +
5985 void die_if_kernel(char *str, struct pt_regs *regs)
5986 {
5987 static int die_counter;
5988 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5989 count++ < 30 &&
5990 (((unsigned long) rw) >= PAGE_OFFSET) &&
5991 !(((unsigned long) rw) & 0x7)) {
5992 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5993 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5994 (void *) rw->ins[7]);
5995 rw = (struct reg_window32 *)rw->ins[6];
5996 }
5997 }
5998 printk("Instruction DUMP:");
5999 instruction_dump ((unsigned long *) regs->pc);
6000 - if(regs->psr & PSR_PS)
6001 + if(regs->psr & PSR_PS) {
6002 + gr_handle_kernel_exploit();
6003 do_exit(SIGKILL);
6004 + }
6005 do_exit(SIGSEGV);
6006 }
6007
6008 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6009 index c72fdf5..743a344 100644
6010 --- a/arch/sparc/kernel/traps_64.c
6011 +++ b/arch/sparc/kernel/traps_64.c
6012 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6013 i + 1,
6014 p->trapstack[i].tstate, p->trapstack[i].tpc,
6015 p->trapstack[i].tnpc, p->trapstack[i].tt);
6016 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6017 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6018 }
6019 }
6020
6021 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6022
6023 lvl -= 0x100;
6024 if (regs->tstate & TSTATE_PRIV) {
6025 +
6026 +#ifdef CONFIG_PAX_REFCOUNT
6027 + if (lvl == 6)
6028 + pax_report_refcount_overflow(regs);
6029 +#endif
6030 +
6031 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6032 die_if_kernel(buffer, regs);
6033 }
6034 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6035 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6036 {
6037 char buffer[32];
6038 -
6039 +
6040 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6041 0, lvl, SIGTRAP) == NOTIFY_STOP)
6042 return;
6043
6044 +#ifdef CONFIG_PAX_REFCOUNT
6045 + if (lvl == 6)
6046 + pax_report_refcount_overflow(regs);
6047 +#endif
6048 +
6049 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6050
6051 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6052 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6053 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6054 printk("%s" "ERROR(%d): ",
6055 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6056 - printk("TPC<%pS>\n", (void *) regs->tpc);
6057 + printk("TPC<%pA>\n", (void *) regs->tpc);
6058 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6059 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6060 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6061 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6062 smp_processor_id(),
6063 (type & 0x1) ? 'I' : 'D',
6064 regs->tpc);
6065 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6066 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6067 panic("Irrecoverable Cheetah+ parity error.");
6068 }
6069
6070 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6071 smp_processor_id(),
6072 (type & 0x1) ? 'I' : 'D',
6073 regs->tpc);
6074 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6075 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6076 }
6077
6078 struct sun4v_error_entry {
6079 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6080
6081 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6082 regs->tpc, tl);
6083 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6084 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6085 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6086 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6087 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6088 (void *) regs->u_regs[UREG_I7]);
6089 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6090 "pte[%lx] error[%lx]\n",
6091 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6092
6093 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6094 regs->tpc, tl);
6095 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6096 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6097 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6098 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6099 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6100 (void *) regs->u_regs[UREG_I7]);
6101 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6102 "pte[%lx] error[%lx]\n",
6103 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6104 fp = (unsigned long)sf->fp + STACK_BIAS;
6105 }
6106
6107 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6108 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6109 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6110 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6111 int index = tsk->curr_ret_stack;
6112 if (tsk->ret_stack && index >= graph) {
6113 pc = tsk->ret_stack[index - graph].ret;
6114 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6115 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6116 graph++;
6117 }
6118 }
6119 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6120 return (struct reg_window *) (fp + STACK_BIAS);
6121 }
6122
6123 +extern void gr_handle_kernel_exploit(void);
6124 +
6125 void die_if_kernel(char *str, struct pt_regs *regs)
6126 {
6127 static int die_counter;
6128 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6129 while (rw &&
6130 count++ < 30 &&
6131 kstack_valid(tp, (unsigned long) rw)) {
6132 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
6133 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
6134 (void *) rw->ins[7]);
6135
6136 rw = kernel_stack_up(rw);
6137 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6138 }
6139 user_instruction_dump ((unsigned int __user *) regs->tpc);
6140 }
6141 - if (regs->tstate & TSTATE_PRIV)
6142 + if (regs->tstate & TSTATE_PRIV) {
6143 + gr_handle_kernel_exploit();
6144 do_exit(SIGKILL);
6145 + }
6146 do_exit(SIGSEGV);
6147 }
6148 EXPORT_SYMBOL(die_if_kernel);
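This file is also where the `tvs ..., 6` instructions inserted into the atomics and rwlocks land: a software trap n arrives as trap level 0x100 + n, bad_trap() subtracts 0x100, and the new `lvl == 6` test (a similar check is added to bad_trap_tl1()) hands the event to pax_report_refcount_overflow() before the usual "Kernel bad sw trap" handling; die_if_kernel() additionally calls gr_handle_kernel_exploit() when the faulting context was privileged. A compressed sketch of that routing, with pt_regs and the helper prototypes reduced to assumptions:

/* Compressed sketch of the trap routing added above; the struct, flag value
 * and prototypes are simplified assumptions, while the 0x100 adjustment and
 * the lvl == 6 test follow the bad_trap() code in the hunks. */
struct pt_regs_sketch { unsigned long tstate; };
#define TSTATE_PRIV 0x4UL               /* assumed bit value */

extern void pax_report_refcount_overflow(struct pt_regs_sketch *regs);
extern void die_sketch(const char *msg, struct pt_regs_sketch *regs);

static void bad_trap_sketch(struct pt_regs_sketch *regs, long lvl)
{
        lvl -= 0x100;                   /* recover the software trap number */

        if (regs->tstate & TSTATE_PRIV) {
                if (lvl == 6)           /* raised by "tvs %icc, 6" / "tvs %xcc, 6" */
                        pax_report_refcount_overflow(regs);
                die_sketch("Kernel bad sw trap", regs);
        }
        /* user-mode software traps are delivered as signals (not shown) */
}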
6149 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6150 index dae85bc..af1e19d 100644
6151 --- a/arch/sparc/kernel/unaligned_64.c
6152 +++ b/arch/sparc/kernel/unaligned_64.c
6153 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6154 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6155
6156 if (__ratelimit(&ratelimit)) {
6157 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
6158 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
6159 regs->tpc, (void *) regs->tpc);
6160 }
6161 }
6162 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6163 index a3fc437..fea9957 100644
6164 --- a/arch/sparc/lib/Makefile
6165 +++ b/arch/sparc/lib/Makefile
6166 @@ -2,7 +2,7 @@
6167 #
6168
6169 asflags-y := -ansi -DST_DIV0=0x02
6170 -ccflags-y := -Werror
6171 +#ccflags-y := -Werror
6172
6173 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6174 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6175 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6176 index 59186e0..f747d7a 100644
6177 --- a/arch/sparc/lib/atomic_64.S
6178 +++ b/arch/sparc/lib/atomic_64.S
6179 @@ -18,7 +18,12 @@
6180 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6181 BACKOFF_SETUP(%o2)
6182 1: lduw [%o1], %g1
6183 - add %g1, %o0, %g7
6184 + addcc %g1, %o0, %g7
6185 +
6186 +#ifdef CONFIG_PAX_REFCOUNT
6187 + tvs %icc, 6
6188 +#endif
6189 +
6190 cas [%o1], %g1, %g7
6191 cmp %g1, %g7
6192 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6193 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6194 2: BACKOFF_SPIN(%o2, %o3, 1b)
6195 .size atomic_add, .-atomic_add
6196
6197 + .globl atomic_add_unchecked
6198 + .type atomic_add_unchecked,#function
6199 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6200 + BACKOFF_SETUP(%o2)
6201 +1: lduw [%o1], %g1
6202 + add %g1, %o0, %g7
6203 + cas [%o1], %g1, %g7
6204 + cmp %g1, %g7
6205 + bne,pn %icc, 2f
6206 + nop
6207 + retl
6208 + nop
6209 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6210 + .size atomic_add_unchecked, .-atomic_add_unchecked
6211 +
6212 .globl atomic_sub
6213 .type atomic_sub,#function
6214 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6215 BACKOFF_SETUP(%o2)
6216 1: lduw [%o1], %g1
6217 - sub %g1, %o0, %g7
6218 + subcc %g1, %o0, %g7
6219 +
6220 +#ifdef CONFIG_PAX_REFCOUNT
6221 + tvs %icc, 6
6222 +#endif
6223 +
6224 cas [%o1], %g1, %g7
6225 cmp %g1, %g7
6226 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6227 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6228 2: BACKOFF_SPIN(%o2, %o3, 1b)
6229 .size atomic_sub, .-atomic_sub
6230
6231 + .globl atomic_sub_unchecked
6232 + .type atomic_sub_unchecked,#function
6233 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6234 + BACKOFF_SETUP(%o2)
6235 +1: lduw [%o1], %g1
6236 + sub %g1, %o0, %g7
6237 + cas [%o1], %g1, %g7
6238 + cmp %g1, %g7
6239 + bne,pn %icc, 2f
6240 + nop
6241 + retl
6242 + nop
6243 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6244 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
6245 +
6246 .globl atomic_add_ret
6247 .type atomic_add_ret,#function
6248 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6249 BACKOFF_SETUP(%o2)
6250 1: lduw [%o1], %g1
6251 - add %g1, %o0, %g7
6252 + addcc %g1, %o0, %g7
6253 +
6254 +#ifdef CONFIG_PAX_REFCOUNT
6255 + tvs %icc, 6
6256 +#endif
6257 +
6258 cas [%o1], %g1, %g7
6259 cmp %g1, %g7
6260 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6261 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6262 2: BACKOFF_SPIN(%o2, %o3, 1b)
6263 .size atomic_add_ret, .-atomic_add_ret
6264
6265 + .globl atomic_add_ret_unchecked
6266 + .type atomic_add_ret_unchecked,#function
6267 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6268 + BACKOFF_SETUP(%o2)
6269 +1: lduw [%o1], %g1
6270 + addcc %g1, %o0, %g7
6271 + cas [%o1], %g1, %g7
6272 + cmp %g1, %g7
6273 + bne,pn %icc, 2f
6274 + add %g7, %o0, %g7
6275 + sra %g7, 0, %o0
6276 + retl
6277 + nop
6278 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6279 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6280 +
6281 .globl atomic_sub_ret
6282 .type atomic_sub_ret,#function
6283 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6284 BACKOFF_SETUP(%o2)
6285 1: lduw [%o1], %g1
6286 - sub %g1, %o0, %g7
6287 + subcc %g1, %o0, %g7
6288 +
6289 +#ifdef CONFIG_PAX_REFCOUNT
6290 + tvs %icc, 6
6291 +#endif
6292 +
6293 cas [%o1], %g1, %g7
6294 cmp %g1, %g7
6295 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6296 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6297 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6298 BACKOFF_SETUP(%o2)
6299 1: ldx [%o1], %g1
6300 - add %g1, %o0, %g7
6301 + addcc %g1, %o0, %g7
6302 +
6303 +#ifdef CONFIG_PAX_REFCOUNT
6304 + tvs %xcc, 6
6305 +#endif
6306 +
6307 casx [%o1], %g1, %g7
6308 cmp %g1, %g7
6309 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6310 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6311 2: BACKOFF_SPIN(%o2, %o3, 1b)
6312 .size atomic64_add, .-atomic64_add
6313
6314 + .globl atomic64_add_unchecked
6315 + .type atomic64_add_unchecked,#function
6316 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6317 + BACKOFF_SETUP(%o2)
6318 +1: ldx [%o1], %g1
6319 + addcc %g1, %o0, %g7
6320 + casx [%o1], %g1, %g7
6321 + cmp %g1, %g7
6322 + bne,pn %xcc, 2f
6323 + nop
6324 + retl
6325 + nop
6326 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6327 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
6328 +
6329 .globl atomic64_sub
6330 .type atomic64_sub,#function
6331 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6332 BACKOFF_SETUP(%o2)
6333 1: ldx [%o1], %g1
6334 - sub %g1, %o0, %g7
6335 + subcc %g1, %o0, %g7
6336 +
6337 +#ifdef CONFIG_PAX_REFCOUNT
6338 + tvs %xcc, 6
6339 +#endif
6340 +
6341 casx [%o1], %g1, %g7
6342 cmp %g1, %g7
6343 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6344 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6345 2: BACKOFF_SPIN(%o2, %o3, 1b)
6346 .size atomic64_sub, .-atomic64_sub
6347
6348 + .globl atomic64_sub_unchecked
6349 + .type atomic64_sub_unchecked,#function
6350 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6351 + BACKOFF_SETUP(%o2)
6352 +1: ldx [%o1], %g1
6353 + subcc %g1, %o0, %g7
6354 + casx [%o1], %g1, %g7
6355 + cmp %g1, %g7
6356 + bne,pn %xcc, 2f
6357 + nop
6358 + retl
6359 + nop
6360 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6361 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6362 +
6363 .globl atomic64_add_ret
6364 .type atomic64_add_ret,#function
6365 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6366 BACKOFF_SETUP(%o2)
6367 1: ldx [%o1], %g1
6368 - add %g1, %o0, %g7
6369 + addcc %g1, %o0, %g7
6370 +
6371 +#ifdef CONFIG_PAX_REFCOUNT
6372 + tvs %xcc, 6
6373 +#endif
6374 +
6375 casx [%o1], %g1, %g7
6376 cmp %g1, %g7
6377 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6378 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6379 2: BACKOFF_SPIN(%o2, %o3, 1b)
6380 .size atomic64_add_ret, .-atomic64_add_ret
6381
6382 + .globl atomic64_add_ret_unchecked
6383 + .type atomic64_add_ret_unchecked,#function
6384 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6385 + BACKOFF_SETUP(%o2)
6386 +1: ldx [%o1], %g1
6387 + addcc %g1, %o0, %g7
6388 + casx [%o1], %g1, %g7
6389 + cmp %g1, %g7
6390 + bne,pn %xcc, 2f
6391 + add %g7, %o0, %g7
6392 + mov %g7, %o0
6393 + retl
6394 + nop
6395 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6396 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6397 +
6398 .globl atomic64_sub_ret
6399 .type atomic64_sub_ret,#function
6400 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6401 BACKOFF_SETUP(%o2)
6402 1: ldx [%o1], %g1
6403 - sub %g1, %o0, %g7
6404 + subcc %g1, %o0, %g7
6405 +
6406 +#ifdef CONFIG_PAX_REFCOUNT
6407 + tvs %xcc, 6
6408 +#endif
6409 +
6410 casx [%o1], %g1, %g7
6411 cmp %g1, %g7
6412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6413 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6414 index f73c224..662af10 100644
6415 --- a/arch/sparc/lib/ksyms.c
6416 +++ b/arch/sparc/lib/ksyms.c
6417 @@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6418
6419 /* Atomic counter implementation. */
6420 EXPORT_SYMBOL(atomic_add);
6421 +EXPORT_SYMBOL(atomic_add_unchecked);
6422 EXPORT_SYMBOL(atomic_add_ret);
6423 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
6424 EXPORT_SYMBOL(atomic_sub);
6425 +EXPORT_SYMBOL(atomic_sub_unchecked);
6426 EXPORT_SYMBOL(atomic_sub_ret);
6427 EXPORT_SYMBOL(atomic64_add);
6428 +EXPORT_SYMBOL(atomic64_add_unchecked);
6429 EXPORT_SYMBOL(atomic64_add_ret);
6430 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6431 EXPORT_SYMBOL(atomic64_sub);
6432 +EXPORT_SYMBOL(atomic64_sub_unchecked);
6433 EXPORT_SYMBOL(atomic64_sub_ret);
6434
6435 /* Atomic bit operations. */
6436 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6437 index 301421c..e2535d1 100644
6438 --- a/arch/sparc/mm/Makefile
6439 +++ b/arch/sparc/mm/Makefile
6440 @@ -2,7 +2,7 @@
6441 #
6442
6443 asflags-y := -ansi
6444 -ccflags-y := -Werror
6445 +#ccflags-y := -Werror
6446
6447 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6448 obj-y += fault_$(BITS).o
6449 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6450 index df3155a..eb708b8 100644
6451 --- a/arch/sparc/mm/fault_32.c
6452 +++ b/arch/sparc/mm/fault_32.c
6453 @@ -21,6 +21,9 @@
6454 #include <linux/perf_event.h>
6455 #include <linux/interrupt.h>
6456 #include <linux/kdebug.h>
6457 +#include <linux/slab.h>
6458 +#include <linux/pagemap.h>
6459 +#include <linux/compiler.h>
6460
6461 #include <asm/page.h>
6462 #include <asm/pgtable.h>
6463 @@ -207,6 +210,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6464 return safe_compute_effective_address(regs, insn);
6465 }
6466
6467 +#ifdef CONFIG_PAX_PAGEEXEC
6468 +#ifdef CONFIG_PAX_DLRESOLVE
6469 +static void pax_emuplt_close(struct vm_area_struct *vma)
6470 +{
6471 + vma->vm_mm->call_dl_resolve = 0UL;
6472 +}
6473 +
6474 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6475 +{
6476 + unsigned int *kaddr;
6477 +
6478 + vmf->page = alloc_page(GFP_HIGHUSER);
6479 + if (!vmf->page)
6480 + return VM_FAULT_OOM;
6481 +
6482 + kaddr = kmap(vmf->page);
6483 + memset(kaddr, 0, PAGE_SIZE);
6484 + kaddr[0] = 0x9DE3BFA8U; /* save */
6485 + flush_dcache_page(vmf->page);
6486 + kunmap(vmf->page);
6487 + return VM_FAULT_MAJOR;
6488 +}
6489 +
6490 +static const struct vm_operations_struct pax_vm_ops = {
6491 + .close = pax_emuplt_close,
6492 + .fault = pax_emuplt_fault
6493 +};
6494 +
6495 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6496 +{
6497 + int ret;
6498 +
6499 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6500 + vma->vm_mm = current->mm;
6501 + vma->vm_start = addr;
6502 + vma->vm_end = addr + PAGE_SIZE;
6503 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6504 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6505 + vma->vm_ops = &pax_vm_ops;
6506 +
6507 + ret = insert_vm_struct(current->mm, vma);
6508 + if (ret)
6509 + return ret;
6510 +
6511 + ++current->mm->total_vm;
6512 + return 0;
6513 +}
6514 +#endif
6515 +
6516 +/*
6517 + * PaX: decide what to do with offenders (regs->pc = fault address)
6518 + *
6519 + * returns 1 when task should be killed
6520 + * 2 when patched PLT trampoline was detected
6521 + * 3 when unpatched PLT trampoline was detected
6522 + */
6523 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6524 +{
6525 +
6526 +#ifdef CONFIG_PAX_EMUPLT
6527 + int err;
6528 +
6529 + do { /* PaX: patched PLT emulation #1 */
6530 + unsigned int sethi1, sethi2, jmpl;
6531 +
6532 + err = get_user(sethi1, (unsigned int *)regs->pc);
6533 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6534 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6535 +
6536 + if (err)
6537 + break;
6538 +
6539 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6540 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6541 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6542 + {
6543 + unsigned int addr;
6544 +
6545 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6546 + addr = regs->u_regs[UREG_G1];
6547 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6548 + regs->pc = addr;
6549 + regs->npc = addr+4;
6550 + return 2;
6551 + }
6552 + } while (0);
6553 +
6554 + { /* PaX: patched PLT emulation #2 */
6555 + unsigned int ba;
6556 +
6557 + err = get_user(ba, (unsigned int *)regs->pc);
6558 +
6559 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6560 + unsigned int addr;
6561 +
6562 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6563 + regs->pc = addr;
6564 + regs->npc = addr+4;
6565 + return 2;
6566 + }
6567 + }
6568 +
6569 + do { /* PaX: patched PLT emulation #3 */
6570 + unsigned int sethi, jmpl, nop;
6571 +
6572 + err = get_user(sethi, (unsigned int *)regs->pc);
6573 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6574 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6575 +
6576 + if (err)
6577 + break;
6578 +
6579 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6580 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6581 + nop == 0x01000000U)
6582 + {
6583 + unsigned int addr;
6584 +
6585 + addr = (sethi & 0x003FFFFFU) << 10;
6586 + regs->u_regs[UREG_G1] = addr;
6587 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6588 + regs->pc = addr;
6589 + regs->npc = addr+4;
6590 + return 2;
6591 + }
6592 + } while (0);
6593 +
6594 + do { /* PaX: unpatched PLT emulation step 1 */
6595 + unsigned int sethi, ba, nop;
6596 +
6597 + err = get_user(sethi, (unsigned int *)regs->pc);
6598 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6599 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6600 +
6601 + if (err)
6602 + break;
6603 +
6604 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6605 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6606 + nop == 0x01000000U)
6607 + {
6608 + unsigned int addr, save, call;
6609 +
6610 + if ((ba & 0xFFC00000U) == 0x30800000U)
6611 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6612 + else
6613 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6614 +
6615 + err = get_user(save, (unsigned int *)addr);
6616 + err |= get_user(call, (unsigned int *)(addr+4));
6617 + err |= get_user(nop, (unsigned int *)(addr+8));
6618 + if (err)
6619 + break;
6620 +
6621 +#ifdef CONFIG_PAX_DLRESOLVE
6622 + if (save == 0x9DE3BFA8U &&
6623 + (call & 0xC0000000U) == 0x40000000U &&
6624 + nop == 0x01000000U)
6625 + {
6626 + struct vm_area_struct *vma;
6627 + unsigned long call_dl_resolve;
6628 +
6629 + down_read(&current->mm->mmap_sem);
6630 + call_dl_resolve = current->mm->call_dl_resolve;
6631 + up_read(&current->mm->mmap_sem);
6632 + if (likely(call_dl_resolve))
6633 + goto emulate;
6634 +
6635 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6636 +
6637 + down_write(&current->mm->mmap_sem);
6638 + if (current->mm->call_dl_resolve) {
6639 + call_dl_resolve = current->mm->call_dl_resolve;
6640 + up_write(&current->mm->mmap_sem);
6641 + if (vma)
6642 + kmem_cache_free(vm_area_cachep, vma);
6643 + goto emulate;
6644 + }
6645 +
6646 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6647 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6648 + up_write(&current->mm->mmap_sem);
6649 + if (vma)
6650 + kmem_cache_free(vm_area_cachep, vma);
6651 + return 1;
6652 + }
6653 +
6654 + if (pax_insert_vma(vma, call_dl_resolve)) {
6655 + up_write(&current->mm->mmap_sem);
6656 + kmem_cache_free(vm_area_cachep, vma);
6657 + return 1;
6658 + }
6659 +
6660 + current->mm->call_dl_resolve = call_dl_resolve;
6661 + up_write(&current->mm->mmap_sem);
6662 +
6663 +emulate:
6664 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6665 + regs->pc = call_dl_resolve;
6666 + regs->npc = addr+4;
6667 + return 3;
6668 + }
6669 +#endif
6670 +
6671 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6672 + if ((save & 0xFFC00000U) == 0x05000000U &&
6673 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6674 + nop == 0x01000000U)
6675 + {
6676 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6677 + regs->u_regs[UREG_G2] = addr + 4;
6678 + addr = (save & 0x003FFFFFU) << 10;
6679 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6680 + regs->pc = addr;
6681 + regs->npc = addr+4;
6682 + return 3;
6683 + }
6684 + }
6685 + } while (0);
6686 +
6687 + do { /* PaX: unpatched PLT emulation step 2 */
6688 + unsigned int save, call, nop;
6689 +
6690 + err = get_user(save, (unsigned int *)(regs->pc-4));
6691 + err |= get_user(call, (unsigned int *)regs->pc);
6692 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6693 + if (err)
6694 + break;
6695 +
6696 + if (save == 0x9DE3BFA8U &&
6697 + (call & 0xC0000000U) == 0x40000000U &&
6698 + nop == 0x01000000U)
6699 + {
6700 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6701 +
6702 + regs->u_regs[UREG_RETPC] = regs->pc;
6703 + regs->pc = dl_resolve;
6704 + regs->npc = dl_resolve+4;
6705 + return 3;
6706 + }
6707 + } while (0);
6708 +#endif
6709 +
6710 + return 1;
6711 +}
6712 +
6713 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6714 +{
6715 + unsigned long i;
6716 +
6717 + printk(KERN_ERR "PAX: bytes at PC: ");
6718 + for (i = 0; i < 8; i++) {
6719 + unsigned int c;
6720 + if (get_user(c, (unsigned int *)pc+i))
6721 + printk(KERN_CONT "???????? ");
6722 + else
6723 + printk(KERN_CONT "%08x ", c);
6724 + }
6725 + printk("\n");
6726 +}
6727 +#endif
6728 +
6729 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6730 int text_fault)
6731 {
6732 @@ -282,6 +547,24 @@ good_area:
6733 if(!(vma->vm_flags & VM_WRITE))
6734 goto bad_area;
6735 } else {
6736 +
6737 +#ifdef CONFIG_PAX_PAGEEXEC
6738 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6739 + up_read(&mm->mmap_sem);
6740 + switch (pax_handle_fetch_fault(regs)) {
6741 +
6742 +#ifdef CONFIG_PAX_EMUPLT
6743 + case 2:
6744 + case 3:
6745 + return;
6746 +#endif
6747 +
6748 + }
6749 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6750 + do_group_exit(SIGKILL);
6751 + }
6752 +#endif
6753 +
6754 /* Allow reads even for write-only mappings */
6755 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6756 goto bad_area;
6757 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6758 index 1fe0429..aee2e87 100644
6759 --- a/arch/sparc/mm/fault_64.c
6760 +++ b/arch/sparc/mm/fault_64.c
6761 @@ -21,6 +21,9 @@
6762 #include <linux/kprobes.h>
6763 #include <linux/kdebug.h>
6764 #include <linux/percpu.h>
6765 +#include <linux/slab.h>
6766 +#include <linux/pagemap.h>
6767 +#include <linux/compiler.h>
6768
6769 #include <asm/page.h>
6770 #include <asm/pgtable.h>
6771 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6772 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6773 regs->tpc);
6774 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6775 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6776 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6777 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6778 dump_stack();
6779 unhandled_fault(regs->tpc, current, regs);
6780 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6781 show_regs(regs);
6782 }
6783
6784 +#ifdef CONFIG_PAX_PAGEEXEC
6785 +#ifdef CONFIG_PAX_DLRESOLVE
6786 +static void pax_emuplt_close(struct vm_area_struct *vma)
6787 +{
6788 + vma->vm_mm->call_dl_resolve = 0UL;
6789 +}
6790 +
6791 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6792 +{
6793 + unsigned int *kaddr;
6794 +
6795 + vmf->page = alloc_page(GFP_HIGHUSER);
6796 + if (!vmf->page)
6797 + return VM_FAULT_OOM;
6798 +
6799 + kaddr = kmap(vmf->page);
6800 + memset(kaddr, 0, PAGE_SIZE);
6801 + kaddr[0] = 0x9DE3BFA8U; /* save */
6802 + flush_dcache_page(vmf->page);
6803 + kunmap(vmf->page);
6804 + return VM_FAULT_MAJOR;
6805 +}
6806 +
6807 +static const struct vm_operations_struct pax_vm_ops = {
6808 + .close = pax_emuplt_close,
6809 + .fault = pax_emuplt_fault
6810 +};
6811 +
6812 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6813 +{
6814 + int ret;
6815 +
6816 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6817 + vma->vm_mm = current->mm;
6818 + vma->vm_start = addr;
6819 + vma->vm_end = addr + PAGE_SIZE;
6820 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6821 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6822 + vma->vm_ops = &pax_vm_ops;
6823 +
6824 + ret = insert_vm_struct(current->mm, vma);
6825 + if (ret)
6826 + return ret;
6827 +
6828 + ++current->mm->total_vm;
6829 + return 0;
6830 +}
6831 +#endif
6832 +
6833 +/*
6834 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6835 + *
6836 + * returns 1 when task should be killed
6837 + * 2 when patched PLT trampoline was detected
6838 + * 3 when unpatched PLT trampoline was detected
6839 + */
6840 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6841 +{
6842 +
6843 +#ifdef CONFIG_PAX_EMUPLT
6844 + int err;
6845 +
6846 + do { /* PaX: patched PLT emulation #1 */
6847 + unsigned int sethi1, sethi2, jmpl;
6848 +
6849 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6850 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6851 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6852 +
6853 + if (err)
6854 + break;
6855 +
6856 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6857 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6858 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6859 + {
6860 + unsigned long addr;
6861 +
6862 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6863 + addr = regs->u_regs[UREG_G1];
6864 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6865 +
6866 + if (test_thread_flag(TIF_32BIT))
6867 + addr &= 0xFFFFFFFFUL;
6868 +
6869 + regs->tpc = addr;
6870 + regs->tnpc = addr+4;
6871 + return 2;
6872 + }
6873 + } while (0);
6874 +
6875 + { /* PaX: patched PLT emulation #2 */
6876 + unsigned int ba;
6877 +
6878 + err = get_user(ba, (unsigned int *)regs->tpc);
6879 +
6880 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6881 + unsigned long addr;
6882 +
6883 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6884 +
6885 + if (test_thread_flag(TIF_32BIT))
6886 + addr &= 0xFFFFFFFFUL;
6887 +
6888 + regs->tpc = addr;
6889 + regs->tnpc = addr+4;
6890 + return 2;
6891 + }
6892 + }
6893 +
6894 + do { /* PaX: patched PLT emulation #3 */
6895 + unsigned int sethi, jmpl, nop;
6896 +
6897 + err = get_user(sethi, (unsigned int *)regs->tpc);
6898 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6899 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6900 +
6901 + if (err)
6902 + break;
6903 +
6904 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6905 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6906 + nop == 0x01000000U)
6907 + {
6908 + unsigned long addr;
6909 +
6910 + addr = (sethi & 0x003FFFFFU) << 10;
6911 + regs->u_regs[UREG_G1] = addr;
6912 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6913 +
6914 + if (test_thread_flag(TIF_32BIT))
6915 + addr &= 0xFFFFFFFFUL;
6916 +
6917 + regs->tpc = addr;
6918 + regs->tnpc = addr+4;
6919 + return 2;
6920 + }
6921 + } while (0);
6922 +
6923 + do { /* PaX: patched PLT emulation #4 */
6924 + unsigned int sethi, mov1, call, mov2;
6925 +
6926 + err = get_user(sethi, (unsigned int *)regs->tpc);
6927 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6928 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6929 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6930 +
6931 + if (err)
6932 + break;
6933 +
6934 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6935 + mov1 == 0x8210000FU &&
6936 + (call & 0xC0000000U) == 0x40000000U &&
6937 + mov2 == 0x9E100001U)
6938 + {
6939 + unsigned long addr;
6940 +
6941 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6942 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6943 +
6944 + if (test_thread_flag(TIF_32BIT))
6945 + addr &= 0xFFFFFFFFUL;
6946 +
6947 + regs->tpc = addr;
6948 + regs->tnpc = addr+4;
6949 + return 2;
6950 + }
6951 + } while (0);
6952 +
6953 + do { /* PaX: patched PLT emulation #5 */
6954 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6955 +
6956 + err = get_user(sethi, (unsigned int *)regs->tpc);
6957 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6958 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6959 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6960 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6961 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6962 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6963 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6964 +
6965 + if (err)
6966 + break;
6967 +
6968 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6969 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6970 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6971 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6972 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6973 + sllx == 0x83287020U &&
6974 + jmpl == 0x81C04005U &&
6975 + nop == 0x01000000U)
6976 + {
6977 + unsigned long addr;
6978 +
6979 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6980 + regs->u_regs[UREG_G1] <<= 32;
6981 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6982 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6983 + regs->tpc = addr;
6984 + regs->tnpc = addr+4;
6985 + return 2;
6986 + }
6987 + } while (0);
6988 +
6989 + do { /* PaX: patched PLT emulation #6 */
6990 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6991 +
6992 + err = get_user(sethi, (unsigned int *)regs->tpc);
6993 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6994 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6995 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6996 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6997 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6998 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6999 +
7000 + if (err)
7001 + break;
7002 +
7003 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7004 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7005 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7006 + sllx == 0x83287020U &&
7007 + (or & 0xFFFFE000U) == 0x8A116000U &&
7008 + jmpl == 0x81C04005U &&
7009 + nop == 0x01000000U)
7010 + {
7011 + unsigned long addr;
7012 +
7013 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7014 + regs->u_regs[UREG_G1] <<= 32;
7015 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7016 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7017 + regs->tpc = addr;
7018 + regs->tnpc = addr+4;
7019 + return 2;
7020 + }
7021 + } while (0);
7022 +
7023 + do { /* PaX: unpatched PLT emulation step 1 */
7024 + unsigned int sethi, ba, nop;
7025 +
7026 + err = get_user(sethi, (unsigned int *)regs->tpc);
7027 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7028 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7029 +
7030 + if (err)
7031 + break;
7032 +
7033 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7034 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7035 + nop == 0x01000000U)
7036 + {
7037 + unsigned long addr;
7038 + unsigned int save, call;
7039 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7040 +
7041 + if ((ba & 0xFFC00000U) == 0x30800000U)
7042 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7043 + else
7044 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7045 +
7046 + if (test_thread_flag(TIF_32BIT))
7047 + addr &= 0xFFFFFFFFUL;
7048 +
7049 + err = get_user(save, (unsigned int *)addr);
7050 + err |= get_user(call, (unsigned int *)(addr+4));
7051 + err |= get_user(nop, (unsigned int *)(addr+8));
7052 + if (err)
7053 + break;
7054 +
7055 +#ifdef CONFIG_PAX_DLRESOLVE
7056 + if (save == 0x9DE3BFA8U &&
7057 + (call & 0xC0000000U) == 0x40000000U &&
7058 + nop == 0x01000000U)
7059 + {
7060 + struct vm_area_struct *vma;
7061 + unsigned long call_dl_resolve;
7062 +
7063 + down_read(&current->mm->mmap_sem);
7064 + call_dl_resolve = current->mm->call_dl_resolve;
7065 + up_read(&current->mm->mmap_sem);
7066 + if (likely(call_dl_resolve))
7067 + goto emulate;
7068 +
7069 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7070 +
7071 + down_write(&current->mm->mmap_sem);
7072 + if (current->mm->call_dl_resolve) {
7073 + call_dl_resolve = current->mm->call_dl_resolve;
7074 + up_write(&current->mm->mmap_sem);
7075 + if (vma)
7076 + kmem_cache_free(vm_area_cachep, vma);
7077 + goto emulate;
7078 + }
7079 +
7080 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7081 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7082 + up_write(&current->mm->mmap_sem);
7083 + if (vma)
7084 + kmem_cache_free(vm_area_cachep, vma);
7085 + return 1;
7086 + }
7087 +
7088 + if (pax_insert_vma(vma, call_dl_resolve)) {
7089 + up_write(&current->mm->mmap_sem);
7090 + kmem_cache_free(vm_area_cachep, vma);
7091 + return 1;
7092 + }
7093 +
7094 + current->mm->call_dl_resolve = call_dl_resolve;
7095 + up_write(&current->mm->mmap_sem);
7096 +
7097 +emulate:
7098 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7099 + regs->tpc = call_dl_resolve;
7100 + regs->tnpc = addr+4;
7101 + return 3;
7102 + }
7103 +#endif
7104 +
7105 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7106 + if ((save & 0xFFC00000U) == 0x05000000U &&
7107 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7108 + nop == 0x01000000U)
7109 + {
7110 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7111 + regs->u_regs[UREG_G2] = addr + 4;
7112 + addr = (save & 0x003FFFFFU) << 10;
7113 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7114 +
7115 + if (test_thread_flag(TIF_32BIT))
7116 + addr &= 0xFFFFFFFFUL;
7117 +
7118 + regs->tpc = addr;
7119 + regs->tnpc = addr+4;
7120 + return 3;
7121 + }
7122 +
7123 + /* PaX: 64-bit PLT stub */
7124 + err = get_user(sethi1, (unsigned int *)addr);
7125 + err |= get_user(sethi2, (unsigned int *)(addr+4));
7126 + err |= get_user(or1, (unsigned int *)(addr+8));
7127 + err |= get_user(or2, (unsigned int *)(addr+12));
7128 + err |= get_user(sllx, (unsigned int *)(addr+16));
7129 + err |= get_user(add, (unsigned int *)(addr+20));
7130 + err |= get_user(jmpl, (unsigned int *)(addr+24));
7131 + err |= get_user(nop, (unsigned int *)(addr+28));
7132 + if (err)
7133 + break;
7134 +
7135 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7136 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7137 + (or1 & 0xFFFFE000U) == 0x88112000U &&
7138 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7139 + sllx == 0x89293020U &&
7140 + add == 0x8A010005U &&
7141 + jmpl == 0x89C14000U &&
7142 + nop == 0x01000000U)
7143 + {
7144 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7145 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7146 + regs->u_regs[UREG_G4] <<= 32;
7147 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7148 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7149 + regs->u_regs[UREG_G4] = addr + 24;
7150 + addr = regs->u_regs[UREG_G5];
7151 + regs->tpc = addr;
7152 + regs->tnpc = addr+4;
7153 + return 3;
7154 + }
7155 + }
7156 + } while (0);
7157 +
7158 +#ifdef CONFIG_PAX_DLRESOLVE
7159 + do { /* PaX: unpatched PLT emulation step 2 */
7160 + unsigned int save, call, nop;
7161 +
7162 + err = get_user(save, (unsigned int *)(regs->tpc-4));
7163 + err |= get_user(call, (unsigned int *)regs->tpc);
7164 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7165 + if (err)
7166 + break;
7167 +
7168 + if (save == 0x9DE3BFA8U &&
7169 + (call & 0xC0000000U) == 0x40000000U &&
7170 + nop == 0x01000000U)
7171 + {
7172 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7173 +
7174 + if (test_thread_flag(TIF_32BIT))
7175 + dl_resolve &= 0xFFFFFFFFUL;
7176 +
7177 + regs->u_regs[UREG_RETPC] = regs->tpc;
7178 + regs->tpc = dl_resolve;
7179 + regs->tnpc = dl_resolve+4;
7180 + return 3;
7181 + }
7182 + } while (0);
7183 +#endif
7184 +
7185 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7186 + unsigned int sethi, ba, nop;
7187 +
7188 + err = get_user(sethi, (unsigned int *)regs->tpc);
7189 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7190 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7191 +
7192 + if (err)
7193 + break;
7194 +
7195 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7196 + (ba & 0xFFF00000U) == 0x30600000U &&
7197 + nop == 0x01000000U)
7198 + {
7199 + unsigned long addr;
7200 +
7201 + addr = (sethi & 0x003FFFFFU) << 10;
7202 + regs->u_regs[UREG_G1] = addr;
7203 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7204 +
7205 + if (test_thread_flag(TIF_32BIT))
7206 + addr &= 0xFFFFFFFFUL;
7207 +
7208 + regs->tpc = addr;
7209 + regs->tnpc = addr+4;
7210 + return 2;
7211 + }
7212 + } while (0);
7213 +
7214 +#endif
7215 +
7216 + return 1;
7217 +}
7218 +
7219 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7220 +{
7221 + unsigned long i;
7222 +
7223 + printk(KERN_ERR "PAX: bytes at PC: ");
7224 + for (i = 0; i < 8; i++) {
7225 + unsigned int c;
7226 + if (get_user(c, (unsigned int *)pc+i))
7227 + printk(KERN_CONT "???????? ");
7228 + else
7229 + printk(KERN_CONT "%08x ", c);
7230 + }
7231 + printk("\n");
7232 +}
7233 +#endif
7234 +
7235 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7236 {
7237 struct mm_struct *mm = current->mm;
7238 @@ -343,6 +797,29 @@ retry:
7239 if (!vma)
7240 goto bad_area;
7241
7242 +#ifdef CONFIG_PAX_PAGEEXEC
7243 + /* PaX: detect ITLB misses on non-exec pages */
7244 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7245 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7246 + {
7247 + if (address != regs->tpc)
7248 + goto good_area;
7249 +
7250 + up_read(&mm->mmap_sem);
7251 + switch (pax_handle_fetch_fault(regs)) {
7252 +
7253 +#ifdef CONFIG_PAX_EMUPLT
7254 + case 2:
7255 + case 3:
7256 + return;
7257 +#endif
7258 +
7259 + }
7260 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7261 + do_group_exit(SIGKILL);
7262 + }
7263 +#endif
7264 +
7265 /* Pure DTLB misses do not tell us whether the fault causing
7266 * load/store/atomic was a write or not, it only says that there
7267 * was no match. So in such a case we (carefully) read the
7268 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7269 index 07e1453..0a7d9e9 100644
7270 --- a/arch/sparc/mm/hugetlbpage.c
7271 +++ b/arch/sparc/mm/hugetlbpage.c
7272 @@ -67,7 +67,7 @@ full_search:
7273 }
7274 return -ENOMEM;
7275 }
7276 - if (likely(!vma || addr + len <= vma->vm_start)) {
7277 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7278 /*
7279 * Remember the place where we stopped the search:
7280 */
7281 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7282 /* make sure it can fit in the remaining address space */
7283 if (likely(addr > len)) {
7284 vma = find_vma(mm, addr-len);
7285 - if (!vma || addr <= vma->vm_start) {
7286 + if (check_heap_stack_gap(vma, addr - len, len)) {
7287 /* remember the address as a hint for next time */
7288 return (mm->free_area_cache = addr-len);
7289 }
7290 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7291 if (unlikely(mm->mmap_base < len))
7292 goto bottomup;
7293
7294 - addr = (mm->mmap_base-len) & HPAGE_MASK;
7295 + addr = mm->mmap_base - len;
7296
7297 do {
7298 + addr &= HPAGE_MASK;
7299 /*
7300 * Lookup failure means no vma is above this address,
7301 * else if new region fits below vma->vm_start,
7302 * return with success:
7303 */
7304 vma = find_vma(mm, addr);
7305 - if (likely(!vma || addr+len <= vma->vm_start)) {
7306 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7307 /* remember the address as a hint for next time */
7308 return (mm->free_area_cache = addr);
7309 }
7310 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7311 mm->cached_hole_size = vma->vm_start - addr;
7312
7313 /* try just below the current vma->vm_start */
7314 - addr = (vma->vm_start-len) & HPAGE_MASK;
7315 - } while (likely(len < vma->vm_start));
7316 + addr = skip_heap_stack_gap(vma, len);
7317 + } while (!IS_ERR_VALUE(addr));
7318
7319 bottomup:
7320 /*
7321 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7322 if (addr) {
7323 addr = ALIGN(addr, HPAGE_SIZE);
7324 vma = find_vma(mm, addr);
7325 - if (task_size - len >= addr &&
7326 - (!vma || addr + len <= vma->vm_start))
7327 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7328 return addr;
7329 }
7330 if (mm->get_unmapped_area == arch_get_unmapped_area)
7331 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7332 index c5f9021..7591bae 100644
7333 --- a/arch/sparc/mm/init_32.c
7334 +++ b/arch/sparc/mm/init_32.c
7335 @@ -315,6 +315,9 @@ extern void device_scan(void);
7336 pgprot_t PAGE_SHARED __read_mostly;
7337 EXPORT_SYMBOL(PAGE_SHARED);
7338
7339 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7340 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7341 +
7342 void __init paging_init(void)
7343 {
7344 switch(sparc_cpu_model) {
7345 @@ -343,17 +346,17 @@ void __init paging_init(void)
7346
7347 /* Initialize the protection map with non-constant, MMU dependent values. */
7348 protection_map[0] = PAGE_NONE;
7349 - protection_map[1] = PAGE_READONLY;
7350 - protection_map[2] = PAGE_COPY;
7351 - protection_map[3] = PAGE_COPY;
7352 + protection_map[1] = PAGE_READONLY_NOEXEC;
7353 + protection_map[2] = PAGE_COPY_NOEXEC;
7354 + protection_map[3] = PAGE_COPY_NOEXEC;
7355 protection_map[4] = PAGE_READONLY;
7356 protection_map[5] = PAGE_READONLY;
7357 protection_map[6] = PAGE_COPY;
7358 protection_map[7] = PAGE_COPY;
7359 protection_map[8] = PAGE_NONE;
7360 - protection_map[9] = PAGE_READONLY;
7361 - protection_map[10] = PAGE_SHARED;
7362 - protection_map[11] = PAGE_SHARED;
7363 + protection_map[9] = PAGE_READONLY_NOEXEC;
7364 + protection_map[10] = PAGE_SHARED_NOEXEC;
7365 + protection_map[11] = PAGE_SHARED_NOEXEC;
7366 protection_map[12] = PAGE_READONLY;
7367 protection_map[13] = PAGE_READONLY;
7368 protection_map[14] = PAGE_SHARED;
7369 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7370 index cbef74e..c38fead 100644
7371 --- a/arch/sparc/mm/srmmu.c
7372 +++ b/arch/sparc/mm/srmmu.c
7373 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7374 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7375 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7376 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7377 +
7378 +#ifdef CONFIG_PAX_PAGEEXEC
7379 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7380 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7381 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7382 +#endif
7383 +
7384 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7385 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7386
7387 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7388 index f4500c6..889656c 100644
7389 --- a/arch/tile/include/asm/atomic_64.h
7390 +++ b/arch/tile/include/asm/atomic_64.h
7391 @@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7392
7393 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7394
7395 +#define atomic64_read_unchecked(v) atomic64_read(v)
7396 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7397 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7398 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7399 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7400 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7401 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7402 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7403 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7404 +
7405 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7406 #define smp_mb__before_atomic_dec() smp_mb()
7407 #define smp_mb__after_atomic_dec() smp_mb()
7408 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7409 index 392e533..536b092 100644
7410 --- a/arch/tile/include/asm/cache.h
7411 +++ b/arch/tile/include/asm/cache.h
7412 @@ -15,11 +15,12 @@
7413 #ifndef _ASM_TILE_CACHE_H
7414 #define _ASM_TILE_CACHE_H
7415
7416 +#include <linux/const.h>
7417 #include <arch/chip.h>
7418
7419 /* bytes per L1 data cache line */
7420 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7421 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7422 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7423
7424 /* bytes per L2 cache line */
7425 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7426 diff --git a/arch/um/Makefile b/arch/um/Makefile
7427 index 55c0661..86ad413 100644
7428 --- a/arch/um/Makefile
7429 +++ b/arch/um/Makefile
7430 @@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7431 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7432 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7433
7434 +ifdef CONSTIFY_PLUGIN
7435 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7436 +endif
7437 +
7438 #This will adjust *FLAGS accordingly to the platform.
7439 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7440
7441 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7442 index 19e1bdd..3665b77 100644
7443 --- a/arch/um/include/asm/cache.h
7444 +++ b/arch/um/include/asm/cache.h
7445 @@ -1,6 +1,7 @@
7446 #ifndef __UM_CACHE_H
7447 #define __UM_CACHE_H
7448
7449 +#include <linux/const.h>
7450
7451 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7452 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7453 @@ -12,6 +13,6 @@
7454 # define L1_CACHE_SHIFT 5
7455 #endif
7456
7457 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7458 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7459
7460 #endif
7461 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7462 index 6c03acd..a5e0215 100644
7463 --- a/arch/um/include/asm/kmap_types.h
7464 +++ b/arch/um/include/asm/kmap_types.h
7465 @@ -23,6 +23,7 @@ enum km_type {
7466 KM_IRQ1,
7467 KM_SOFTIRQ0,
7468 KM_SOFTIRQ1,
7469 + KM_CLEARPAGE,
7470 KM_TYPE_NR
7471 };
7472
7473 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7474 index 7cfc3ce..cbd1a58 100644
7475 --- a/arch/um/include/asm/page.h
7476 +++ b/arch/um/include/asm/page.h
7477 @@ -14,6 +14,9 @@
7478 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7479 #define PAGE_MASK (~(PAGE_SIZE-1))
7480
7481 +#define ktla_ktva(addr) (addr)
7482 +#define ktva_ktla(addr) (addr)
7483 +
7484 #ifndef __ASSEMBLY__
7485
7486 struct page;
7487 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7488 index 0032f92..cd151e0 100644
7489 --- a/arch/um/include/asm/pgtable-3level.h
7490 +++ b/arch/um/include/asm/pgtable-3level.h
7491 @@ -58,6 +58,7 @@
7492 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7493 #define pud_populate(mm, pud, pmd) \
7494 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7495 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7496
7497 #ifdef CONFIG_64BIT
7498 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7499 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7500 index 2b73ded..804f540 100644
7501 --- a/arch/um/kernel/process.c
7502 +++ b/arch/um/kernel/process.c
7503 @@ -404,22 +404,6 @@ int singlestepping(void * t)
7504 return 2;
7505 }
7506
7507 -/*
7508 - * Only x86 and x86_64 have an arch_align_stack().
7509 - * All other arches have "#define arch_align_stack(x) (x)"
7510 - * in their asm/system.h
7511 - * As this is included in UML from asm-um/system-generic.h,
7512 - * we can use it to behave as the subarch does.
7513 - */
7514 -#ifndef arch_align_stack
7515 -unsigned long arch_align_stack(unsigned long sp)
7516 -{
7517 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7518 - sp -= get_random_int() % 8192;
7519 - return sp & ~0xf;
7520 -}
7521 -#endif
7522 -
7523 unsigned long get_wchan(struct task_struct *p)
7524 {
7525 unsigned long stack_page, sp, ip;
7526 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7527 index ad8f795..2c7eec6 100644
7528 --- a/arch/unicore32/include/asm/cache.h
7529 +++ b/arch/unicore32/include/asm/cache.h
7530 @@ -12,8 +12,10 @@
7531 #ifndef __UNICORE_CACHE_H__
7532 #define __UNICORE_CACHE_H__
7533
7534 -#define L1_CACHE_SHIFT (5)
7535 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7536 +#include <linux/const.h>
7537 +
7538 +#define L1_CACHE_SHIFT 5
7539 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7540
7541 /*
7542 * Memory returned by kmalloc() may be used for DMA, so we must make
7543 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7544 index c9866b0..fe53aef 100644
7545 --- a/arch/x86/Kconfig
7546 +++ b/arch/x86/Kconfig
7547 @@ -229,7 +229,7 @@ config X86_HT
7548
7549 config X86_32_LAZY_GS
7550 def_bool y
7551 - depends on X86_32 && !CC_STACKPROTECTOR
7552 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7553
7554 config ARCH_HWEIGHT_CFLAGS
7555 string
7556 @@ -1042,7 +1042,7 @@ choice
7557
7558 config NOHIGHMEM
7559 bool "off"
7560 - depends on !X86_NUMAQ
7561 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7562 ---help---
7563 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7564 However, the address space of 32-bit x86 processors is only 4
7565 @@ -1079,7 +1079,7 @@ config NOHIGHMEM
7566
7567 config HIGHMEM4G
7568 bool "4GB"
7569 - depends on !X86_NUMAQ
7570 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7571 ---help---
7572 Select this if you have a 32-bit processor and between 1 and 4
7573 gigabytes of physical RAM.
7574 @@ -1133,7 +1133,7 @@ config PAGE_OFFSET
7575 hex
7576 default 0xB0000000 if VMSPLIT_3G_OPT
7577 default 0x80000000 if VMSPLIT_2G
7578 - default 0x78000000 if VMSPLIT_2G_OPT
7579 + default 0x70000000 if VMSPLIT_2G_OPT
7580 default 0x40000000 if VMSPLIT_1G
7581 default 0xC0000000
7582 depends on X86_32
7583 @@ -1523,6 +1523,7 @@ config SECCOMP
7584
7585 config CC_STACKPROTECTOR
7586 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7587 + depends on X86_64 || !PAX_MEMORY_UDEREF
7588 ---help---
7589 This option turns on the -fstack-protector GCC feature. This
7590 feature puts, at the beginning of functions, a canary value on
7591 @@ -1580,6 +1581,7 @@ config KEXEC_JUMP
7592 config PHYSICAL_START
7593 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7594 default "0x1000000"
7595 + range 0x400000 0x40000000
7596 ---help---
7597 This gives the physical address where the kernel is loaded.
7598
7599 @@ -1643,6 +1645,7 @@ config X86_NEED_RELOCS
7600 config PHYSICAL_ALIGN
7601 hex "Alignment value to which kernel should be aligned" if X86_32
7602 default "0x1000000"
7603 + range 0x400000 0x1000000 if PAX_KERNEXEC
7604 range 0x2000 0x1000000
7605 ---help---
7606 This value puts the alignment restrictions on physical address
7607 @@ -1674,9 +1677,10 @@ config HOTPLUG_CPU
7608 Say N if you want to disable CPU hotplug.
7609
7610 config COMPAT_VDSO
7611 - def_bool y
7612 + def_bool n
7613 prompt "Compat VDSO support"
7614 depends on X86_32 || IA32_EMULATION
7615 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7616 ---help---
7617 Map the 32-bit VDSO to the predictable old-style address too.
7618
7619 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7620 index 706e12e..62e4feb 100644
7621 --- a/arch/x86/Kconfig.cpu
7622 +++ b/arch/x86/Kconfig.cpu
7623 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7624
7625 config X86_F00F_BUG
7626 def_bool y
7627 - depends on M586MMX || M586TSC || M586 || M486 || M386
7628 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7629
7630 config X86_INVD_BUG
7631 def_bool y
7632 @@ -358,7 +358,7 @@ config X86_POPAD_OK
7633
7634 config X86_ALIGNMENT_16
7635 def_bool y
7636 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7637 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7638
7639 config X86_INTEL_USERCOPY
7640 def_bool y
7641 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
7642 # generates cmov.
7643 config X86_CMOV
7644 def_bool y
7645 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7646 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7647
7648 config X86_MINIMUM_CPU_FAMILY
7649 int
7650 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7651 index e46c214..7c72b55 100644
7652 --- a/arch/x86/Kconfig.debug
7653 +++ b/arch/x86/Kconfig.debug
7654 @@ -84,7 +84,7 @@ config X86_PTDUMP
7655 config DEBUG_RODATA
7656 bool "Write protect kernel read-only data structures"
7657 default y
7658 - depends on DEBUG_KERNEL
7659 + depends on DEBUG_KERNEL && BROKEN
7660 ---help---
7661 Mark the kernel read-only data as write-protected in the pagetables,
7662 in order to catch accidental (and incorrect) writes to such const
7663 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7664
7665 config DEBUG_SET_MODULE_RONX
7666 bool "Set loadable kernel module data as NX and text as RO"
7667 - depends on MODULES
7668 + depends on MODULES && BROKEN
7669 ---help---
7670 This option helps catch unintended modifications to loadable
7671 kernel module's text and read-only data. It also prevents execution
7672 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7673 index b1c611e..2c1a823 100644
7674 --- a/arch/x86/Makefile
7675 +++ b/arch/x86/Makefile
7676 @@ -46,6 +46,7 @@ else
7677 UTS_MACHINE := x86_64
7678 CHECKFLAGS += -D__x86_64__ -m64
7679
7680 + biarch := $(call cc-option,-m64)
7681 KBUILD_AFLAGS += -m64
7682 KBUILD_CFLAGS += -m64
7683
7684 @@ -222,3 +223,12 @@ define archhelp
7685 echo ' FDARGS="..." arguments for the booted kernel'
7686 echo ' FDINITRD=file initrd for the booted kernel'
7687 endef
7688 +
7689 +define OLD_LD
7690 +
7691 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7692 +*** Please upgrade your binutils to 2.18 or newer
7693 +endef
7694 +
7695 +archprepare:
7696 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7697 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7698 index 5a747dd..ff7b12c 100644
7699 --- a/arch/x86/boot/Makefile
7700 +++ b/arch/x86/boot/Makefile
7701 @@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7702 $(call cc-option, -fno-stack-protector) \
7703 $(call cc-option, -mpreferred-stack-boundary=2)
7704 KBUILD_CFLAGS += $(call cc-option, -m32)
7705 +ifdef CONSTIFY_PLUGIN
7706 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7707 +endif
7708 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7709 GCOV_PROFILE := n
7710
7711 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7712 index 878e4b9..20537ab 100644
7713 --- a/arch/x86/boot/bitops.h
7714 +++ b/arch/x86/boot/bitops.h
7715 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7716 u8 v;
7717 const u32 *p = (const u32 *)addr;
7718
7719 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7720 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7721 return v;
7722 }
7723
7724 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7725
7726 static inline void set_bit(int nr, void *addr)
7727 {
7728 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7729 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7730 }
7731
7732 #endif /* BOOT_BITOPS_H */
7733 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7734 index 18997e5..83d9c67 100644
7735 --- a/arch/x86/boot/boot.h
7736 +++ b/arch/x86/boot/boot.h
7737 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7738 static inline u16 ds(void)
7739 {
7740 u16 seg;
7741 - asm("movw %%ds,%0" : "=rm" (seg));
7742 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7743 return seg;
7744 }
7745
7746 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7747 static inline int memcmp(const void *s1, const void *s2, size_t len)
7748 {
7749 u8 diff;
7750 - asm("repe; cmpsb; setnz %0"
7751 + asm volatile("repe; cmpsb; setnz %0"
7752 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7753 return diff;
7754 }
7755 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7756 index e398bb5..3a382ca 100644
7757 --- a/arch/x86/boot/compressed/Makefile
7758 +++ b/arch/x86/boot/compressed/Makefile
7759 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7760 KBUILD_CFLAGS += $(cflags-y)
7761 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7762 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7763 +ifdef CONSTIFY_PLUGIN
7764 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7765 +endif
7766
7767 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7768 GCOV_PROFILE := n
7769 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7770 index 0cdfc0d..6e79437 100644
7771 --- a/arch/x86/boot/compressed/eboot.c
7772 +++ b/arch/x86/boot/compressed/eboot.c
7773 @@ -122,7 +122,6 @@ again:
7774 *addr = max_addr;
7775 }
7776
7777 -free_pool:
7778 efi_call_phys1(sys_table->boottime->free_pool, map);
7779
7780 fail:
7781 @@ -186,7 +185,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7782 if (i == map_size / desc_size)
7783 status = EFI_NOT_FOUND;
7784
7785 -free_pool:
7786 efi_call_phys1(sys_table->boottime->free_pool, map);
7787 fail:
7788 return status;
7789 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7790 index c85e3ac..6f5aa80 100644
7791 --- a/arch/x86/boot/compressed/head_32.S
7792 +++ b/arch/x86/boot/compressed/head_32.S
7793 @@ -106,7 +106,7 @@ preferred_addr:
7794 notl %eax
7795 andl %eax, %ebx
7796 #else
7797 - movl $LOAD_PHYSICAL_ADDR, %ebx
7798 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7799 #endif
7800
7801 /* Target address to relocate to for decompression */
7802 @@ -192,7 +192,7 @@ relocated:
7803 * and where it was actually loaded.
7804 */
7805 movl %ebp, %ebx
7806 - subl $LOAD_PHYSICAL_ADDR, %ebx
7807 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7808 jz 2f /* Nothing to be done if loaded at compiled addr. */
7809 /*
7810 * Process relocations.
7811 @@ -200,8 +200,7 @@ relocated:
7812
7813 1: subl $4, %edi
7814 movl (%edi), %ecx
7815 - testl %ecx, %ecx
7816 - jz 2f
7817 + jecxz 2f
7818 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7819 jmp 1b
7820 2:
7821 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7822 index 87e03a1..0d94c76 100644
7823 --- a/arch/x86/boot/compressed/head_64.S
7824 +++ b/arch/x86/boot/compressed/head_64.S
7825 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7826 notl %eax
7827 andl %eax, %ebx
7828 #else
7829 - movl $LOAD_PHYSICAL_ADDR, %ebx
7830 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7831 #endif
7832
7833 /* Target address to relocate to for decompression */
7834 @@ -263,7 +263,7 @@ preferred_addr:
7835 notq %rax
7836 andq %rax, %rbp
7837 #else
7838 - movq $LOAD_PHYSICAL_ADDR, %rbp
7839 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7840 #endif
7841
7842 /* Target address to relocate to for decompression */
7843 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7844 index 7116dcb..d9ae1d7 100644
7845 --- a/arch/x86/boot/compressed/misc.c
7846 +++ b/arch/x86/boot/compressed/misc.c
7847 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7848 case PT_LOAD:
7849 #ifdef CONFIG_RELOCATABLE
7850 dest = output;
7851 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7852 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7853 #else
7854 dest = (void *)(phdr->p_paddr);
7855 #endif
7856 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7857 error("Destination address too large");
7858 #endif
7859 #ifndef CONFIG_RELOCATABLE
7860 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7861 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7862 error("Wrong destination address");
7863 #endif
7864
7865 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7866 index 4d3ff03..e4972ff 100644
7867 --- a/arch/x86/boot/cpucheck.c
7868 +++ b/arch/x86/boot/cpucheck.c
7869 @@ -74,7 +74,7 @@ static int has_fpu(void)
7870 u16 fcw = -1, fsw = -1;
7871 u32 cr0;
7872
7873 - asm("movl %%cr0,%0" : "=r" (cr0));
7874 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7875 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7876 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7877 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7878 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7879 {
7880 u32 f0, f1;
7881
7882 - asm("pushfl ; "
7883 + asm volatile("pushfl ; "
7884 "pushfl ; "
7885 "popl %0 ; "
7886 "movl %0,%1 ; "
7887 @@ -115,7 +115,7 @@ static void get_flags(void)
7888 set_bit(X86_FEATURE_FPU, cpu.flags);
7889
7890 if (has_eflag(X86_EFLAGS_ID)) {
7891 - asm("cpuid"
7892 + asm volatile("cpuid"
7893 : "=a" (max_intel_level),
7894 "=b" (cpu_vendor[0]),
7895 "=d" (cpu_vendor[1]),
7896 @@ -124,7 +124,7 @@ static void get_flags(void)
7897
7898 if (max_intel_level >= 0x00000001 &&
7899 max_intel_level <= 0x0000ffff) {
7900 - asm("cpuid"
7901 + asm volatile("cpuid"
7902 : "=a" (tfms),
7903 "=c" (cpu.flags[4]),
7904 "=d" (cpu.flags[0])
7905 @@ -136,7 +136,7 @@ static void get_flags(void)
7906 cpu.model += ((tfms >> 16) & 0xf) << 4;
7907 }
7908
7909 - asm("cpuid"
7910 + asm volatile("cpuid"
7911 : "=a" (max_amd_level)
7912 : "a" (0x80000000)
7913 : "ebx", "ecx", "edx");
7914 @@ -144,7 +144,7 @@ static void get_flags(void)
7915 if (max_amd_level >= 0x80000001 &&
7916 max_amd_level <= 0x8000ffff) {
7917 u32 eax = 0x80000001;
7918 - asm("cpuid"
7919 + asm volatile("cpuid"
7920 : "+a" (eax),
7921 "=c" (cpu.flags[6]),
7922 "=d" (cpu.flags[1])
7923 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7924 u32 ecx = MSR_K7_HWCR;
7925 u32 eax, edx;
7926
7927 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7928 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7929 eax &= ~(1 << 15);
7930 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7931 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7932
7933 get_flags(); /* Make sure it really did something */
7934 err = check_flags();
7935 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7936 u32 ecx = MSR_VIA_FCR;
7937 u32 eax, edx;
7938
7939 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7940 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7941 eax |= (1<<1)|(1<<7);
7942 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7943 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7944
7945 set_bit(X86_FEATURE_CX8, cpu.flags);
7946 err = check_flags();
7947 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7948 u32 eax, edx;
7949 u32 level = 1;
7950
7951 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7952 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7953 - asm("cpuid"
7954 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7955 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7956 + asm volatile("cpuid"
7957 : "+a" (level), "=d" (cpu.flags[0])
7958 : : "ecx", "ebx");
7959 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7960 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7961
7962 err = check_flags();
7963 }
7964 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7965 index f1bbeeb..aff09cb 100644
7966 --- a/arch/x86/boot/header.S
7967 +++ b/arch/x86/boot/header.S
7968 @@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7969 # single linked list of
7970 # struct setup_data
7971
7972 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7973 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7974
7975 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7976 #define VO_INIT_SIZE (VO__end - VO__text)
7977 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7978 index db75d07..8e6d0af 100644
7979 --- a/arch/x86/boot/memory.c
7980 +++ b/arch/x86/boot/memory.c
7981 @@ -19,7 +19,7 @@
7982
7983 static int detect_memory_e820(void)
7984 {
7985 - int count = 0;
7986 + unsigned int count = 0;
7987 struct biosregs ireg, oreg;
7988 struct e820entry *desc = boot_params.e820_map;
7989 static struct e820entry buf; /* static so it is zeroed */
7990 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7991 index 11e8c6e..fdbb1ed 100644
7992 --- a/arch/x86/boot/video-vesa.c
7993 +++ b/arch/x86/boot/video-vesa.c
7994 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7995
7996 boot_params.screen_info.vesapm_seg = oreg.es;
7997 boot_params.screen_info.vesapm_off = oreg.di;
7998 + boot_params.screen_info.vesapm_size = oreg.cx;
7999 }
8000
8001 /*
8002 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8003 index 43eda28..5ab5fdb 100644
8004 --- a/arch/x86/boot/video.c
8005 +++ b/arch/x86/boot/video.c
8006 @@ -96,7 +96,7 @@ static void store_mode_params(void)
8007 static unsigned int get_entry(void)
8008 {
8009 char entry_buf[4];
8010 - int i, len = 0;
8011 + unsigned int i, len = 0;
8012 int key;
8013 unsigned int v;
8014
8015 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8016 index 5b577d5..3c1fed4 100644
8017 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
8018 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8019 @@ -8,6 +8,8 @@
8020 * including this sentence is retained in full.
8021 */
8022
8023 +#include <asm/alternative-asm.h>
8024 +
8025 .extern crypto_ft_tab
8026 .extern crypto_it_tab
8027 .extern crypto_fl_tab
8028 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8029 je B192; \
8030 leaq 32(r9),r9;
8031
8032 +#define ret pax_force_retaddr 0, 1; ret
8033 +
8034 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8035 movq r1,r2; \
8036 movq r3,r4; \
8037 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8038 index be6d9e3..21fbbca 100644
8039 --- a/arch/x86/crypto/aesni-intel_asm.S
8040 +++ b/arch/x86/crypto/aesni-intel_asm.S
8041 @@ -31,6 +31,7 @@
8042
8043 #include <linux/linkage.h>
8044 #include <asm/inst.h>
8045 +#include <asm/alternative-asm.h>
8046
8047 #ifdef __x86_64__
8048 .data
8049 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8050 pop %r14
8051 pop %r13
8052 pop %r12
8053 + pax_force_retaddr 0, 1
8054 ret
8055 +ENDPROC(aesni_gcm_dec)
8056
8057
8058 /*****************************************************************************
8059 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8060 pop %r14
8061 pop %r13
8062 pop %r12
8063 + pax_force_retaddr 0, 1
8064 ret
8065 +ENDPROC(aesni_gcm_enc)
8066
8067 #endif
8068
8069 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
8070 pxor %xmm1, %xmm0
8071 movaps %xmm0, (TKEYP)
8072 add $0x10, TKEYP
8073 + pax_force_retaddr_bts
8074 ret
8075
8076 .align 4
8077 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
8078 shufps $0b01001110, %xmm2, %xmm1
8079 movaps %xmm1, 0x10(TKEYP)
8080 add $0x20, TKEYP
8081 + pax_force_retaddr_bts
8082 ret
8083
8084 .align 4
8085 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
8086
8087 movaps %xmm0, (TKEYP)
8088 add $0x10, TKEYP
8089 + pax_force_retaddr_bts
8090 ret
8091
8092 .align 4
8093 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
8094 pxor %xmm1, %xmm2
8095 movaps %xmm2, (TKEYP)
8096 add $0x10, TKEYP
8097 + pax_force_retaddr_bts
8098 ret
8099
8100 /*
8101 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8102 #ifndef __x86_64__
8103 popl KEYP
8104 #endif
8105 + pax_force_retaddr 0, 1
8106 ret
8107 +ENDPROC(aesni_set_key)
8108
8109 /*
8110 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8111 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8112 popl KLEN
8113 popl KEYP
8114 #endif
8115 + pax_force_retaddr 0, 1
8116 ret
8117 +ENDPROC(aesni_enc)
8118
8119 /*
8120 * _aesni_enc1: internal ABI
8121 @@ -1959,6 +1972,7 @@ _aesni_enc1:
8122 AESENC KEY STATE
8123 movaps 0x70(TKEYP), KEY
8124 AESENCLAST KEY STATE
8125 + pax_force_retaddr_bts
8126 ret
8127
8128 /*
8129 @@ -2067,6 +2081,7 @@ _aesni_enc4:
8130 AESENCLAST KEY STATE2
8131 AESENCLAST KEY STATE3
8132 AESENCLAST KEY STATE4
8133 + pax_force_retaddr_bts
8134 ret
8135
8136 /*
8137 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8138 popl KLEN
8139 popl KEYP
8140 #endif
8141 + pax_force_retaddr 0, 1
8142 ret
8143 +ENDPROC(aesni_dec)
8144
8145 /*
8146 * _aesni_dec1: internal ABI
8147 @@ -2146,6 +2163,7 @@ _aesni_dec1:
8148 AESDEC KEY STATE
8149 movaps 0x70(TKEYP), KEY
8150 AESDECLAST KEY STATE
8151 + pax_force_retaddr_bts
8152 ret
8153
8154 /*
8155 @@ -2254,6 +2272,7 @@ _aesni_dec4:
8156 AESDECLAST KEY STATE2
8157 AESDECLAST KEY STATE3
8158 AESDECLAST KEY STATE4
8159 + pax_force_retaddr_bts
8160 ret
8161
8162 /*
8163 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8164 popl KEYP
8165 popl LEN
8166 #endif
8167 + pax_force_retaddr 0, 1
8168 ret
8169 +ENDPROC(aesni_ecb_enc)
8170
8171 /*
8172 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8173 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8174 popl KEYP
8175 popl LEN
8176 #endif
8177 + pax_force_retaddr 0, 1
8178 ret
8179 +ENDPROC(aesni_ecb_dec)
8180
8181 /*
8182 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8183 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8184 popl LEN
8185 popl IVP
8186 #endif
8187 + pax_force_retaddr 0, 1
8188 ret
8189 +ENDPROC(aesni_cbc_enc)
8190
8191 /*
8192 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8193 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
8194 popl LEN
8195 popl IVP
8196 #endif
8197 + pax_force_retaddr 0, 1
8198 ret
8199 +ENDPROC(aesni_cbc_dec)
8200
8201 #ifdef __x86_64__
8202 .align 16
8203 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
8204 mov $1, TCTR_LOW
8205 MOVQ_R64_XMM TCTR_LOW INC
8206 MOVQ_R64_XMM CTR TCTR_LOW
8207 + pax_force_retaddr_bts
8208 ret
8209
8210 /*
8211 @@ -2552,6 +2580,7 @@ _aesni_inc:
8212 .Linc_low:
8213 movaps CTR, IV
8214 PSHUFB_XMM BSWAP_MASK IV
8215 + pax_force_retaddr_bts
8216 ret
8217
8218 /*
8219 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
8220 .Lctr_enc_ret:
8221 movups IV, (IVP)
8222 .Lctr_enc_just_ret:
8223 + pax_force_retaddr 0, 1
8224 ret
8225 +ENDPROC(aesni_ctr_enc)
8226 #endif
8227 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8228 index 391d245..67f35c2 100644
8229 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8230 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8231 @@ -20,6 +20,8 @@
8232 *
8233 */
8234
8235 +#include <asm/alternative-asm.h>
8236 +
8237 .file "blowfish-x86_64-asm.S"
8238 .text
8239
8240 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
8241 jnz __enc_xor;
8242
8243 write_block();
8244 + pax_force_retaddr 0, 1
8245 ret;
8246 __enc_xor:
8247 xor_block();
8248 + pax_force_retaddr 0, 1
8249 ret;
8250
8251 .align 8
8252 @@ -188,6 +192,7 @@ blowfish_dec_blk:
8253
8254 movq %r11, %rbp;
8255
8256 + pax_force_retaddr 0, 1
8257 ret;
8258
8259 /**********************************************************************
8260 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8261
8262 popq %rbx;
8263 popq %rbp;
8264 + pax_force_retaddr 0, 1
8265 ret;
8266
8267 __enc_xor4:
8268 @@ -349,6 +355,7 @@ __enc_xor4:
8269
8270 popq %rbx;
8271 popq %rbp;
8272 + pax_force_retaddr 0, 1
8273 ret;
8274
8275 .align 8
8276 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8277 popq %rbx;
8278 popq %rbp;
8279
8280 + pax_force_retaddr 0, 1
8281 ret;
8282
8283 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8284 index 0b33743..7a56206 100644
8285 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8286 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8287 @@ -20,6 +20,8 @@
8288 *
8289 */
8290
8291 +#include <asm/alternative-asm.h>
8292 +
8293 .file "camellia-x86_64-asm_64.S"
8294 .text
8295
8296 @@ -229,12 +231,14 @@ __enc_done:
8297 enc_outunpack(mov, RT1);
8298
8299 movq RRBP, %rbp;
8300 + pax_force_retaddr 0, 1
8301 ret;
8302
8303 __enc_xor:
8304 enc_outunpack(xor, RT1);
8305
8306 movq RRBP, %rbp;
8307 + pax_force_retaddr 0, 1
8308 ret;
8309
8310 .global camellia_dec_blk;
8311 @@ -275,6 +279,7 @@ __dec_rounds16:
8312 dec_outunpack();
8313
8314 movq RRBP, %rbp;
8315 + pax_force_retaddr 0, 1
8316 ret;
8317
8318 /**********************************************************************
8319 @@ -468,6 +473,7 @@ __enc2_done:
8320
8321 movq RRBP, %rbp;
8322 popq %rbx;
8323 + pax_force_retaddr 0, 1
8324 ret;
8325
8326 __enc2_xor:
8327 @@ -475,6 +481,7 @@ __enc2_xor:
8328
8329 movq RRBP, %rbp;
8330 popq %rbx;
8331 + pax_force_retaddr 0, 1
8332 ret;
8333
8334 .global camellia_dec_blk_2way;
8335 @@ -517,4 +524,5 @@ __dec2_rounds16:
8336
8337 movq RRBP, %rbp;
8338 movq RXOR, %rbx;
8339 + pax_force_retaddr 0, 1
8340 ret;
8341 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8342 index 6214a9b..1f4fc9a 100644
8343 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8344 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8345 @@ -1,3 +1,5 @@
8346 +#include <asm/alternative-asm.h>
8347 +
8348 # enter ECRYPT_encrypt_bytes
8349 .text
8350 .p2align 5
8351 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8352 add %r11,%rsp
8353 mov %rdi,%rax
8354 mov %rsi,%rdx
8355 + pax_force_retaddr 0, 1
8356 ret
8357 # bytesatleast65:
8358 ._bytesatleast65:
8359 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
8360 add %r11,%rsp
8361 mov %rdi,%rax
8362 mov %rsi,%rdx
8363 + pax_force_retaddr
8364 ret
8365 # enter ECRYPT_ivsetup
8366 .text
8367 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8368 add %r11,%rsp
8369 mov %rdi,%rax
8370 mov %rsi,%rdx
8371 + pax_force_retaddr
8372 ret
8373 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8374 index 3ee1ff0..cbc568b 100644
8375 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8376 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8377 @@ -24,6 +24,8 @@
8378 *
8379 */
8380
8381 +#include <asm/alternative-asm.h>
8382 +
8383 .file "serpent-sse2-x86_64-asm_64.S"
8384 .text
8385
8386 @@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8387 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8388 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8389
8390 + pax_force_retaddr
8391 ret;
8392
8393 __enc_xor8:
8394 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8395 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8396
8397 + pax_force_retaddr
8398 ret;
8399
8400 .align 8
8401 @@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8402 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8403 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8404
8405 + pax_force_retaddr
8406 ret;
8407 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8408 index b2c2f57..8470cab 100644
8409 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8410 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8411 @@ -28,6 +28,8 @@
8412 * (at your option) any later version.
8413 */
8414
8415 +#include <asm/alternative-asm.h>
8416 +
8417 #define CTX %rdi // arg1
8418 #define BUF %rsi // arg2
8419 #define CNT %rdx // arg3
8420 @@ -104,6 +106,7 @@
8421 pop %r12
8422 pop %rbp
8423 pop %rbx
8424 + pax_force_retaddr 0, 1
8425 ret
8426
8427 .size \name, .-\name
8428 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8429 index 5b012a2..36d5364 100644
8430 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8431 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8432 @@ -20,6 +20,8 @@
8433 *
8434 */
8435
8436 +#include <asm/alternative-asm.h>
8437 +
8438 .file "twofish-x86_64-asm-3way.S"
8439 .text
8440
8441 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8442 popq %r13;
8443 popq %r14;
8444 popq %r15;
8445 + pax_force_retaddr 0, 1
8446 ret;
8447
8448 __enc_xor3:
8449 @@ -271,6 +274,7 @@ __enc_xor3:
8450 popq %r13;
8451 popq %r14;
8452 popq %r15;
8453 + pax_force_retaddr 0, 1
8454 ret;
8455
8456 .global twofish_dec_blk_3way
8457 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8458 popq %r13;
8459 popq %r14;
8460 popq %r15;
8461 + pax_force_retaddr 0, 1
8462 ret;
8463
8464 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8465 index 7bcf3fc..f53832f 100644
8466 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8467 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8468 @@ -21,6 +21,7 @@
8469 .text
8470
8471 #include <asm/asm-offsets.h>
8472 +#include <asm/alternative-asm.h>
8473
8474 #define a_offset 0
8475 #define b_offset 4
8476 @@ -268,6 +269,7 @@ twofish_enc_blk:
8477
8478 popq R1
8479 movq $1,%rax
8480 + pax_force_retaddr 0, 1
8481 ret
8482
8483 twofish_dec_blk:
8484 @@ -319,4 +321,5 @@ twofish_dec_blk:
8485
8486 popq R1
8487 movq $1,%rax
8488 + pax_force_retaddr 0, 1
8489 ret
8490 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8491 index 07b3a68..bd2a388 100644
8492 --- a/arch/x86/ia32/ia32_aout.c
8493 +++ b/arch/x86/ia32/ia32_aout.c
8494 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8495 unsigned long dump_start, dump_size;
8496 struct user32 dump;
8497
8498 + memset(&dump, 0, sizeof(dump));
8499 +
8500 fs = get_fs();
8501 set_fs(KERNEL_DS);
8502 has_dumped = 1;
8503 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8504 index a69245b..6d145f4 100644
8505 --- a/arch/x86/ia32/ia32_signal.c
8506 +++ b/arch/x86/ia32/ia32_signal.c
8507 @@ -168,7 +168,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8508 }
8509 seg = get_fs();
8510 set_fs(KERNEL_DS);
8511 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8512 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8513 set_fs(seg);
8514 if (ret >= 0 && uoss_ptr) {
8515 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8516 @@ -369,7 +369,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8517 */
8518 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8519 size_t frame_size,
8520 - void **fpstate)
8521 + void __user **fpstate)
8522 {
8523 unsigned long sp;
8524
8525 @@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8526
8527 if (used_math()) {
8528 sp = sp - sig_xstate_ia32_size;
8529 - *fpstate = (struct _fpstate_ia32 *) sp;
8530 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8531 if (save_i387_xstate_ia32(*fpstate) < 0)
8532 return (void __user *) -1L;
8533 }
8534 @@ -398,7 +398,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8535 sp -= frame_size;
8536 /* Align the stack pointer according to the i386 ABI,
8537 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8538 - sp = ((sp + 4) & -16ul) - 4;
8539 + sp = ((sp - 12) & -16ul) - 4;
8540 return (void __user *) sp;
8541 }
8542
8543 @@ -456,7 +456,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8544 * These are actually not used anymore, but left because some
8545 * gdb versions depend on them as a marker.
8546 */
8547 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8548 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8549 } put_user_catch(err);
8550
8551 if (err)
8552 @@ -498,7 +498,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8553 0xb8,
8554 __NR_ia32_rt_sigreturn,
8555 0x80cd,
8556 - 0,
8557 + 0
8558 };
8559
8560 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8561 @@ -528,16 +528,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8562
8563 if (ka->sa.sa_flags & SA_RESTORER)
8564 restorer = ka->sa.sa_restorer;
8565 + else if (current->mm->context.vdso)
8566 + /* Return stub is in 32bit vsyscall page */
8567 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8568 else
8569 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8570 - rt_sigreturn);
8571 + restorer = &frame->retcode;
8572 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8573
8574 /*
8575 * Not actually used anymore, but left because some gdb
8576 * versions need it.
8577 */
8578 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8579 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8580 } put_user_catch(err);
8581
8582 if (err)
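
The get_sigframe() change above replaces ((sp + 4) & -16ul) - 4 with ((sp - 12) & -16ul) - 4. Both expressions satisfy the i386 ABI rule quoted in the comment, ((sp + 4) & 15) == 0 at function entry; the difference is that the new expression always lands at least 16 bytes below the incoming sp, whereas the old one returned sp unchanged whenever sp + 4 was already 16-byte aligned. A small standalone check of that arithmetic:

#include <stdio.h>

/* Compare the two get_sigframe() rounding expressions over a range of
 * starting stack pointers: both stay ABI-aligned, but only the new one is
 * guaranteed to land strictly below the incoming sp. */
int main(void)
{
        unsigned long sp;

        for (sp = 0x1000; sp < 0x1010; sp++) {
                unsigned long o = ((sp + 4) & -16ul) - 4;       /* old */
                unsigned long n = ((sp - 12) & -16ul) - 4;      /* new */

                printf("sp=%#lx old=%#lx new=%#lx aligned=%d/%d below_sp=%d/%d\n",
                       sp, o, n,
                       ((o + 4) & 15) == 0, ((n + 4) & 15) == 0,
                       o < sp, n < sp);
        }
        return 0;
}
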
8583 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8584 index e3e7340..05ed805 100644
8585 --- a/arch/x86/ia32/ia32entry.S
8586 +++ b/arch/x86/ia32/ia32entry.S
8587 @@ -13,8 +13,10 @@
8588 #include <asm/thread_info.h>
8589 #include <asm/segment.h>
8590 #include <asm/irqflags.h>
8591 +#include <asm/pgtable.h>
8592 #include <linux/linkage.h>
8593 #include <linux/err.h>
8594 +#include <asm/alternative-asm.h>
8595
8596 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8597 #include <linux/elf-em.h>
8598 @@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8599 ENDPROC(native_irq_enable_sysexit)
8600 #endif
8601
8602 + .macro pax_enter_kernel_user
8603 + pax_set_fptr_mask
8604 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8605 + call pax_enter_kernel_user
8606 +#endif
8607 + .endm
8608 +
8609 + .macro pax_exit_kernel_user
8610 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8611 + call pax_exit_kernel_user
8612 +#endif
8613 +#ifdef CONFIG_PAX_RANDKSTACK
8614 + pushq %rax
8615 + pushq %r11
8616 + call pax_randomize_kstack
8617 + popq %r11
8618 + popq %rax
8619 +#endif
8620 + .endm
8621 +
8622 +.macro pax_erase_kstack
8623 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8624 + call pax_erase_kstack
8625 +#endif
8626 +.endm
8627 +
8628 /*
8629 * 32bit SYSENTER instruction entry.
8630 *
8631 @@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8632 CFI_REGISTER rsp,rbp
8633 SWAPGS_UNSAFE_STACK
8634 movq PER_CPU_VAR(kernel_stack), %rsp
8635 - addq $(KERNEL_STACK_OFFSET),%rsp
8636 - /*
8637 - * No need to follow this irqs on/off section: the syscall
8638 - * disabled irqs, here we enable it straight after entry:
8639 - */
8640 - ENABLE_INTERRUPTS(CLBR_NONE)
8641 movl %ebp,%ebp /* zero extension */
8642 pushq_cfi $__USER32_DS
8643 /*CFI_REL_OFFSET ss,0*/
8644 @@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8645 CFI_REL_OFFSET rsp,0
8646 pushfq_cfi
8647 /*CFI_REL_OFFSET rflags,0*/
8648 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8649 - CFI_REGISTER rip,r10
8650 + orl $X86_EFLAGS_IF,(%rsp)
8651 + GET_THREAD_INFO(%r11)
8652 + movl TI_sysenter_return(%r11), %r11d
8653 + CFI_REGISTER rip,r11
8654 pushq_cfi $__USER32_CS
8655 /*CFI_REL_OFFSET cs,0*/
8656 movl %eax, %eax
8657 - pushq_cfi %r10
8658 + pushq_cfi %r11
8659 CFI_REL_OFFSET rip,0
8660 pushq_cfi %rax
8661 cld
8662 SAVE_ARGS 0,1,0
8663 + pax_enter_kernel_user
8664 + /*
8665 + * No need to follow this irqs on/off section: the syscall
8666 + * disabled irqs, here we enable it straight after entry:
8667 + */
8668 + ENABLE_INTERRUPTS(CLBR_NONE)
8669 /* no need to do an access_ok check here because rbp has been
8670 32bit zero extended */
8671 +
8672 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8673 + mov $PAX_USER_SHADOW_BASE,%r11
8674 + add %r11,%rbp
8675 +#endif
8676 +
8677 1: movl (%rbp),%ebp
8678 .section __ex_table,"a"
8679 .quad 1b,ia32_badarg
8680 .previous
8681 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8682 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8683 + GET_THREAD_INFO(%r11)
8684 + orl $TS_COMPAT,TI_status(%r11)
8685 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8686 CFI_REMEMBER_STATE
8687 jnz sysenter_tracesys
8688 cmpq $(IA32_NR_syscalls-1),%rax
8689 @@ -160,12 +197,15 @@ sysenter_do_call:
8690 sysenter_dispatch:
8691 call *ia32_sys_call_table(,%rax,8)
8692 movq %rax,RAX-ARGOFFSET(%rsp)
8693 + GET_THREAD_INFO(%r11)
8694 DISABLE_INTERRUPTS(CLBR_NONE)
8695 TRACE_IRQS_OFF
8696 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8697 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8698 jnz sysexit_audit
8699 sysexit_from_sys_call:
8700 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8701 + pax_exit_kernel_user
8702 + pax_erase_kstack
8703 + andl $~TS_COMPAT,TI_status(%r11)
8704 /* clear IF, that popfq doesn't enable interrupts early */
8705 andl $~0x200,EFLAGS-R11(%rsp)
8706 movl RIP-R11(%rsp),%edx /* User %eip */
8707 @@ -191,6 +231,9 @@ sysexit_from_sys_call:
8708 movl %eax,%esi /* 2nd arg: syscall number */
8709 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8710 call __audit_syscall_entry
8711 +
8712 + pax_erase_kstack
8713 +
8714 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8715 cmpq $(IA32_NR_syscalls-1),%rax
8716 ja ia32_badsys
8717 @@ -202,7 +245,7 @@ sysexit_from_sys_call:
8718 .endm
8719
8720 .macro auditsys_exit exit
8721 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8722 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8723 jnz ia32_ret_from_sys_call
8724 TRACE_IRQS_ON
8725 sti
8726 @@ -213,11 +256,12 @@ sysexit_from_sys_call:
8727 1: setbe %al /* 1 if error, 0 if not */
8728 movzbl %al,%edi /* zero-extend that into %edi */
8729 call __audit_syscall_exit
8730 + GET_THREAD_INFO(%r11)
8731 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8732 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8733 cli
8734 TRACE_IRQS_OFF
8735 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8736 + testl %edi,TI_flags(%r11)
8737 jz \exit
8738 CLEAR_RREGS -ARGOFFSET
8739 jmp int_with_check
8740 @@ -235,7 +279,7 @@ sysexit_audit:
8741
8742 sysenter_tracesys:
8743 #ifdef CONFIG_AUDITSYSCALL
8744 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8745 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8746 jz sysenter_auditsys
8747 #endif
8748 SAVE_REST
8749 @@ -243,6 +287,9 @@ sysenter_tracesys:
8750 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8751 movq %rsp,%rdi /* &pt_regs -> arg1 */
8752 call syscall_trace_enter
8753 +
8754 + pax_erase_kstack
8755 +
8756 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8757 RESTORE_REST
8758 cmpq $(IA32_NR_syscalls-1),%rax
8759 @@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8760 ENTRY(ia32_cstar_target)
8761 CFI_STARTPROC32 simple
8762 CFI_SIGNAL_FRAME
8763 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8764 + CFI_DEF_CFA rsp,0
8765 CFI_REGISTER rip,rcx
8766 /*CFI_REGISTER rflags,r11*/
8767 SWAPGS_UNSAFE_STACK
8768 movl %esp,%r8d
8769 CFI_REGISTER rsp,r8
8770 movq PER_CPU_VAR(kernel_stack),%rsp
8771 + SAVE_ARGS 8*6,0,0
8772 + pax_enter_kernel_user
8773 /*
8774 * No need to follow this irqs on/off section: the syscall
8775 * disabled irqs and here we enable it straight after entry:
8776 */
8777 ENABLE_INTERRUPTS(CLBR_NONE)
8778 - SAVE_ARGS 8,0,0
8779 movl %eax,%eax /* zero extension */
8780 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8781 movq %rcx,RIP-ARGOFFSET(%rsp)
8782 @@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8783 /* no need to do an access_ok check here because r8 has been
8784 32bit zero extended */
8785 /* hardware stack frame is complete now */
8786 +
8787 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8788 + mov $PAX_USER_SHADOW_BASE,%r11
8789 + add %r11,%r8
8790 +#endif
8791 +
8792 1: movl (%r8),%r9d
8793 .section __ex_table,"a"
8794 .quad 1b,ia32_badarg
8795 .previous
8796 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8797 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8798 + GET_THREAD_INFO(%r11)
8799 + orl $TS_COMPAT,TI_status(%r11)
8800 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8801 CFI_REMEMBER_STATE
8802 jnz cstar_tracesys
8803 cmpq $IA32_NR_syscalls-1,%rax
8804 @@ -317,12 +372,15 @@ cstar_do_call:
8805 cstar_dispatch:
8806 call *ia32_sys_call_table(,%rax,8)
8807 movq %rax,RAX-ARGOFFSET(%rsp)
8808 + GET_THREAD_INFO(%r11)
8809 DISABLE_INTERRUPTS(CLBR_NONE)
8810 TRACE_IRQS_OFF
8811 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8812 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8813 jnz sysretl_audit
8814 sysretl_from_sys_call:
8815 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8816 + pax_exit_kernel_user
8817 + pax_erase_kstack
8818 + andl $~TS_COMPAT,TI_status(%r11)
8819 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8820 movl RIP-ARGOFFSET(%rsp),%ecx
8821 CFI_REGISTER rip,rcx
8822 @@ -350,7 +408,7 @@ sysretl_audit:
8823
8824 cstar_tracesys:
8825 #ifdef CONFIG_AUDITSYSCALL
8826 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8827 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8828 jz cstar_auditsys
8829 #endif
8830 xchgl %r9d,%ebp
8831 @@ -359,6 +417,9 @@ cstar_tracesys:
8832 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8833 movq %rsp,%rdi /* &pt_regs -> arg1 */
8834 call syscall_trace_enter
8835 +
8836 + pax_erase_kstack
8837 +
8838 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8839 RESTORE_REST
8840 xchgl %ebp,%r9d
8841 @@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8842 CFI_REL_OFFSET rip,RIP-RIP
8843 PARAVIRT_ADJUST_EXCEPTION_FRAME
8844 SWAPGS
8845 - /*
8846 - * No need to follow this irqs on/off section: the syscall
8847 - * disabled irqs and here we enable it straight after entry:
8848 - */
8849 - ENABLE_INTERRUPTS(CLBR_NONE)
8850 movl %eax,%eax
8851 pushq_cfi %rax
8852 cld
8853 /* note the registers are not zero extended to the sf.
8854 this could be a problem. */
8855 SAVE_ARGS 0,1,0
8856 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8857 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8858 + pax_enter_kernel_user
8859 + /*
8860 + * No need to follow this irqs on/off section: the syscall
8861 + * disabled irqs and here we enable it straight after entry:
8862 + */
8863 + ENABLE_INTERRUPTS(CLBR_NONE)
8864 + GET_THREAD_INFO(%r11)
8865 + orl $TS_COMPAT,TI_status(%r11)
8866 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8867 jnz ia32_tracesys
8868 cmpq $(IA32_NR_syscalls-1),%rax
8869 ja ia32_badsys
8870 @@ -435,6 +498,9 @@ ia32_tracesys:
8871 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8872 movq %rsp,%rdi /* &pt_regs -> arg1 */
8873 call syscall_trace_enter
8874 +
8875 + pax_erase_kstack
8876 +
8877 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8878 RESTORE_REST
8879 cmpq $(IA32_NR_syscalls-1),%rax
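
In the ia32entry.S hunks above, the CONFIG_PAX_MEMORY_UDEREF blocks add PAX_USER_SHADOW_BASE to the user-supplied pointer (%rbp or %r8) before the kernel reads through it, presumably so the access goes through UDEREF's shadow view of userland rather than the direct user address. A trivial sketch of just that pointer arithmetic; the base constant below is a made-up stand-in, not the real kernel value:

#include <stdio.h>
#include <inttypes.h>

/* Illustrative only: relocate a userland address by a shadow-base offset
 * before dereferencing it, as the UDEREF hunks above do with %rbp / %r8.
 * FAKE_USER_SHADOW_BASE is an arbitrary placeholder value. */
#define FAKE_USER_SHADOW_BASE UINT64_C(0xffff880000000000)

int main(void)
{
        uint64_t user_ptr = UINT64_C(0x00007fffffffe000);   /* example user stack slot */

        printf("user pointer      %#018" PRIx64 "\n", user_ptr);
        printf("shadowed pointer  %#018" PRIx64 "\n", user_ptr + FAKE_USER_SHADOW_BASE);
        return 0;
}
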
8880 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8881 index aec2202..f76174e 100644
8882 --- a/arch/x86/ia32/sys_ia32.c
8883 +++ b/arch/x86/ia32/sys_ia32.c
8884 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8885 */
8886 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8887 {
8888 - typeof(ubuf->st_uid) uid = 0;
8889 - typeof(ubuf->st_gid) gid = 0;
8890 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8891 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8892 SET_UID(uid, stat->uid);
8893 SET_GID(gid, stat->gid);
8894 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8895 @@ -292,7 +292,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8896 return alarm_setitimer(seconds);
8897 }
8898
8899 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8900 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8901 int options)
8902 {
8903 return compat_sys_wait4(pid, stat_addr, options, NULL);
8904 @@ -313,7 +313,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8905 mm_segment_t old_fs = get_fs();
8906
8907 set_fs(KERNEL_DS);
8908 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8909 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8910 set_fs(old_fs);
8911 if (put_compat_timespec(&t, interval))
8912 return -EFAULT;
8913 @@ -329,7 +329,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8914 mm_segment_t old_fs = get_fs();
8915
8916 set_fs(KERNEL_DS);
8917 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8918 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8919 set_fs(old_fs);
8920 if (!ret) {
8921 switch (_NSIG_WORDS) {
8922 @@ -354,7 +354,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8923 if (copy_siginfo_from_user32(&info, uinfo))
8924 return -EFAULT;
8925 set_fs(KERNEL_DS);
8926 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8927 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8928 set_fs(old_fs);
8929 return ret;
8930 }
8931 @@ -399,7 +399,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8932 return -EFAULT;
8933
8934 set_fs(KERNEL_DS);
8935 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8936 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8937 count);
8938 set_fs(old_fs);
8939
8940 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8941 index 952bd01..7692c6f 100644
8942 --- a/arch/x86/include/asm/alternative-asm.h
8943 +++ b/arch/x86/include/asm/alternative-asm.h
8944 @@ -15,6 +15,45 @@
8945 .endm
8946 #endif
8947
8948 +#ifdef KERNEXEC_PLUGIN
8949 + .macro pax_force_retaddr_bts rip=0
8950 + btsq $63,\rip(%rsp)
8951 + .endm
8952 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8953 + .macro pax_force_retaddr rip=0, reload=0
8954 + btsq $63,\rip(%rsp)
8955 + .endm
8956 + .macro pax_force_fptr ptr
8957 + btsq $63,\ptr
8958 + .endm
8959 + .macro pax_set_fptr_mask
8960 + .endm
8961 +#endif
8962 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8963 + .macro pax_force_retaddr rip=0, reload=0
8964 + .if \reload
8965 + pax_set_fptr_mask
8966 + .endif
8967 + orq %r10,\rip(%rsp)
8968 + .endm
8969 + .macro pax_force_fptr ptr
8970 + orq %r10,\ptr
8971 + .endm
8972 + .macro pax_set_fptr_mask
8973 + movabs $0x8000000000000000,%r10
8974 + .endm
8975 +#endif
8976 +#else
8977 + .macro pax_force_retaddr rip=0, reload=0
8978 + .endm
8979 + .macro pax_force_fptr ptr
8980 + .endm
8981 + .macro pax_force_retaddr_bts rip=0
8982 + .endm
8983 + .macro pax_set_fptr_mask
8984 + .endm
8985 +#endif
8986 +
8987 .macro altinstruction_entry orig alt feature orig_len alt_len
8988 .long \orig - .
8989 .long \alt - .
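
The pax_force_retaddr / pax_force_fptr macros defined above set bit 63 of a saved return address or function pointer, either directly with btsq or by OR-ing in a mask preloaded into %r10 by pax_set_fptr_mask. For an address already in the kernel half of the canonical space that is a no-op; for an attacker-controlled userland address it yields a non-canonical pointer, so a hijacked return or indirect call faults instead of executing. A userspace sketch of just the masking arithmetic (both sample addresses are made-up examples):

#include <stdio.h>
#include <inttypes.h>

/* OR bit 63 into two sample addresses: a kernel-half address is unchanged,
 * a userland-style address becomes non-canonical on x86-64. */
int main(void)
{
        uint64_t mask   = UINT64_C(1) << 63;
        uint64_t kernel = UINT64_C(0xffffffff81000000);  /* bit 63 already set */
        uint64_t user   = UINT64_C(0x0000000000400000);  /* userland-style address */

        printf("kernel %#018" PRIx64 " -> %#018" PRIx64 " (unchanged)\n",
               kernel, kernel | mask);
        printf("user   %#018" PRIx64 " -> %#018" PRIx64 " (non-canonical)\n",
               user, user | mask);
        return 0;
}
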
8990 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8991 index 49331be..9706065 100644
8992 --- a/arch/x86/include/asm/alternative.h
8993 +++ b/arch/x86/include/asm/alternative.h
8994 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
8995 ".section .discard,\"aw\",@progbits\n" \
8996 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
8997 ".previous\n" \
8998 - ".section .altinstr_replacement, \"ax\"\n" \
8999 + ".section .altinstr_replacement, \"a\"\n" \
9000 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9001 ".previous"
9002
9003 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9004 index d854101..f6ea947 100644
9005 --- a/arch/x86/include/asm/apic.h
9006 +++ b/arch/x86/include/asm/apic.h
9007 @@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9008
9009 #ifdef CONFIG_X86_LOCAL_APIC
9010
9011 -extern unsigned int apic_verbosity;
9012 +extern int apic_verbosity;
9013 extern int local_apic_timer_c2_ok;
9014
9015 extern int disable_apic;
9016 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9017 index 20370c6..a2eb9b0 100644
9018 --- a/arch/x86/include/asm/apm.h
9019 +++ b/arch/x86/include/asm/apm.h
9020 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9021 __asm__ __volatile__(APM_DO_ZERO_SEGS
9022 "pushl %%edi\n\t"
9023 "pushl %%ebp\n\t"
9024 - "lcall *%%cs:apm_bios_entry\n\t"
9025 + "lcall *%%ss:apm_bios_entry\n\t"
9026 "setc %%al\n\t"
9027 "popl %%ebp\n\t"
9028 "popl %%edi\n\t"
9029 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9030 __asm__ __volatile__(APM_DO_ZERO_SEGS
9031 "pushl %%edi\n\t"
9032 "pushl %%ebp\n\t"
9033 - "lcall *%%cs:apm_bios_entry\n\t"
9034 + "lcall *%%ss:apm_bios_entry\n\t"
9035 "setc %%bl\n\t"
9036 "popl %%ebp\n\t"
9037 "popl %%edi\n\t"
9038 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9039 index 58cb6d4..ca9010d 100644
9040 --- a/arch/x86/include/asm/atomic.h
9041 +++ b/arch/x86/include/asm/atomic.h
9042 @@ -22,7 +22,18 @@
9043 */
9044 static inline int atomic_read(const atomic_t *v)
9045 {
9046 - return (*(volatile int *)&(v)->counter);
9047 + return (*(volatile const int *)&(v)->counter);
9048 +}
9049 +
9050 +/**
9051 + * atomic_read_unchecked - read atomic variable
9052 + * @v: pointer of type atomic_unchecked_t
9053 + *
9054 + * Atomically reads the value of @v.
9055 + */
9056 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9057 +{
9058 + return (*(volatile const int *)&(v)->counter);
9059 }
9060
9061 /**
9062 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9063 }
9064
9065 /**
9066 + * atomic_set_unchecked - set atomic variable
9067 + * @v: pointer of type atomic_unchecked_t
9068 + * @i: required value
9069 + *
9070 + * Atomically sets the value of @v to @i.
9071 + */
9072 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9073 +{
9074 + v->counter = i;
9075 +}
9076 +
9077 +/**
9078 * atomic_add - add integer to atomic variable
9079 * @i: integer value to add
9080 * @v: pointer of type atomic_t
9081 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9082 */
9083 static inline void atomic_add(int i, atomic_t *v)
9084 {
9085 - asm volatile(LOCK_PREFIX "addl %1,%0"
9086 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9087 +
9088 +#ifdef CONFIG_PAX_REFCOUNT
9089 + "jno 0f\n"
9090 + LOCK_PREFIX "subl %1,%0\n"
9091 + "int $4\n0:\n"
9092 + _ASM_EXTABLE(0b, 0b)
9093 +#endif
9094 +
9095 + : "+m" (v->counter)
9096 + : "ir" (i));
9097 +}
9098 +
9099 +/**
9100 + * atomic_add_unchecked - add integer to atomic variable
9101 + * @i: integer value to add
9102 + * @v: pointer of type atomic_unchecked_t
9103 + *
9104 + * Atomically adds @i to @v.
9105 + */
9106 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9107 +{
9108 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9109 : "+m" (v->counter)
9110 : "ir" (i));
9111 }
9112 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9113 */
9114 static inline void atomic_sub(int i, atomic_t *v)
9115 {
9116 - asm volatile(LOCK_PREFIX "subl %1,%0"
9117 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9118 +
9119 +#ifdef CONFIG_PAX_REFCOUNT
9120 + "jno 0f\n"
9121 + LOCK_PREFIX "addl %1,%0\n"
9122 + "int $4\n0:\n"
9123 + _ASM_EXTABLE(0b, 0b)
9124 +#endif
9125 +
9126 + : "+m" (v->counter)
9127 + : "ir" (i));
9128 +}
9129 +
9130 +/**
9131 + * atomic_sub_unchecked - subtract integer from atomic variable
9132 + * @i: integer value to subtract
9133 + * @v: pointer of type atomic_unchecked_t
9134 + *
9135 + * Atomically subtracts @i from @v.
9136 + */
9137 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9138 +{
9139 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9140 : "+m" (v->counter)
9141 : "ir" (i));
9142 }
9143 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9144 {
9145 unsigned char c;
9146
9147 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9148 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9149 +
9150 +#ifdef CONFIG_PAX_REFCOUNT
9151 + "jno 0f\n"
9152 + LOCK_PREFIX "addl %2,%0\n"
9153 + "int $4\n0:\n"
9154 + _ASM_EXTABLE(0b, 0b)
9155 +#endif
9156 +
9157 + "sete %1\n"
9158 : "+m" (v->counter), "=qm" (c)
9159 : "ir" (i) : "memory");
9160 return c;
9161 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9162 */
9163 static inline void atomic_inc(atomic_t *v)
9164 {
9165 - asm volatile(LOCK_PREFIX "incl %0"
9166 + asm volatile(LOCK_PREFIX "incl %0\n"
9167 +
9168 +#ifdef CONFIG_PAX_REFCOUNT
9169 + "jno 0f\n"
9170 + LOCK_PREFIX "decl %0\n"
9171 + "int $4\n0:\n"
9172 + _ASM_EXTABLE(0b, 0b)
9173 +#endif
9174 +
9175 + : "+m" (v->counter));
9176 +}
9177 +
9178 +/**
9179 + * atomic_inc_unchecked - increment atomic variable
9180 + * @v: pointer of type atomic_unchecked_t
9181 + *
9182 + * Atomically increments @v by 1.
9183 + */
9184 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9185 +{
9186 + asm volatile(LOCK_PREFIX "incl %0\n"
9187 : "+m" (v->counter));
9188 }
9189
9190 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9191 */
9192 static inline void atomic_dec(atomic_t *v)
9193 {
9194 - asm volatile(LOCK_PREFIX "decl %0"
9195 + asm volatile(LOCK_PREFIX "decl %0\n"
9196 +
9197 +#ifdef CONFIG_PAX_REFCOUNT
9198 + "jno 0f\n"
9199 + LOCK_PREFIX "incl %0\n"
9200 + "int $4\n0:\n"
9201 + _ASM_EXTABLE(0b, 0b)
9202 +#endif
9203 +
9204 + : "+m" (v->counter));
9205 +}
9206 +
9207 +/**
9208 + * atomic_dec_unchecked - decrement atomic variable
9209 + * @v: pointer of type atomic_unchecked_t
9210 + *
9211 + * Atomically decrements @v by 1.
9212 + */
9213 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9214 +{
9215 + asm volatile(LOCK_PREFIX "decl %0\n"
9216 : "+m" (v->counter));
9217 }
9218
9219 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9220 {
9221 unsigned char c;
9222
9223 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9224 + asm volatile(LOCK_PREFIX "decl %0\n"
9225 +
9226 +#ifdef CONFIG_PAX_REFCOUNT
9227 + "jno 0f\n"
9228 + LOCK_PREFIX "incl %0\n"
9229 + "int $4\n0:\n"
9230 + _ASM_EXTABLE(0b, 0b)
9231 +#endif
9232 +
9233 + "sete %1\n"
9234 : "+m" (v->counter), "=qm" (c)
9235 : : "memory");
9236 return c != 0;
9237 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9238 {
9239 unsigned char c;
9240
9241 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9242 + asm volatile(LOCK_PREFIX "incl %0\n"
9243 +
9244 +#ifdef CONFIG_PAX_REFCOUNT
9245 + "jno 0f\n"
9246 + LOCK_PREFIX "decl %0\n"
9247 + "int $4\n0:\n"
9248 + _ASM_EXTABLE(0b, 0b)
9249 +#endif
9250 +
9251 + "sete %1\n"
9252 + : "+m" (v->counter), "=qm" (c)
9253 + : : "memory");
9254 + return c != 0;
9255 +}
9256 +
9257 +/**
9258 + * atomic_inc_and_test_unchecked - increment and test
9259 + * @v: pointer of type atomic_unchecked_t
9260 + *
9261 + * Atomically increments @v by 1
9262 + * and returns true if the result is zero, or false for all
9263 + * other cases.
9264 + */
9265 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9266 +{
9267 + unsigned char c;
9268 +
9269 + asm volatile(LOCK_PREFIX "incl %0\n"
9270 + "sete %1\n"
9271 : "+m" (v->counter), "=qm" (c)
9272 : : "memory");
9273 return c != 0;
9274 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9275 {
9276 unsigned char c;
9277
9278 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9279 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9280 +
9281 +#ifdef CONFIG_PAX_REFCOUNT
9282 + "jno 0f\n"
9283 + LOCK_PREFIX "subl %2,%0\n"
9284 + "int $4\n0:\n"
9285 + _ASM_EXTABLE(0b, 0b)
9286 +#endif
9287 +
9288 + "sets %1\n"
9289 : "+m" (v->counter), "=qm" (c)
9290 : "ir" (i) : "memory");
9291 return c;
9292 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9293 goto no_xadd;
9294 #endif
9295 /* Modern 486+ processor */
9296 - return i + xadd(&v->counter, i);
9297 + return i + xadd_check_overflow(&v->counter, i);
9298
9299 #ifdef CONFIG_M386
9300 no_xadd: /* Legacy 386 processor */
9301 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9302 }
9303
9304 /**
9305 + * atomic_add_return_unchecked - add integer and return
9306 + * @i: integer value to add
9307 + * @v: pointer of type atomic_unchecked_t
9308 + *
9309 + * Atomically adds @i to @v and returns @i + @v
9310 + */
9311 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9312 +{
9313 +#ifdef CONFIG_M386
9314 + int __i;
9315 + unsigned long flags;
9316 + if (unlikely(boot_cpu_data.x86 <= 3))
9317 + goto no_xadd;
9318 +#endif
9319 + /* Modern 486+ processor */
9320 + return i + xadd(&v->counter, i);
9321 +
9322 +#ifdef CONFIG_M386
9323 +no_xadd: /* Legacy 386 processor */
9324 + raw_local_irq_save(flags);
9325 + __i = atomic_read_unchecked(v);
9326 + atomic_set_unchecked(v, i + __i);
9327 + raw_local_irq_restore(flags);
9328 + return i + __i;
9329 +#endif
9330 +}
9331 +
9332 +/**
9333 * atomic_sub_return - subtract integer and return
9334 * @v: pointer of type atomic_t
9335 * @i: integer value to subtract
9336 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9337 }
9338
9339 #define atomic_inc_return(v) (atomic_add_return(1, v))
9340 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9341 +{
9342 + return atomic_add_return_unchecked(1, v);
9343 +}
9344 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9345
9346 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9347 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9348 return cmpxchg(&v->counter, old, new);
9349 }
9350
9351 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9352 +{
9353 + return cmpxchg(&v->counter, old, new);
9354 +}
9355 +
9356 static inline int atomic_xchg(atomic_t *v, int new)
9357 {
9358 return xchg(&v->counter, new);
9359 }
9360
9361 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9362 +{
9363 + return xchg(&v->counter, new);
9364 +}
9365 +
9366 /**
9367 * __atomic_add_unless - add unless the number is already a given value
9368 * @v: pointer of type atomic_t
9369 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9370 */
9371 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9372 {
9373 - int c, old;
9374 + int c, old, new;
9375 c = atomic_read(v);
9376 for (;;) {
9377 - if (unlikely(c == (u)))
9378 + if (unlikely(c == u))
9379 break;
9380 - old = atomic_cmpxchg((v), c, c + (a));
9381 +
9382 + asm volatile("addl %2,%0\n"
9383 +
9384 +#ifdef CONFIG_PAX_REFCOUNT
9385 + "jno 0f\n"
9386 + "subl %2,%0\n"
9387 + "int $4\n0:\n"
9388 + _ASM_EXTABLE(0b, 0b)
9389 +#endif
9390 +
9391 + : "=r" (new)
9392 + : "0" (c), "ir" (a));
9393 +
9394 + old = atomic_cmpxchg(v, c, new);
9395 if (likely(old == c))
9396 break;
9397 c = old;
9398 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9399 return c;
9400 }
9401
9402 +/**
9403 + * atomic_inc_not_zero_hint - increment if not null
9404 + * @v: pointer of type atomic_t
9405 + * @hint: probable value of the atomic before the increment
9406 + *
9407 + * This version of atomic_inc_not_zero() gives a hint of probable
9408 + * value of the atomic. This helps processor to not read the memory
9409 + * before doing the atomic read/modify/write cycle, lowering
9410 + * number of bus transactions on some arches.
9411 + *
9412 + * Returns: 0 if increment was not done, 1 otherwise.
9413 + */
9414 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9415 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9416 +{
9417 + int val, c = hint, new;
9418 +
9419 + /* sanity test, should be removed by compiler if hint is a constant */
9420 + if (!hint)
9421 + return __atomic_add_unless(v, 1, 0);
9422 +
9423 + do {
9424 + asm volatile("incl %0\n"
9425 +
9426 +#ifdef CONFIG_PAX_REFCOUNT
9427 + "jno 0f\n"
9428 + "decl %0\n"
9429 + "int $4\n0:\n"
9430 + _ASM_EXTABLE(0b, 0b)
9431 +#endif
9432 +
9433 + : "=r" (new)
9434 + : "0" (c));
9435 +
9436 + val = atomic_cmpxchg(v, c, new);
9437 + if (val == c)
9438 + return 1;
9439 + c = val;
9440 + } while (c);
9441 +
9442 + return 0;
9443 +}
9444
9445 /*
9446 * atomic_dec_if_positive - decrement by 1 if old value positive
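
The CONFIG_PAX_REFCOUNT pattern used throughout the atomic.h hunks above is always the same shape: perform the locked operation, jno past a fixup when the overflow flag is clear, otherwise undo the operation and execute int $4 so the overflow handler fires. A userspace sketch of that sequence, with the trap replaced by a returned flag so the example stays runnable:

#include <stdio.h>
#include <limits.h>

/* Locked add with a PAX_REFCOUNT-style overflow check: if the signed add
 * overflows, roll it back. The kernel raises int $4 at that point; here the
 * condition is simply reported to the caller. */
static int atomic_add_checked(int *counter, int i)
{
        unsigned char overflowed;

        asm volatile("lock addl %2,%0\n\t"
                     "seto %1\n\t"              /* capture OF before branching */
                     "jno 1f\n\t"
                     "lock subl %2,%0\n"        /* undo the add on overflow */
                     "1:"
                     : "+m" (*counter), "=q" (overflowed)
                     : "ir" (i)
                     : "memory", "cc");
        return overflowed;
}

int main(void)
{
        int refcount = INT_MAX;

        if (atomic_add_checked(&refcount, 1))
                printf("overflow caught, refcount held at %d\n", refcount);
        else
                printf("refcount is now %d\n", refcount);
        return 0;
}

As in the kernel version, the undo is not atomic with the add; the goal is to detect and trap the overflow, not to make the add-plus-rollback a single atomic step.
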
9447 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9448 index 1981199..36b9dfb 100644
9449 --- a/arch/x86/include/asm/atomic64_32.h
9450 +++ b/arch/x86/include/asm/atomic64_32.h
9451 @@ -12,6 +12,14 @@ typedef struct {
9452 u64 __aligned(8) counter;
9453 } atomic64_t;
9454
9455 +#ifdef CONFIG_PAX_REFCOUNT
9456 +typedef struct {
9457 + u64 __aligned(8) counter;
9458 +} atomic64_unchecked_t;
9459 +#else
9460 +typedef atomic64_t atomic64_unchecked_t;
9461 +#endif
9462 +
9463 #define ATOMIC64_INIT(val) { (val) }
9464
9465 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9466 @@ -37,21 +45,31 @@ typedef struct {
9467 ATOMIC64_DECL_ONE(sym##_386)
9468
9469 ATOMIC64_DECL_ONE(add_386);
9470 +ATOMIC64_DECL_ONE(add_unchecked_386);
9471 ATOMIC64_DECL_ONE(sub_386);
9472 +ATOMIC64_DECL_ONE(sub_unchecked_386);
9473 ATOMIC64_DECL_ONE(inc_386);
9474 +ATOMIC64_DECL_ONE(inc_unchecked_386);
9475 ATOMIC64_DECL_ONE(dec_386);
9476 +ATOMIC64_DECL_ONE(dec_unchecked_386);
9477 #endif
9478
9479 #define alternative_atomic64(f, out, in...) \
9480 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9481
9482 ATOMIC64_DECL(read);
9483 +ATOMIC64_DECL(read_unchecked);
9484 ATOMIC64_DECL(set);
9485 +ATOMIC64_DECL(set_unchecked);
9486 ATOMIC64_DECL(xchg);
9487 ATOMIC64_DECL(add_return);
9488 +ATOMIC64_DECL(add_return_unchecked);
9489 ATOMIC64_DECL(sub_return);
9490 +ATOMIC64_DECL(sub_return_unchecked);
9491 ATOMIC64_DECL(inc_return);
9492 +ATOMIC64_DECL(inc_return_unchecked);
9493 ATOMIC64_DECL(dec_return);
9494 +ATOMIC64_DECL(dec_return_unchecked);
9495 ATOMIC64_DECL(dec_if_positive);
9496 ATOMIC64_DECL(inc_not_zero);
9497 ATOMIC64_DECL(add_unless);
9498 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9499 }
9500
9501 /**
9502 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9503 + * @p: pointer to type atomic64_unchecked_t
9504 + * @o: expected value
9505 + * @n: new value
9506 + *
9507 + * Atomically sets @v to @n if it was equal to @o and returns
9508 + * the old value.
9509 + */
9510 +
9511 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9512 +{
9513 + return cmpxchg64(&v->counter, o, n);
9514 +}
9515 +
9516 +/**
9517 * atomic64_xchg - xchg atomic64 variable
9518 * @v: pointer to type atomic64_t
9519 * @n: value to assign
9520 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9521 }
9522
9523 /**
9524 + * atomic64_set_unchecked - set atomic64 variable
9525 + * @v: pointer to type atomic64_unchecked_t
9526 + * @n: value to assign
9527 + *
9528 + * Atomically sets the value of @v to @n.
9529 + */
9530 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9531 +{
9532 + unsigned high = (unsigned)(i >> 32);
9533 + unsigned low = (unsigned)i;
9534 + alternative_atomic64(set, /* no output */,
9535 + "S" (v), "b" (low), "c" (high)
9536 + : "eax", "edx", "memory");
9537 +}
9538 +
9539 +/**
9540 * atomic64_read - read atomic64 variable
9541 * @v: pointer to type atomic64_t
9542 *
9543 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
9544 }
9545
9546 /**
9547 + * atomic64_read_unchecked - read atomic64 variable
9548 + * @v: pointer to type atomic64_unchecked_t
9549 + *
9550 + * Atomically reads the value of @v and returns it.
9551 + */
9552 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9553 +{
9554 + long long r;
9555 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
9556 + return r;
9557 + }
9558 +
9559 +/**
9560 * atomic64_add_return - add and return
9561 * @i: integer value to add
9562 * @v: pointer to type atomic64_t
9563 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9564 return i;
9565 }
9566
9567 +/**
9568 + * atomic64_add_return_unchecked - add and return
9569 + * @i: integer value to add
9570 + * @v: pointer to type atomic64_unchecked_t
9571 + *
9572 + * Atomically adds @i to @v and returns @i + *@v
9573 + */
9574 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9575 +{
9576 + alternative_atomic64(add_return_unchecked,
9577 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9578 + ASM_NO_INPUT_CLOBBER("memory"));
9579 + return i;
9580 +}
9581 +
9582 /*
9583 * Other variants with different arithmetic operators:
9584 */
9585 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9586 return a;
9587 }
9588
9589 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9590 +{
9591 + long long a;
9592 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
9593 + "S" (v) : "memory", "ecx");
9594 + return a;
9595 +}
9596 +
9597 static inline long long atomic64_dec_return(atomic64_t *v)
9598 {
9599 long long a;
9600 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9601 }
9602
9603 /**
9604 + * atomic64_add_unchecked - add integer to atomic64 variable
9605 + * @i: integer value to add
9606 + * @v: pointer to type atomic64_unchecked_t
9607 + *
9608 + * Atomically adds @i to @v.
9609 + */
9610 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9611 +{
9612 + __alternative_atomic64(add_unchecked, add_return_unchecked,
9613 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9614 + ASM_NO_INPUT_CLOBBER("memory"));
9615 + return i;
9616 +}
9617 +
9618 +/**
9619 * atomic64_sub - subtract the atomic64 variable
9620 * @i: integer value to subtract
9621 * @v: pointer to type atomic64_t
9622 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9623 index 0e1cbfc..5623683 100644
9624 --- a/arch/x86/include/asm/atomic64_64.h
9625 +++ b/arch/x86/include/asm/atomic64_64.h
9626 @@ -18,7 +18,19 @@
9627 */
9628 static inline long atomic64_read(const atomic64_t *v)
9629 {
9630 - return (*(volatile long *)&(v)->counter);
9631 + return (*(volatile const long *)&(v)->counter);
9632 +}
9633 +
9634 +/**
9635 + * atomic64_read_unchecked - read atomic64 variable
9636 + * @v: pointer of type atomic64_unchecked_t
9637 + *
9638 + * Atomically reads the value of @v.
9639 + * Doesn't imply a read memory barrier.
9640 + */
9641 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9642 +{
9643 + return (*(volatile const long *)&(v)->counter);
9644 }
9645
9646 /**
9647 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9648 }
9649
9650 /**
9651 + * atomic64_set_unchecked - set atomic64 variable
9652 + * @v: pointer to type atomic64_unchecked_t
9653 + * @i: required value
9654 + *
9655 + * Atomically sets the value of @v to @i.
9656 + */
9657 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9658 +{
9659 + v->counter = i;
9660 +}
9661 +
9662 +/**
9663 * atomic64_add - add integer to atomic64 variable
9664 * @i: integer value to add
9665 * @v: pointer to type atomic64_t
9666 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9667 */
9668 static inline void atomic64_add(long i, atomic64_t *v)
9669 {
9670 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9671 +
9672 +#ifdef CONFIG_PAX_REFCOUNT
9673 + "jno 0f\n"
9674 + LOCK_PREFIX "subq %1,%0\n"
9675 + "int $4\n0:\n"
9676 + _ASM_EXTABLE(0b, 0b)
9677 +#endif
9678 +
9679 + : "=m" (v->counter)
9680 + : "er" (i), "m" (v->counter));
9681 +}
9682 +
9683 +/**
9684 + * atomic64_add_unchecked - add integer to atomic64 variable
9685 + * @i: integer value to add
9686 + * @v: pointer to type atomic64_unchecked_t
9687 + *
9688 + * Atomically adds @i to @v.
9689 + */
9690 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9691 +{
9692 asm volatile(LOCK_PREFIX "addq %1,%0"
9693 : "=m" (v->counter)
9694 : "er" (i), "m" (v->counter));
9695 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9696 */
9697 static inline void atomic64_sub(long i, atomic64_t *v)
9698 {
9699 - asm volatile(LOCK_PREFIX "subq %1,%0"
9700 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9701 +
9702 +#ifdef CONFIG_PAX_REFCOUNT
9703 + "jno 0f\n"
9704 + LOCK_PREFIX "addq %1,%0\n"
9705 + "int $4\n0:\n"
9706 + _ASM_EXTABLE(0b, 0b)
9707 +#endif
9708 +
9709 + : "=m" (v->counter)
9710 + : "er" (i), "m" (v->counter));
9711 +}
9712 +
9713 +/**
9714 + * atomic64_sub_unchecked - subtract the atomic64 variable
9715 + * @i: integer value to subtract
9716 + * @v: pointer to type atomic64_unchecked_t
9717 + *
9718 + * Atomically subtracts @i from @v.
9719 + */
9720 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9721 +{
9722 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9723 : "=m" (v->counter)
9724 : "er" (i), "m" (v->counter));
9725 }
9726 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9727 {
9728 unsigned char c;
9729
9730 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9731 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9732 +
9733 +#ifdef CONFIG_PAX_REFCOUNT
9734 + "jno 0f\n"
9735 + LOCK_PREFIX "addq %2,%0\n"
9736 + "int $4\n0:\n"
9737 + _ASM_EXTABLE(0b, 0b)
9738 +#endif
9739 +
9740 + "sete %1\n"
9741 : "=m" (v->counter), "=qm" (c)
9742 : "er" (i), "m" (v->counter) : "memory");
9743 return c;
9744 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9745 */
9746 static inline void atomic64_inc(atomic64_t *v)
9747 {
9748 + asm volatile(LOCK_PREFIX "incq %0\n"
9749 +
9750 +#ifdef CONFIG_PAX_REFCOUNT
9751 + "jno 0f\n"
9752 + LOCK_PREFIX "decq %0\n"
9753 + "int $4\n0:\n"
9754 + _ASM_EXTABLE(0b, 0b)
9755 +#endif
9756 +
9757 + : "=m" (v->counter)
9758 + : "m" (v->counter));
9759 +}
9760 +
9761 +/**
9762 + * atomic64_inc_unchecked - increment atomic64 variable
9763 + * @v: pointer to type atomic64_unchecked_t
9764 + *
9765 + * Atomically increments @v by 1.
9766 + */
9767 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9768 +{
9769 asm volatile(LOCK_PREFIX "incq %0"
9770 : "=m" (v->counter)
9771 : "m" (v->counter));
9772 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9773 */
9774 static inline void atomic64_dec(atomic64_t *v)
9775 {
9776 - asm volatile(LOCK_PREFIX "decq %0"
9777 + asm volatile(LOCK_PREFIX "decq %0\n"
9778 +
9779 +#ifdef CONFIG_PAX_REFCOUNT
9780 + "jno 0f\n"
9781 + LOCK_PREFIX "incq %0\n"
9782 + "int $4\n0:\n"
9783 + _ASM_EXTABLE(0b, 0b)
9784 +#endif
9785 +
9786 + : "=m" (v->counter)
9787 + : "m" (v->counter));
9788 +}
9789 +
9790 +/**
9791 + * atomic64_dec_unchecked - decrement atomic64 variable
9792 + * @v: pointer to type atomic64_t
9793 + *
9794 + * Atomically decrements @v by 1.
9795 + */
9796 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9797 +{
9798 + asm volatile(LOCK_PREFIX "decq %0\n"
9799 : "=m" (v->counter)
9800 : "m" (v->counter));
9801 }
9802 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9803 {
9804 unsigned char c;
9805
9806 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9807 + asm volatile(LOCK_PREFIX "decq %0\n"
9808 +
9809 +#ifdef CONFIG_PAX_REFCOUNT
9810 + "jno 0f\n"
9811 + LOCK_PREFIX "incq %0\n"
9812 + "int $4\n0:\n"
9813 + _ASM_EXTABLE(0b, 0b)
9814 +#endif
9815 +
9816 + "sete %1\n"
9817 : "=m" (v->counter), "=qm" (c)
9818 : "m" (v->counter) : "memory");
9819 return c != 0;
9820 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9821 {
9822 unsigned char c;
9823
9824 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9825 + asm volatile(LOCK_PREFIX "incq %0\n"
9826 +
9827 +#ifdef CONFIG_PAX_REFCOUNT
9828 + "jno 0f\n"
9829 + LOCK_PREFIX "decq %0\n"
9830 + "int $4\n0:\n"
9831 + _ASM_EXTABLE(0b, 0b)
9832 +#endif
9833 +
9834 + "sete %1\n"
9835 : "=m" (v->counter), "=qm" (c)
9836 : "m" (v->counter) : "memory");
9837 return c != 0;
9838 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9839 {
9840 unsigned char c;
9841
9842 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9843 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9844 +
9845 +#ifdef CONFIG_PAX_REFCOUNT
9846 + "jno 0f\n"
9847 + LOCK_PREFIX "subq %2,%0\n"
9848 + "int $4\n0:\n"
9849 + _ASM_EXTABLE(0b, 0b)
9850 +#endif
9851 +
9852 + "sets %1\n"
9853 : "=m" (v->counter), "=qm" (c)
9854 : "er" (i), "m" (v->counter) : "memory");
9855 return c;
9856 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9857 */
9858 static inline long atomic64_add_return(long i, atomic64_t *v)
9859 {
9860 + return i + xadd_check_overflow(&v->counter, i);
9861 +}
9862 +
9863 +/**
9864 + * atomic64_add_return_unchecked - add and return
9865 + * @i: integer value to add
9866 + * @v: pointer to type atomic64_unchecked_t
9867 + *
9868 + * Atomically adds @i to @v and returns @i + @v
9869 + */
9870 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9871 +{
9872 return i + xadd(&v->counter, i);
9873 }
9874
9875 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9876 }
9877
9878 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9879 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9880 +{
9881 + return atomic64_add_return_unchecked(1, v);
9882 +}
9883 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9884
9885 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9886 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9887 return cmpxchg(&v->counter, old, new);
9888 }
9889
9890 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9891 +{
9892 + return cmpxchg(&v->counter, old, new);
9893 +}
9894 +
9895 static inline long atomic64_xchg(atomic64_t *v, long new)
9896 {
9897 return xchg(&v->counter, new);
9898 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9899 */
9900 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9901 {
9902 - long c, old;
9903 + long c, old, new;
9904 c = atomic64_read(v);
9905 for (;;) {
9906 - if (unlikely(c == (u)))
9907 + if (unlikely(c == u))
9908 break;
9909 - old = atomic64_cmpxchg((v), c, c + (a));
9910 +
9911 + asm volatile("add %2,%0\n"
9912 +
9913 +#ifdef CONFIG_PAX_REFCOUNT
9914 + "jno 0f\n"
9915 + "sub %2,%0\n"
9916 + "int $4\n0:\n"
9917 + _ASM_EXTABLE(0b, 0b)
9918 +#endif
9919 +
9920 + : "=r" (new)
9921 + : "0" (c), "ir" (a));
9922 +
9923 + old = atomic64_cmpxchg(v, c, new);
9924 if (likely(old == c))
9925 break;
9926 c = old;
9927 }
9928 - return c != (u);
9929 + return c != u;
9930 }
9931
9932 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
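
atomic64_add_unless() above is the usual compare-and-swap retry loop, with the PAX_REFCOUNT check layered onto the c + a computation. The loop structure on its own, sketched with GCC's portable atomic builtins:

#include <stdio.h>

/* Add @a to *v unless *v equals @u; returns non-zero if the add happened.
 * This mirrors the cmpxchg loop in atomic64_add_unless(), minus the
 * overflow check on "c + a". */
static int add_unless(long *v, long a, long u)
{
        long c = __atomic_load_n(v, __ATOMIC_RELAXED);

        for (;;) {
                if (c == u)
                        return 0;
                /* on failure, c is refreshed with the current value of *v */
                if (__atomic_compare_exchange_n(v, &c, c + a, 0,
                                                __ATOMIC_SEQ_CST,
                                                __ATOMIC_RELAXED))
                        return 1;
        }
}

int main(void)
{
        long counter = 5;
        int done;

        done = add_unless(&counter, 1, 0);
        printf("first call:  added=%d, counter=%ld\n", done, counter);

        done = add_unless(&counter, 1, 6);
        printf("second call: added=%d, counter=%ld\n", done, counter);
        return 0;
}
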
9933 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9934 index b97596e..9bd48b06 100644
9935 --- a/arch/x86/include/asm/bitops.h
9936 +++ b/arch/x86/include/asm/bitops.h
9937 @@ -38,7 +38,7 @@
9938 * a mask operation on a byte.
9939 */
9940 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9941 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9942 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9943 #define CONST_MASK(nr) (1 << ((nr) & 7))
9944
9945 /**
9946 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9947 index 5e1a2ee..c9f9533 100644
9948 --- a/arch/x86/include/asm/boot.h
9949 +++ b/arch/x86/include/asm/boot.h
9950 @@ -11,10 +11,15 @@
9951 #include <asm/pgtable_types.h>
9952
9953 /* Physical address where kernel should be loaded. */
9954 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9955 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9956 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9957 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9958
9959 +#ifndef __ASSEMBLY__
9960 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9961 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9962 +#endif
9963 +
9964 /* Minimum kernel alignment, as a power of two */
9965 #ifdef CONFIG_X86_64
9966 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9967 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9968 index 48f99f1..d78ebf9 100644
9969 --- a/arch/x86/include/asm/cache.h
9970 +++ b/arch/x86/include/asm/cache.h
9971 @@ -5,12 +5,13 @@
9972
9973 /* L1 cache line size */
9974 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9975 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9976 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9977
9978 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9979 +#define __read_only __attribute__((__section__(".data..read_only")))
9980
9981 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
9982 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
9983 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
9984
9985 #ifdef CONFIG_X86_VSMP
9986 #ifdef CONFIG_SMP
9987 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9988 index 9863ee3..4a1f8e1 100644
9989 --- a/arch/x86/include/asm/cacheflush.h
9990 +++ b/arch/x86/include/asm/cacheflush.h
9991 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
9992 unsigned long pg_flags = pg->flags & _PGMT_MASK;
9993
9994 if (pg_flags == _PGMT_DEFAULT)
9995 - return -1;
9996 + return ~0UL;
9997 else if (pg_flags == _PGMT_WC)
9998 return _PAGE_CACHE_WC;
9999 else if (pg_flags == _PGMT_UC_MINUS)
10000 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10001 index 46fc474..b02b0f9 100644
10002 --- a/arch/x86/include/asm/checksum_32.h
10003 +++ b/arch/x86/include/asm/checksum_32.h
10004 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10005 int len, __wsum sum,
10006 int *src_err_ptr, int *dst_err_ptr);
10007
10008 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10009 + int len, __wsum sum,
10010 + int *src_err_ptr, int *dst_err_ptr);
10011 +
10012 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10013 + int len, __wsum sum,
10014 + int *src_err_ptr, int *dst_err_ptr);
10015 +
10016 /*
10017 * Note: when you get a NULL pointer exception here this means someone
10018 * passed in an incorrect kernel address to one of these functions.
10019 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10020 int *err_ptr)
10021 {
10022 might_sleep();
10023 - return csum_partial_copy_generic((__force void *)src, dst,
10024 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
10025 len, sum, err_ptr, NULL);
10026 }
10027
10028 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10029 {
10030 might_sleep();
10031 if (access_ok(VERIFY_WRITE, dst, len))
10032 - return csum_partial_copy_generic(src, (__force void *)dst,
10033 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10034 len, sum, NULL, err_ptr);
10035
10036 if (len)
10037 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10038 index 99480e5..d81165b 100644
10039 --- a/arch/x86/include/asm/cmpxchg.h
10040 +++ b/arch/x86/include/asm/cmpxchg.h
10041 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10042 __compiletime_error("Bad argument size for cmpxchg");
10043 extern void __xadd_wrong_size(void)
10044 __compiletime_error("Bad argument size for xadd");
10045 +extern void __xadd_check_overflow_wrong_size(void)
10046 + __compiletime_error("Bad argument size for xadd_check_overflow");
10047 extern void __add_wrong_size(void)
10048 __compiletime_error("Bad argument size for add");
10049 +extern void __add_check_overflow_wrong_size(void)
10050 + __compiletime_error("Bad argument size for add_check_overflow");
10051
10052 /*
10053 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10054 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10055 __ret; \
10056 })
10057
10058 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10059 + ({ \
10060 + __typeof__ (*(ptr)) __ret = (arg); \
10061 + switch (sizeof(*(ptr))) { \
10062 + case __X86_CASE_L: \
10063 + asm volatile (lock #op "l %0, %1\n" \
10064 + "jno 0f\n" \
10065 + "mov %0,%1\n" \
10066 + "int $4\n0:\n" \
10067 + _ASM_EXTABLE(0b, 0b) \
10068 + : "+r" (__ret), "+m" (*(ptr)) \
10069 + : : "memory", "cc"); \
10070 + break; \
10071 + case __X86_CASE_Q: \
10072 + asm volatile (lock #op "q %q0, %1\n" \
10073 + "jno 0f\n" \
10074 + "mov %0,%1\n" \
10075 + "int $4\n0:\n" \
10076 + _ASM_EXTABLE(0b, 0b) \
10077 + : "+r" (__ret), "+m" (*(ptr)) \
10078 + : : "memory", "cc"); \
10079 + break; \
10080 + default: \
10081 + __ ## op ## _check_overflow_wrong_size(); \
10082 + } \
10083 + __ret; \
10084 + })
10085 +
10086 /*
10087 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10088 * Since this is generally used to protect other memory information, we
10089 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10090 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10091 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10092
10093 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10094 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10095 +
10096 #define __add(ptr, inc, lock) \
10097 ({ \
10098 __typeof__ (*(ptr)) __ret = (inc); \
10099 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10100 index 340ee49..4238ced 100644
10101 --- a/arch/x86/include/asm/cpufeature.h
10102 +++ b/arch/x86/include/asm/cpufeature.h
10103 @@ -371,7 +371,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10104 ".section .discard,\"aw\",@progbits\n"
10105 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10106 ".previous\n"
10107 - ".section .altinstr_replacement,\"ax\"\n"
10108 + ".section .altinstr_replacement,\"a\"\n"
10109 "3: movb $1,%0\n"
10110 "4:\n"
10111 ".previous\n"
10112 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10113 index e95822d..a90010e 100644
10114 --- a/arch/x86/include/asm/desc.h
10115 +++ b/arch/x86/include/asm/desc.h
10116 @@ -4,6 +4,7 @@
10117 #include <asm/desc_defs.h>
10118 #include <asm/ldt.h>
10119 #include <asm/mmu.h>
10120 +#include <asm/pgtable.h>
10121
10122 #include <linux/smp.h>
10123
10124 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10125
10126 desc->type = (info->read_exec_only ^ 1) << 1;
10127 desc->type |= info->contents << 2;
10128 + desc->type |= info->seg_not_present ^ 1;
10129
10130 desc->s = 1;
10131 desc->dpl = 0x3;
10132 @@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10133 }
10134
10135 extern struct desc_ptr idt_descr;
10136 -extern gate_desc idt_table[];
10137 extern struct desc_ptr nmi_idt_descr;
10138 -extern gate_desc nmi_idt_table[];
10139 -
10140 -struct gdt_page {
10141 - struct desc_struct gdt[GDT_ENTRIES];
10142 -} __attribute__((aligned(PAGE_SIZE)));
10143 -
10144 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10145 +extern gate_desc idt_table[256];
10146 +extern gate_desc nmi_idt_table[256];
10147
10148 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10149 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10150 {
10151 - return per_cpu(gdt_page, cpu).gdt;
10152 + return cpu_gdt_table[cpu];
10153 }
10154
10155 #ifdef CONFIG_X86_64
10156 @@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10157 unsigned long base, unsigned dpl, unsigned flags,
10158 unsigned short seg)
10159 {
10160 - gate->a = (seg << 16) | (base & 0xffff);
10161 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10162 + gate->gate.offset_low = base;
10163 + gate->gate.seg = seg;
10164 + gate->gate.reserved = 0;
10165 + gate->gate.type = type;
10166 + gate->gate.s = 0;
10167 + gate->gate.dpl = dpl;
10168 + gate->gate.p = 1;
10169 + gate->gate.offset_high = base >> 16;
10170 }
10171
10172 #endif
10173 @@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10174
10175 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10176 {
10177 + pax_open_kernel();
10178 memcpy(&idt[entry], gate, sizeof(*gate));
10179 + pax_close_kernel();
10180 }
10181
10182 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10183 {
10184 + pax_open_kernel();
10185 memcpy(&ldt[entry], desc, 8);
10186 + pax_close_kernel();
10187 }
10188
10189 static inline void
10190 @@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10191 default: size = sizeof(*gdt); break;
10192 }
10193
10194 + pax_open_kernel();
10195 memcpy(&gdt[entry], desc, size);
10196 + pax_close_kernel();
10197 }
10198
10199 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10200 @@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10201
10202 static inline void native_load_tr_desc(void)
10203 {
10204 + pax_open_kernel();
10205 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10206 + pax_close_kernel();
10207 }
10208
10209 static inline void native_load_gdt(const struct desc_ptr *dtr)
10210 @@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10211 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10212 unsigned int i;
10213
10214 + pax_open_kernel();
10215 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10216 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10217 + pax_close_kernel();
10218 }
10219
10220 #define _LDT_empty(info) \
10221 @@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10222 }
10223
10224 #ifdef CONFIG_X86_64
10225 -static inline void set_nmi_gate(int gate, void *addr)
10226 +static inline void set_nmi_gate(int gate, const void *addr)
10227 {
10228 gate_desc s;
10229
10230 @@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10231 }
10232 #endif
10233
10234 -static inline void _set_gate(int gate, unsigned type, void *addr,
10235 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10236 unsigned dpl, unsigned ist, unsigned seg)
10237 {
10238 gate_desc s;
10239 @@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10240 * Pentium F0 0F bugfix can have resulted in the mapped
10241 * IDT being write-protected.
10242 */
10243 -static inline void set_intr_gate(unsigned int n, void *addr)
10244 +static inline void set_intr_gate(unsigned int n, const void *addr)
10245 {
10246 BUG_ON((unsigned)n > 0xFF);
10247 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10248 @@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10249 /*
10250 * This routine sets up an interrupt gate at directory privilege level 3.
10251 */
10252 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10253 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10254 {
10255 BUG_ON((unsigned)n > 0xFF);
10256 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10257 }
10258
10259 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10260 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10261 {
10262 BUG_ON((unsigned)n > 0xFF);
10263 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10264 }
10265
10266 -static inline void set_trap_gate(unsigned int n, void *addr)
10267 +static inline void set_trap_gate(unsigned int n, const void *addr)
10268 {
10269 BUG_ON((unsigned)n > 0xFF);
10270 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10271 @@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10272 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10273 {
10274 BUG_ON((unsigned)n > 0xFF);
10275 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10276 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10277 }
10278
10279 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10280 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10281 {
10282 BUG_ON((unsigned)n > 0xFF);
10283 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10284 }
10285
10286 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10287 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10288 {
10289 BUG_ON((unsigned)n > 0xFF);
10290 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10291 }
10292
10293 +#ifdef CONFIG_X86_32
10294 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10295 +{
10296 + struct desc_struct d;
10297 +
10298 + if (likely(limit))
10299 + limit = (limit - 1UL) >> PAGE_SHIFT;
10300 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10301 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10302 +}
10303 +#endif
10304 +
10305 #endif /* _ASM_X86_DESC_H */
10306 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10307 index 278441f..b95a174 100644
10308 --- a/arch/x86/include/asm/desc_defs.h
10309 +++ b/arch/x86/include/asm/desc_defs.h
10310 @@ -31,6 +31,12 @@ struct desc_struct {
10311 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10312 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10313 };
10314 + struct {
10315 + u16 offset_low;
10316 + u16 seg;
10317 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10318 + unsigned offset_high: 16;
10319 + } gate;
10320 };
10321 } __attribute__((packed));
10322
10323 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10324 index 3778256..c5d4fce 100644
10325 --- a/arch/x86/include/asm/e820.h
10326 +++ b/arch/x86/include/asm/e820.h
10327 @@ -69,7 +69,7 @@ struct e820map {
10328 #define ISA_START_ADDRESS 0xa0000
10329 #define ISA_END_ADDRESS 0x100000
10330
10331 -#define BIOS_BEGIN 0x000a0000
10332 +#define BIOS_BEGIN 0x000c0000
10333 #define BIOS_END 0x00100000
10334
10335 #define BIOS_ROM_BASE 0xffe00000
10336 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10337 index 5939f44..193f4a7 100644
10338 --- a/arch/x86/include/asm/elf.h
10339 +++ b/arch/x86/include/asm/elf.h
10340 @@ -243,7 +243,25 @@ extern int force_personality32;
10341 the loader. We need to make sure that it is out of the way of the program
10342 that it will "exec", and that there is sufficient room for the brk. */
10343
10344 +#ifdef CONFIG_PAX_SEGMEXEC
10345 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10346 +#else
10347 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10348 +#endif
10349 +
10350 +#ifdef CONFIG_PAX_ASLR
10351 +#ifdef CONFIG_X86_32
10352 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10353 +
10354 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10355 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10356 +#else
10357 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10358 +
10359 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10360 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10361 +#endif
10362 +#endif
10363
10364 /* This yields a mask that user programs can use to figure out what
10365 instruction set this CPU supports. This could be done in user space,
10366 @@ -296,16 +314,12 @@ do { \
10367
10368 #define ARCH_DLINFO \
10369 do { \
10370 - if (vdso_enabled) \
10371 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10372 - (unsigned long)current->mm->context.vdso); \
10373 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10374 } while (0)
10375
10376 #define ARCH_DLINFO_X32 \
10377 do { \
10378 - if (vdso_enabled) \
10379 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10380 - (unsigned long)current->mm->context.vdso); \
10381 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10382 } while (0)
10383
10384 #define AT_SYSINFO 32
10385 @@ -320,7 +334,7 @@ else \
10386
10387 #endif /* !CONFIG_X86_32 */
10388
10389 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10390 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10391
10392 #define VDSO_ENTRY \
10393 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10394 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
10395 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10396 #define compat_arch_setup_additional_pages syscall32_setup_pages
10397
10398 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10399 -#define arch_randomize_brk arch_randomize_brk
10400 -
10401 /*
10402 * True on X86_32 or when emulating IA32 on X86_64
10403 */
10404 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10405 index cc70c1c..d96d011 100644
10406 --- a/arch/x86/include/asm/emergency-restart.h
10407 +++ b/arch/x86/include/asm/emergency-restart.h
10408 @@ -15,6 +15,6 @@ enum reboot_type {
10409
10410 extern enum reboot_type reboot_type;
10411
10412 -extern void machine_emergency_restart(void);
10413 +extern void machine_emergency_restart(void) __noreturn;
10414
10415 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10416 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10417 index 4fa8815..71b121a 100644
10418 --- a/arch/x86/include/asm/fpu-internal.h
10419 +++ b/arch/x86/include/asm/fpu-internal.h
10420 @@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10421 {
10422 int err;
10423
10424 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10425 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10426 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10427 +#endif
10428 +
10429 /* See comment in fxsave() below. */
10430 #ifdef CONFIG_AS_FXSAVEQ
10431 asm volatile("1: fxrstorq %[fx]\n\t"
10432 @@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10433 {
10434 int err;
10435
10436 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10437 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10438 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10439 +#endif
10440 +
10441 /*
10442 * Clear the bytes not touched by the fxsave and reserved
10443 * for the SW usage.
10444 @@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10445 "emms\n\t" /* clear stack tags */
10446 "fildl %P[addr]", /* set F?P to defined value */
10447 X86_FEATURE_FXSAVE_LEAK,
10448 - [addr] "m" (tsk->thread.fpu.has_fpu));
10449 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10450
10451 return fpu_restore_checking(&tsk->thread.fpu);
10452 }
10453 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10454 index 71ecbcb..bac10b7 100644
10455 --- a/arch/x86/include/asm/futex.h
10456 +++ b/arch/x86/include/asm/futex.h
10457 @@ -11,16 +11,18 @@
10458 #include <asm/processor.h>
10459
10460 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10461 + typecheck(u32 __user *, uaddr); \
10462 asm volatile("1:\t" insn "\n" \
10463 "2:\t.section .fixup,\"ax\"\n" \
10464 "3:\tmov\t%3, %1\n" \
10465 "\tjmp\t2b\n" \
10466 "\t.previous\n" \
10467 _ASM_EXTABLE(1b, 3b) \
10468 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10469 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10470 : "i" (-EFAULT), "0" (oparg), "1" (0))
10471
10472 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10473 + typecheck(u32 __user *, uaddr); \
10474 asm volatile("1:\tmovl %2, %0\n" \
10475 "\tmovl\t%0, %3\n" \
10476 "\t" insn "\n" \
10477 @@ -33,7 +35,7 @@
10478 _ASM_EXTABLE(1b, 4b) \
10479 _ASM_EXTABLE(2b, 4b) \
10480 : "=&a" (oldval), "=&r" (ret), \
10481 - "+m" (*uaddr), "=&r" (tem) \
10482 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10483 : "r" (oparg), "i" (-EFAULT), "1" (0))
10484
10485 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10486 @@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10487
10488 switch (op) {
10489 case FUTEX_OP_SET:
10490 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10491 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10492 break;
10493 case FUTEX_OP_ADD:
10494 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10495 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10496 uaddr, oparg);
10497 break;
10498 case FUTEX_OP_OR:
10499 @@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10500 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10501 return -EFAULT;
10502
10503 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10504 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10505 "2:\t.section .fixup, \"ax\"\n"
10506 "3:\tmov %3, %0\n"
10507 "\tjmp 2b\n"
10508 "\t.previous\n"
10509 _ASM_EXTABLE(1b, 3b)
10510 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10511 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10512 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10513 : "memory"
10514 );
10515 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10516 index eb92a6e..b98b2f4 100644
10517 --- a/arch/x86/include/asm/hw_irq.h
10518 +++ b/arch/x86/include/asm/hw_irq.h
10519 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10520 extern void enable_IO_APIC(void);
10521
10522 /* Statistics */
10523 -extern atomic_t irq_err_count;
10524 -extern atomic_t irq_mis_count;
10525 +extern atomic_unchecked_t irq_err_count;
10526 +extern atomic_unchecked_t irq_mis_count;
10527
10528 /* EISA */
10529 extern void eisa_set_level_irq(unsigned int irq);
10530 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10531 index d8e8eef..99f81ae 100644
10532 --- a/arch/x86/include/asm/io.h
10533 +++ b/arch/x86/include/asm/io.h
10534 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10535
10536 #include <linux/vmalloc.h>
10537
10538 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10539 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10540 +{
10541 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10542 +}
10543 +
10544 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10545 +{
10546 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10547 +}
10548 +
10549 /*
10550 * Convert a virtual cached pointer to an uncached pointer
10551 */
10552 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10553 index bba3cf8..06bc8da 100644
10554 --- a/arch/x86/include/asm/irqflags.h
10555 +++ b/arch/x86/include/asm/irqflags.h
10556 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10557 sti; \
10558 sysexit
10559
10560 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10561 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10562 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10563 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10564 +
10565 #else
10566 #define INTERRUPT_RETURN iret
10567 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10568 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10569 index 5478825..839e88c 100644
10570 --- a/arch/x86/include/asm/kprobes.h
10571 +++ b/arch/x86/include/asm/kprobes.h
10572 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10573 #define RELATIVEJUMP_SIZE 5
10574 #define RELATIVECALL_OPCODE 0xe8
10575 #define RELATIVE_ADDR_SIZE 4
10576 -#define MAX_STACK_SIZE 64
10577 -#define MIN_STACK_SIZE(ADDR) \
10578 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10579 - THREAD_SIZE - (unsigned long)(ADDR))) \
10580 - ? (MAX_STACK_SIZE) \
10581 - : (((unsigned long)current_thread_info()) + \
10582 - THREAD_SIZE - (unsigned long)(ADDR)))
10583 +#define MAX_STACK_SIZE 64UL
10584 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10585
10586 #define flush_insn_slot(p) do { } while (0)
10587
10588 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10589 index e216ba0..453f6ec 100644
10590 --- a/arch/x86/include/asm/kvm_host.h
10591 +++ b/arch/x86/include/asm/kvm_host.h
10592 @@ -679,7 +679,7 @@ struct kvm_x86_ops {
10593 int (*check_intercept)(struct kvm_vcpu *vcpu,
10594 struct x86_instruction_info *info,
10595 enum x86_intercept_stage stage);
10596 -};
10597 +} __do_const;
10598
10599 struct kvm_arch_async_pf {
10600 u32 token;
10601 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10602 index c8bed0d..e5721fa 100644
10603 --- a/arch/x86/include/asm/local.h
10604 +++ b/arch/x86/include/asm/local.h
10605 @@ -17,26 +17,58 @@ typedef struct {
10606
10607 static inline void local_inc(local_t *l)
10608 {
10609 - asm volatile(_ASM_INC "%0"
10610 + asm volatile(_ASM_INC "%0\n"
10611 +
10612 +#ifdef CONFIG_PAX_REFCOUNT
10613 + "jno 0f\n"
10614 + _ASM_DEC "%0\n"
10615 + "int $4\n0:\n"
10616 + _ASM_EXTABLE(0b, 0b)
10617 +#endif
10618 +
10619 : "+m" (l->a.counter));
10620 }
10621
10622 static inline void local_dec(local_t *l)
10623 {
10624 - asm volatile(_ASM_DEC "%0"
10625 + asm volatile(_ASM_DEC "%0\n"
10626 +
10627 +#ifdef CONFIG_PAX_REFCOUNT
10628 + "jno 0f\n"
10629 + _ASM_INC "%0\n"
10630 + "int $4\n0:\n"
10631 + _ASM_EXTABLE(0b, 0b)
10632 +#endif
10633 +
10634 : "+m" (l->a.counter));
10635 }
10636
10637 static inline void local_add(long i, local_t *l)
10638 {
10639 - asm volatile(_ASM_ADD "%1,%0"
10640 + asm volatile(_ASM_ADD "%1,%0\n"
10641 +
10642 +#ifdef CONFIG_PAX_REFCOUNT
10643 + "jno 0f\n"
10644 + _ASM_SUB "%1,%0\n"
10645 + "int $4\n0:\n"
10646 + _ASM_EXTABLE(0b, 0b)
10647 +#endif
10648 +
10649 : "+m" (l->a.counter)
10650 : "ir" (i));
10651 }
10652
10653 static inline void local_sub(long i, local_t *l)
10654 {
10655 - asm volatile(_ASM_SUB "%1,%0"
10656 + asm volatile(_ASM_SUB "%1,%0\n"
10657 +
10658 +#ifdef CONFIG_PAX_REFCOUNT
10659 + "jno 0f\n"
10660 + _ASM_ADD "%1,%0\n"
10661 + "int $4\n0:\n"
10662 + _ASM_EXTABLE(0b, 0b)
10663 +#endif
10664 +
10665 : "+m" (l->a.counter)
10666 : "ir" (i));
10667 }
10668 @@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10669 {
10670 unsigned char c;
10671
10672 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10673 + asm volatile(_ASM_SUB "%2,%0\n"
10674 +
10675 +#ifdef CONFIG_PAX_REFCOUNT
10676 + "jno 0f\n"
10677 + _ASM_ADD "%2,%0\n"
10678 + "int $4\n0:\n"
10679 + _ASM_EXTABLE(0b, 0b)
10680 +#endif
10681 +
10682 + "sete %1\n"
10683 : "+m" (l->a.counter), "=qm" (c)
10684 : "ir" (i) : "memory");
10685 return c;
10686 @@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
10687 {
10688 unsigned char c;
10689
10690 - asm volatile(_ASM_DEC "%0; sete %1"
10691 + asm volatile(_ASM_DEC "%0\n"
10692 +
10693 +#ifdef CONFIG_PAX_REFCOUNT
10694 + "jno 0f\n"
10695 + _ASM_INC "%0\n"
10696 + "int $4\n0:\n"
10697 + _ASM_EXTABLE(0b, 0b)
10698 +#endif
10699 +
10700 + "sete %1\n"
10701 : "+m" (l->a.counter), "=qm" (c)
10702 : : "memory");
10703 return c != 0;
10704 @@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
10705 {
10706 unsigned char c;
10707
10708 - asm volatile(_ASM_INC "%0; sete %1"
10709 + asm volatile(_ASM_INC "%0\n"
10710 +
10711 +#ifdef CONFIG_PAX_REFCOUNT
10712 + "jno 0f\n"
10713 + _ASM_DEC "%0\n"
10714 + "int $4\n0:\n"
10715 + _ASM_EXTABLE(0b, 0b)
10716 +#endif
10717 +
10718 + "sete %1\n"
10719 : "+m" (l->a.counter), "=qm" (c)
10720 : : "memory");
10721 return c != 0;
10722 @@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
10723 {
10724 unsigned char c;
10725
10726 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10727 + asm volatile(_ASM_ADD "%2,%0\n"
10728 +
10729 +#ifdef CONFIG_PAX_REFCOUNT
10730 + "jno 0f\n"
10731 + _ASM_SUB "%2,%0\n"
10732 + "int $4\n0:\n"
10733 + _ASM_EXTABLE(0b, 0b)
10734 +#endif
10735 +
10736 + "sets %1\n"
10737 : "+m" (l->a.counter), "=qm" (c)
10738 : "ir" (i) : "memory");
10739 return c;
10740 @@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
10741 #endif
10742 /* Modern 486+ processor */
10743 __i = i;
10744 - asm volatile(_ASM_XADD "%0, %1;"
10745 + asm volatile(_ASM_XADD "%0, %1\n"
10746 +
10747 +#ifdef CONFIG_PAX_REFCOUNT
10748 + "jno 0f\n"
10749 + _ASM_MOV "%0,%1\n"
10750 + "int $4\n0:\n"
10751 + _ASM_EXTABLE(0b, 0b)
10752 +#endif
10753 +
10754 : "+r" (i), "+m" (l->a.counter)
10755 : : "memory");
10756 return i + __i;
10757 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10758 index 593e51d..fa69c9a 100644
10759 --- a/arch/x86/include/asm/mman.h
10760 +++ b/arch/x86/include/asm/mman.h
10761 @@ -5,4 +5,14 @@
10762
10763 #include <asm-generic/mman.h>
10764
10765 +#ifdef __KERNEL__
10766 +#ifndef __ASSEMBLY__
10767 +#ifdef CONFIG_X86_32
10768 +#define arch_mmap_check i386_mmap_check
10769 +int i386_mmap_check(unsigned long addr, unsigned long len,
10770 + unsigned long flags);
10771 +#endif
10772 +#endif
10773 +#endif
10774 +
10775 #endif /* _ASM_X86_MMAN_H */
10776 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10777 index 5f55e69..e20bfb1 100644
10778 --- a/arch/x86/include/asm/mmu.h
10779 +++ b/arch/x86/include/asm/mmu.h
10780 @@ -9,7 +9,7 @@
10781 * we put the segment information here.
10782 */
10783 typedef struct {
10784 - void *ldt;
10785 + struct desc_struct *ldt;
10786 int size;
10787
10788 #ifdef CONFIG_X86_64
10789 @@ -18,7 +18,19 @@ typedef struct {
10790 #endif
10791
10792 struct mutex lock;
10793 - void *vdso;
10794 + unsigned long vdso;
10795 +
10796 +#ifdef CONFIG_X86_32
10797 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10798 + unsigned long user_cs_base;
10799 + unsigned long user_cs_limit;
10800 +
10801 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10802 + cpumask_t cpu_user_cs_mask;
10803 +#endif
10804 +
10805 +#endif
10806 +#endif
10807 } mm_context_t;
10808
10809 #ifdef CONFIG_SMP
10810 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10811 index 6902152..da4283a 100644
10812 --- a/arch/x86/include/asm/mmu_context.h
10813 +++ b/arch/x86/include/asm/mmu_context.h
10814 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10815
10816 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10817 {
10818 +
10819 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10820 + unsigned int i;
10821 + pgd_t *pgd;
10822 +
10823 + pax_open_kernel();
10824 + pgd = get_cpu_pgd(smp_processor_id());
10825 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10826 + set_pgd_batched(pgd+i, native_make_pgd(0));
10827 + pax_close_kernel();
10828 +#endif
10829 +
10830 #ifdef CONFIG_SMP
10831 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10832 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10833 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10834 struct task_struct *tsk)
10835 {
10836 unsigned cpu = smp_processor_id();
10837 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10838 + int tlbstate = TLBSTATE_OK;
10839 +#endif
10840
10841 if (likely(prev != next)) {
10842 #ifdef CONFIG_SMP
10843 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10844 + tlbstate = percpu_read(cpu_tlbstate.state);
10845 +#endif
10846 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10847 percpu_write(cpu_tlbstate.active_mm, next);
10848 #endif
10849 cpumask_set_cpu(cpu, mm_cpumask(next));
10850
10851 /* Re-load page tables */
10852 +#ifdef CONFIG_PAX_PER_CPU_PGD
10853 + pax_open_kernel();
10854 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10855 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10856 + pax_close_kernel();
10857 + load_cr3(get_cpu_pgd(cpu));
10858 +#else
10859 load_cr3(next->pgd);
10860 +#endif
10861
10862 /* stop flush ipis for the previous mm */
10863 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10864 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10865 */
10866 if (unlikely(prev->context.ldt != next->context.ldt))
10867 load_LDT_nolock(&next->context);
10868 - }
10869 +
10870 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10871 + if (!(__supported_pte_mask & _PAGE_NX)) {
10872 + smp_mb__before_clear_bit();
10873 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10874 + smp_mb__after_clear_bit();
10875 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10876 + }
10877 +#endif
10878 +
10879 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10880 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10881 + prev->context.user_cs_limit != next->context.user_cs_limit))
10882 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10883 #ifdef CONFIG_SMP
10884 + else if (unlikely(tlbstate != TLBSTATE_OK))
10885 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10886 +#endif
10887 +#endif
10888 +
10889 + }
10890 else {
10891 +
10892 +#ifdef CONFIG_PAX_PER_CPU_PGD
10893 + pax_open_kernel();
10894 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10895 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10896 + pax_close_kernel();
10897 + load_cr3(get_cpu_pgd(cpu));
10898 +#endif
10899 +
10900 +#ifdef CONFIG_SMP
10901 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10902 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10903
10904 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10905 * tlb flush IPI delivery. We must reload CR3
10906 * to make sure to use no freed page tables.
10907 */
10908 +
10909 +#ifndef CONFIG_PAX_PER_CPU_PGD
10910 load_cr3(next->pgd);
10911 +#endif
10912 +
10913 load_LDT_nolock(&next->context);
10914 +
10915 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10916 + if (!(__supported_pte_mask & _PAGE_NX))
10917 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10918 +#endif
10919 +
10920 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10921 +#ifdef CONFIG_PAX_PAGEEXEC
10922 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10923 +#endif
10924 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10925 +#endif
10926 +
10927 }
10928 +#endif
10929 }
10930 -#endif
10931 }
10932
10933 #define activate_mm(prev, next) \
10934 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10935 index 9eae775..c914fea 100644
10936 --- a/arch/x86/include/asm/module.h
10937 +++ b/arch/x86/include/asm/module.h
10938 @@ -5,6 +5,7 @@
10939
10940 #ifdef CONFIG_X86_64
10941 /* X86_64 does not define MODULE_PROC_FAMILY */
10942 +#define MODULE_PROC_FAMILY ""
10943 #elif defined CONFIG_M386
10944 #define MODULE_PROC_FAMILY "386 "
10945 #elif defined CONFIG_M486
10946 @@ -59,8 +60,20 @@
10947 #error unknown processor family
10948 #endif
10949
10950 -#ifdef CONFIG_X86_32
10951 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10952 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10953 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10954 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10955 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10956 +#else
10957 +#define MODULE_PAX_KERNEXEC ""
10958 #endif
10959
10960 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10961 +#define MODULE_PAX_UDEREF "UDEREF "
10962 +#else
10963 +#define MODULE_PAX_UDEREF ""
10964 +#endif
10965 +
10966 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10967 +
10968 #endif /* _ASM_X86_MODULE_H */
10969 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10970 index 7639dbf..e08a58c 100644
10971 --- a/arch/x86/include/asm/page_64_types.h
10972 +++ b/arch/x86/include/asm/page_64_types.h
10973 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10974
10975 /* duplicated to the one in bootmem.h */
10976 extern unsigned long max_pfn;
10977 -extern unsigned long phys_base;
10978 +extern const unsigned long phys_base;
10979
10980 extern unsigned long __phys_addr(unsigned long);
10981 #define __phys_reloc_hide(x) (x)
10982 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10983 index aa0f913..0c5bc6a 100644
10984 --- a/arch/x86/include/asm/paravirt.h
10985 +++ b/arch/x86/include/asm/paravirt.h
10986 @@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10987 val);
10988 }
10989
10990 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10991 +{
10992 + pgdval_t val = native_pgd_val(pgd);
10993 +
10994 + if (sizeof(pgdval_t) > sizeof(long))
10995 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10996 + val, (u64)val >> 32);
10997 + else
10998 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10999 + val);
11000 +}
11001 +
11002 static inline void pgd_clear(pgd_t *pgdp)
11003 {
11004 set_pgd(pgdp, __pgd(0));
11005 @@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11006 pv_mmu_ops.set_fixmap(idx, phys, flags);
11007 }
11008
11009 +#ifdef CONFIG_PAX_KERNEXEC
11010 +static inline unsigned long pax_open_kernel(void)
11011 +{
11012 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11013 +}
11014 +
11015 +static inline unsigned long pax_close_kernel(void)
11016 +{
11017 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11018 +}
11019 +#else
11020 +static inline unsigned long pax_open_kernel(void) { return 0; }
11021 +static inline unsigned long pax_close_kernel(void) { return 0; }
11022 +#endif
11023 +
11024 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11025
11026 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11027 @@ -965,7 +992,7 @@ extern void default_banner(void);
11028
11029 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11030 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11031 -#define PARA_INDIRECT(addr) *%cs:addr
11032 +#define PARA_INDIRECT(addr) *%ss:addr
11033 #endif
11034
11035 #define INTERRUPT_RETURN \
11036 @@ -1042,6 +1069,21 @@ extern void default_banner(void);
11037 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11038 CLBR_NONE, \
11039 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11040 +
11041 +#define GET_CR0_INTO_RDI \
11042 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11043 + mov %rax,%rdi
11044 +
11045 +#define SET_RDI_INTO_CR0 \
11046 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11047 +
11048 +#define GET_CR3_INTO_RDI \
11049 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11050 + mov %rax,%rdi
11051 +
11052 +#define SET_RDI_INTO_CR3 \
11053 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11054 +
11055 #endif /* CONFIG_X86_32 */
11056
11057 #endif /* __ASSEMBLY__ */
11058 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11059 index 8e8b9a4..f07d725 100644
11060 --- a/arch/x86/include/asm/paravirt_types.h
11061 +++ b/arch/x86/include/asm/paravirt_types.h
11062 @@ -84,20 +84,20 @@ struct pv_init_ops {
11063 */
11064 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11065 unsigned long addr, unsigned len);
11066 -};
11067 +} __no_const;
11068
11069
11070 struct pv_lazy_ops {
11071 /* Set deferred update mode, used for batching operations. */
11072 void (*enter)(void);
11073 void (*leave)(void);
11074 -};
11075 +} __no_const;
11076
11077 struct pv_time_ops {
11078 unsigned long long (*sched_clock)(void);
11079 unsigned long long (*steal_clock)(int cpu);
11080 unsigned long (*get_tsc_khz)(void);
11081 -};
11082 +} __no_const;
11083
11084 struct pv_cpu_ops {
11085 /* hooks for various privileged instructions */
11086 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
11087
11088 void (*start_context_switch)(struct task_struct *prev);
11089 void (*end_context_switch)(struct task_struct *next);
11090 -};
11091 +} __no_const;
11092
11093 struct pv_irq_ops {
11094 /*
11095 @@ -224,7 +224,7 @@ struct pv_apic_ops {
11096 unsigned long start_eip,
11097 unsigned long start_esp);
11098 #endif
11099 -};
11100 +} __no_const;
11101
11102 struct pv_mmu_ops {
11103 unsigned long (*read_cr2)(void);
11104 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
11105 struct paravirt_callee_save make_pud;
11106
11107 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11108 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11109 #endif /* PAGETABLE_LEVELS == 4 */
11110 #endif /* PAGETABLE_LEVELS >= 3 */
11111
11112 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
11113 an mfn. We can tell which is which from the index. */
11114 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11115 phys_addr_t phys, pgprot_t flags);
11116 +
11117 +#ifdef CONFIG_PAX_KERNEXEC
11118 + unsigned long (*pax_open_kernel)(void);
11119 + unsigned long (*pax_close_kernel)(void);
11120 +#endif
11121 +
11122 };
11123
11124 struct arch_spinlock;
11125 @@ -334,7 +341,7 @@ struct pv_lock_ops {
11126 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11127 int (*spin_trylock)(struct arch_spinlock *lock);
11128 void (*spin_unlock)(struct arch_spinlock *lock);
11129 -};
11130 +} __no_const;
11131
11132 /* This contains all the paravirt structures: we get a convenient
11133 * number for each function using the offset which we use to indicate
11134 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11135 index b4389a4..7024269 100644
11136 --- a/arch/x86/include/asm/pgalloc.h
11137 +++ b/arch/x86/include/asm/pgalloc.h
11138 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11139 pmd_t *pmd, pte_t *pte)
11140 {
11141 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11142 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11143 +}
11144 +
11145 +static inline void pmd_populate_user(struct mm_struct *mm,
11146 + pmd_t *pmd, pte_t *pte)
11147 +{
11148 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11149 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11150 }
11151
11152 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11153
11154 #ifdef CONFIG_X86_PAE
11155 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11156 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11157 +{
11158 + pud_populate(mm, pudp, pmd);
11159 +}
11160 #else /* !CONFIG_X86_PAE */
11161 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11162 {
11163 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11164 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11165 }
11166 +
11167 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11168 +{
11169 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11170 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11171 +}
11172 #endif /* CONFIG_X86_PAE */
11173
11174 #if PAGETABLE_LEVELS > 3
11175 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11176 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11177 }
11178
11179 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11180 +{
11181 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11182 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11183 +}
11184 +
11185 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11186 {
11187 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11188 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11189 index 98391db..8f6984e 100644
11190 --- a/arch/x86/include/asm/pgtable-2level.h
11191 +++ b/arch/x86/include/asm/pgtable-2level.h
11192 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11193
11194 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11195 {
11196 + pax_open_kernel();
11197 *pmdp = pmd;
11198 + pax_close_kernel();
11199 }
11200
11201 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11202 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11203 index effff47..bbb8295 100644
11204 --- a/arch/x86/include/asm/pgtable-3level.h
11205 +++ b/arch/x86/include/asm/pgtable-3level.h
11206 @@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
11207 ptep->pte_low = pte.pte_low;
11208 }
11209
11210 +#define __HAVE_ARCH_READ_PMD_ATOMIC
11211 +/*
11212 + * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
11213 + * a "*pmdp" dereference done by gcc. Problem is, in certain places
11214 + * where pte_offset_map_lock is called, concurrent page faults are
11215 + * allowed, if the mmap_sem is held for reading. An example is mincore
11216 + * vs page faults vs MADV_DONTNEED. On the page fault side
11217 + * pmd_populate rightfully does a set_64bit, but if we're reading the
11218 + * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
11219 + * because gcc will not read the 64 bits of the pmd atomically. To fix
11220 + * this, all places running pmd_offset_map_lock() while holding the
11221 + * mmap_sem in read mode shall read the pmdp pointer using this
11222 + * function to know if the pmd is null or not, and in turn to know if
11223 + * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
11224 + * operations.
11225 + *
11226 + * Without THP, if the mmap_sem is held for reading, the
11227 + * pmd can only transition from null to not null while read_pmd_atomic runs.
11228 + * So there's no need to literally read it atomically.
11229 + *
11230 + * With THP, if the mmap_sem is held for reading, the pmd can become
11231 + * THP or null or point to a pte (and in turn become "stable") at any
11232 + * time under read_pmd_atomic, so it's mandatory to read it atomically
11233 + * with cmpxchg8b.
11234 + */
11235 +#ifndef CONFIG_TRANSPARENT_HUGEPAGE
11236 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11237 +{
11238 + pmdval_t ret;
11239 + u32 *tmp = (u32 *)pmdp;
11240 +
11241 + ret = (pmdval_t) (*tmp);
11242 + if (ret) {
11243 + /*
11244 + * If the low part is null, we must not read the high part
11245 + * or we can end up with a partial pmd.
11246 + */
11247 + smp_rmb();
11248 + ret |= ((pmdval_t)*(tmp + 1)) << 32;
11249 + }
11250 +
11251 + return __pmd(ret);
11252 +}
11253 +#else /* CONFIG_TRANSPARENT_HUGEPAGE */
11254 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11255 +{
11256 + return __pmd(atomic64_read((atomic64_t *)pmdp));
11257 +}
11258 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
11259 +
11260 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11261 {
11262 set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
11263 @@ -38,12 +88,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11264
11265 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11266 {
11267 + pax_open_kernel();
11268 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11269 + pax_close_kernel();
11270 }
11271
11272 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11273 {
11274 + pax_open_kernel();
11275 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11276 + pax_close_kernel();
11277 }
11278
11279 /*
11280 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11281 index 49afb3f..91a8c63 100644
11282 --- a/arch/x86/include/asm/pgtable.h
11283 +++ b/arch/x86/include/asm/pgtable.h
11284 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11285
11286 #ifndef __PAGETABLE_PUD_FOLDED
11287 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11288 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11289 #define pgd_clear(pgd) native_pgd_clear(pgd)
11290 #endif
11291
11292 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11293
11294 #define arch_end_context_switch(prev) do {} while(0)
11295
11296 +#define pax_open_kernel() native_pax_open_kernel()
11297 +#define pax_close_kernel() native_pax_close_kernel()
11298 #endif /* CONFIG_PARAVIRT */
11299
11300 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11301 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11302 +
11303 +#ifdef CONFIG_PAX_KERNEXEC
11304 +static inline unsigned long native_pax_open_kernel(void)
11305 +{
11306 + unsigned long cr0;
11307 +
11308 + preempt_disable();
11309 + barrier();
11310 + cr0 = read_cr0() ^ X86_CR0_WP;
11311 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11312 + write_cr0(cr0);
11313 + return cr0 ^ X86_CR0_WP;
11314 +}
11315 +
11316 +static inline unsigned long native_pax_close_kernel(void)
11317 +{
11318 + unsigned long cr0;
11319 +
11320 + cr0 = read_cr0() ^ X86_CR0_WP;
11321 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11322 + write_cr0(cr0);
11323 + barrier();
11324 + preempt_enable_no_resched();
11325 + return cr0 ^ X86_CR0_WP;
11326 +}
11327 +#else
11328 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11329 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11330 +#endif
11331 +
11332 /*
11333 * The following only work if pte_present() is true.
11334 * Undefined behaviour if not..
11335 */
11336 +static inline int pte_user(pte_t pte)
11337 +{
11338 + return pte_val(pte) & _PAGE_USER;
11339 +}
11340 +
11341 static inline int pte_dirty(pte_t pte)
11342 {
11343 return pte_flags(pte) & _PAGE_DIRTY;
11344 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11345 return pte_clear_flags(pte, _PAGE_RW);
11346 }
11347
11348 +static inline pte_t pte_mkread(pte_t pte)
11349 +{
11350 + return __pte(pte_val(pte) | _PAGE_USER);
11351 +}
11352 +
11353 static inline pte_t pte_mkexec(pte_t pte)
11354 {
11355 - return pte_clear_flags(pte, _PAGE_NX);
11356 +#ifdef CONFIG_X86_PAE
11357 + if (__supported_pte_mask & _PAGE_NX)
11358 + return pte_clear_flags(pte, _PAGE_NX);
11359 + else
11360 +#endif
11361 + return pte_set_flags(pte, _PAGE_USER);
11362 +}
11363 +
11364 +static inline pte_t pte_exprotect(pte_t pte)
11365 +{
11366 +#ifdef CONFIG_X86_PAE
11367 + if (__supported_pte_mask & _PAGE_NX)
11368 + return pte_set_flags(pte, _PAGE_NX);
11369 + else
11370 +#endif
11371 + return pte_clear_flags(pte, _PAGE_USER);
11372 }
11373
11374 static inline pte_t pte_mkdirty(pte_t pte)
11375 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11376 #endif
11377
11378 #ifndef __ASSEMBLY__
11379 +
11380 +#ifdef CONFIG_PAX_PER_CPU_PGD
11381 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11382 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11383 +{
11384 + return cpu_pgd[cpu];
11385 +}
11386 +#endif
11387 +
11388 #include <linux/mm_types.h>
11389
11390 static inline int pte_none(pte_t pte)
11391 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11392
11393 static inline int pgd_bad(pgd_t pgd)
11394 {
11395 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11396 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11397 }
11398
11399 static inline int pgd_none(pgd_t pgd)
11400 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11401 * pgd_offset() returns a (pgd_t *)
10402 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
11403 */
11404 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11405 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11406 +
11407 +#ifdef CONFIG_PAX_PER_CPU_PGD
11408 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11409 +#endif
11410 +
11411 /*
11412 * a shortcut which implies the use of the kernel's pgd, instead
11413 * of a process's
11414 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11415 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11416 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11417
11418 +#ifdef CONFIG_X86_32
11419 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11420 +#else
11421 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11422 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11423 +
11424 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11425 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11426 +#else
11427 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11428 +#endif
11429 +
11430 +#endif
11431 +
11432 #ifndef __ASSEMBLY__
11433
11434 extern int direct_gbpages;
11435 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11436 * dst and src can be on the same page, but the range must not overlap,
11437 * and must not cross a page boundary.
11438 */
11439 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11440 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11441 {
11442 - memcpy(dst, src, count * sizeof(pgd_t));
11443 + pax_open_kernel();
11444 + while (count--)
11445 + *dst++ = *src++;
11446 + pax_close_kernel();
11447 }
11448
11449 +#ifdef CONFIG_PAX_PER_CPU_PGD
11450 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
11451 +#endif
11452 +
11453 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11454 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
11455 +#else
11456 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
11457 +#endif
11458
11459 #include <asm-generic/pgtable.h>
11460 #endif /* __ASSEMBLY__ */
11461 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11462 index 0c92113..34a77c6 100644
11463 --- a/arch/x86/include/asm/pgtable_32.h
11464 +++ b/arch/x86/include/asm/pgtable_32.h
11465 @@ -25,9 +25,6 @@
11466 struct mm_struct;
11467 struct vm_area_struct;
11468
11469 -extern pgd_t swapper_pg_dir[1024];
11470 -extern pgd_t initial_page_table[1024];
11471 -
11472 static inline void pgtable_cache_init(void) { }
11473 static inline void check_pgt_cache(void) { }
11474 void paging_init(void);
11475 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11476 # include <asm/pgtable-2level.h>
11477 #endif
11478
11479 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11480 +extern pgd_t initial_page_table[PTRS_PER_PGD];
11481 +#ifdef CONFIG_X86_PAE
11482 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11483 +#endif
11484 +
11485 #if defined(CONFIG_HIGHPTE)
11486 #define pte_offset_map(dir, address) \
11487 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11488 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11489 /* Clear a kernel PTE and flush it from the TLB */
11490 #define kpte_clear_flush(ptep, vaddr) \
11491 do { \
11492 + pax_open_kernel(); \
11493 pte_clear(&init_mm, (vaddr), (ptep)); \
11494 + pax_close_kernel(); \
11495 __flush_tlb_one((vaddr)); \
11496 } while (0)
11497
11498 @@ -74,6 +79,9 @@ do { \
11499
11500 #endif /* !__ASSEMBLY__ */
11501
11502 +#define HAVE_ARCH_UNMAPPED_AREA
11503 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11504 +
11505 /*
11506 * kern_addr_valid() is (1) for FLATMEM and (0) for
11507 * SPARSEMEM and DISCONTIGMEM
11508 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11509 index ed5903b..c7fe163 100644
11510 --- a/arch/x86/include/asm/pgtable_32_types.h
11511 +++ b/arch/x86/include/asm/pgtable_32_types.h
11512 @@ -8,7 +8,7 @@
11513 */
11514 #ifdef CONFIG_X86_PAE
11515 # include <asm/pgtable-3level_types.h>
11516 -# define PMD_SIZE (1UL << PMD_SHIFT)
11517 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11518 # define PMD_MASK (~(PMD_SIZE - 1))
11519 #else
11520 # include <asm/pgtable-2level_types.h>
11521 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11522 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11523 #endif
11524
11525 +#ifdef CONFIG_PAX_KERNEXEC
11526 +#ifndef __ASSEMBLY__
11527 +extern unsigned char MODULES_EXEC_VADDR[];
11528 +extern unsigned char MODULES_EXEC_END[];
11529 +#endif
11530 +#include <asm/boot.h>
11531 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11532 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11533 +#else
11534 +#define ktla_ktva(addr) (addr)
11535 +#define ktva_ktla(addr) (addr)
11536 +#endif
11537 +
11538 #define MODULES_VADDR VMALLOC_START
11539 #define MODULES_END VMALLOC_END
11540 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11541 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11542 index 975f709..9f779c9 100644
11543 --- a/arch/x86/include/asm/pgtable_64.h
11544 +++ b/arch/x86/include/asm/pgtable_64.h
11545 @@ -16,10 +16,14 @@
11546
11547 extern pud_t level3_kernel_pgt[512];
11548 extern pud_t level3_ident_pgt[512];
11549 +extern pud_t level3_vmalloc_start_pgt[512];
11550 +extern pud_t level3_vmalloc_end_pgt[512];
11551 +extern pud_t level3_vmemmap_pgt[512];
11552 +extern pud_t level2_vmemmap_pgt[512];
11553 extern pmd_t level2_kernel_pgt[512];
11554 extern pmd_t level2_fixmap_pgt[512];
11555 -extern pmd_t level2_ident_pgt[512];
11556 -extern pgd_t init_level4_pgt[];
11557 +extern pmd_t level2_ident_pgt[512*2];
11558 +extern pgd_t init_level4_pgt[512];
11559
11560 #define swapper_pg_dir init_level4_pgt
11561
11562 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11563
11564 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11565 {
11566 + pax_open_kernel();
11567 *pmdp = pmd;
11568 + pax_close_kernel();
11569 }
11570
11571 static inline void native_pmd_clear(pmd_t *pmd)
11572 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11573
11574 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11575 {
11576 + pax_open_kernel();
11577 *pudp = pud;
11578 + pax_close_kernel();
11579 }
11580
11581 static inline void native_pud_clear(pud_t *pud)
11582 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11583
11584 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11585 {
11586 + pax_open_kernel();
11587 + *pgdp = pgd;
11588 + pax_close_kernel();
11589 +}
11590 +
11591 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11592 +{
11593 *pgdp = pgd;
11594 }
11595
11596 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11597 index 766ea16..5b96cb3 100644
11598 --- a/arch/x86/include/asm/pgtable_64_types.h
11599 +++ b/arch/x86/include/asm/pgtable_64_types.h
11600 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11601 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11602 #define MODULES_END _AC(0xffffffffff000000, UL)
11603 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11604 +#define MODULES_EXEC_VADDR MODULES_VADDR
11605 +#define MODULES_EXEC_END MODULES_END
11606 +
11607 +#define ktla_ktva(addr) (addr)
11608 +#define ktva_ktla(addr) (addr)
11609
11610 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11611 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11612 index 013286a..8b42f4f 100644
11613 --- a/arch/x86/include/asm/pgtable_types.h
11614 +++ b/arch/x86/include/asm/pgtable_types.h
11615 @@ -16,13 +16,12 @@
11616 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11617 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11618 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11619 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11620 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11621 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11622 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11623 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11624 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11625 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11626 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11627 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11628 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11629 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11630
11631 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11632 @@ -40,7 +39,6 @@
11633 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11634 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11635 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11636 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11637 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11638 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11639 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11640 @@ -57,8 +55,10 @@
11641
11642 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11643 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11644 -#else
11645 +#elif defined(CONFIG_KMEMCHECK)
11646 #define _PAGE_NX (_AT(pteval_t, 0))
11647 +#else
11648 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11649 #endif
11650
11651 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11652 @@ -96,6 +96,9 @@
11653 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11654 _PAGE_ACCESSED)
11655
11656 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11657 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11658 +
11659 #define __PAGE_KERNEL_EXEC \
11660 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11661 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11662 @@ -106,7 +109,7 @@
11663 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11664 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11665 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11666 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11667 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11668 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11669 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11670 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11671 @@ -168,8 +171,8 @@
11672 * bits are combined, this will allow user to access the high address mapped
11673 * VDSO in the presence of CONFIG_COMPAT_VDSO
11674 */
11675 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11676 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11677 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11678 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11679 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11680 #endif
11681
11682 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11683 {
11684 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11685 }
11686 +#endif
11687
11688 +#if PAGETABLE_LEVELS == 3
11689 +#include <asm-generic/pgtable-nopud.h>
11690 +#endif
11691 +
11692 +#if PAGETABLE_LEVELS == 2
11693 +#include <asm-generic/pgtable-nopmd.h>
11694 +#endif
11695 +
11696 +#ifndef __ASSEMBLY__
11697 #if PAGETABLE_LEVELS > 3
11698 typedef struct { pudval_t pud; } pud_t;
11699
11700 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11701 return pud.pud;
11702 }
11703 #else
11704 -#include <asm-generic/pgtable-nopud.h>
11705 -
11706 static inline pudval_t native_pud_val(pud_t pud)
11707 {
11708 return native_pgd_val(pud.pgd);
11709 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11710 return pmd.pmd;
11711 }
11712 #else
11713 -#include <asm-generic/pgtable-nopmd.h>
11714 -
11715 static inline pmdval_t native_pmd_val(pmd_t pmd)
11716 {
11717 return native_pgd_val(pmd.pud.pgd);
11718 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11719
11720 extern pteval_t __supported_pte_mask;
11721 extern void set_nx(void);
11722 -extern int nx_enabled;
11723
11724 #define pgprot_writecombine pgprot_writecombine
11725 extern pgprot_t pgprot_writecombine(pgprot_t prot);
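
One detail worth noting in the pgtable_types.h hunk: with 2-level (non-PAE) paging a PTE is only 32 bits wide, so the hardware NX bit (bit 63) does not exist. Instead of defining _PAGE_NX away to 0 (the old behaviour, which the new KMEMCHECK branch keeps), the patch parks it on _PAGE_BIT_HIDDEN, a software-available PTE bit, so non-executable intent is still recorded and can be acted on in software. A small stand-alone illustration, with values that are illustrative rather than taken from the kernel headers:

/* Sketch: a software NX flag kept in a software-available PTE bit when the
 * hardware bit 63 is unavailable.  Bit numbers and values are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_BIT_HIDDEN 11u                        /* software-available PTE bit */
#define PAGE_NX_SOFT    (UINT32_C(1) << PAGE_BIT_HIDDEN)

static int pte_exec_allowed(uint32_t pte)
{
        return !(pte & PAGE_NX_SOFT);
}

int main(void)
{
        uint32_t pte = 0x063;                      /* PRESENT+RW+DIRTY+ACCESSED    */
        printf("exec allowed: %d\n", pte_exec_allowed(pte));
        pte |= PAGE_NX_SOFT;                       /* mark the page non-executable */
        printf("exec allowed: %d\n", pte_exec_allowed(pte));
        return 0;
}
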
11726 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11727 index 4fa7dcc..764e33a 100644
11728 --- a/arch/x86/include/asm/processor.h
11729 +++ b/arch/x86/include/asm/processor.h
11730 @@ -276,7 +276,7 @@ struct tss_struct {
11731
11732 } ____cacheline_aligned;
11733
11734 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11735 +extern struct tss_struct init_tss[NR_CPUS];
11736
11737 /*
11738 * Save the original ist values for checking stack pointers during debugging
11739 @@ -807,11 +807,18 @@ static inline void spin_lock_prefetch(const void *x)
11740 */
11741 #define TASK_SIZE PAGE_OFFSET
11742 #define TASK_SIZE_MAX TASK_SIZE
11743 +
11744 +#ifdef CONFIG_PAX_SEGMEXEC
11745 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11746 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11747 +#else
11748 #define STACK_TOP TASK_SIZE
11749 -#define STACK_TOP_MAX STACK_TOP
11750 +#endif
11751 +
11752 +#define STACK_TOP_MAX TASK_SIZE
11753
11754 #define INIT_THREAD { \
11755 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11756 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11757 .vm86_info = NULL, \
11758 .sysenter_cs = __KERNEL_CS, \
11759 .io_bitmap_ptr = NULL, \
11760 @@ -825,7 +832,7 @@ static inline void spin_lock_prefetch(const void *x)
11761 */
11762 #define INIT_TSS { \
11763 .x86_tss = { \
11764 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11765 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11766 .ss0 = __KERNEL_DS, \
11767 .ss1 = __KERNEL_CS, \
11768 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11769 @@ -836,11 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
11770 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11771
11772 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11773 -#define KSTK_TOP(info) \
11774 -({ \
11775 - unsigned long *__ptr = (unsigned long *)(info); \
11776 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11777 -})
11778 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11779
11780 /*
11781 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11782 @@ -855,7 +858,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11783 #define task_pt_regs(task) \
11784 ({ \
11785 struct pt_regs *__regs__; \
11786 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11787 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11788 __regs__ - 1; \
11789 })
11790
11791 @@ -865,13 +868,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11792 /*
11793 * User space process size. 47bits minus one guard page.
11794 */
11795 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11796 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11797
11798 /* This decides where the kernel will search for a free chunk of vm
11799 * space during mmap's.
11800 */
11801 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11802 - 0xc0000000 : 0xFFFFe000)
11803 + 0xc0000000 : 0xFFFFf000)
11804
11805 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
11806 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11807 @@ -882,11 +885,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11808 #define STACK_TOP_MAX TASK_SIZE_MAX
11809
11810 #define INIT_THREAD { \
11811 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11812 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11813 }
11814
11815 #define INIT_TSS { \
11816 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11817 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11818 }
11819
11820 /*
11821 @@ -914,6 +917,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11822 */
11823 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11824
11825 +#ifdef CONFIG_PAX_SEGMEXEC
11826 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11827 +#endif
11828 +
11829 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11830
11831 /* Get/set a process' ability to use the timestamp counter instruction */
11832 @@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11833
11834 void cpu_idle_wait(void);
11835
11836 -extern unsigned long arch_align_stack(unsigned long sp);
11837 +#define arch_align_stack(x) ((x) & ~0xfUL)
11838 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11839
11840 void default_idle(void);
11841 bool set_pm_idle_to_default(void);
11842
11843 -void stop_this_cpu(void *dummy);
11844 +void stop_this_cpu(void *dummy) __noreturn;
11845
11846 #endif /* _ASM_X86_PROCESSOR_H */
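
The processor.h changes above make STACK_TOP depend on the task's PaX flags: under SEGMEXEC the usable user address space is halved (SEGMEXEC_TASK_SIZE is TASK_SIZE / 2), so with the common 32-bit 3G/1G split the stack top drops from 3 GiB to 1.5 GiB. A quick arithmetic check, assuming the default PAGE_OFFSET of 0xC0000000 (an assumption of this sketch, not something the hunk fixes):

/* Sketch: where STACK_TOP lands with and without SEGMEXEC, assuming the
 * default 32-bit 3G/1G split (TASK_SIZE == PAGE_OFFSET == 0xC0000000). */
#include <stdio.h>

int main(void)
{
        unsigned long task_size = 0xC0000000UL;            /* 3 GiB   */
        unsigned long segmexec_task_size = task_size / 2;  /* 1.5 GiB */

        printf("STACK_TOP, no SEGMEXEC: %#lx\n", task_size);
        printf("STACK_TOP, SEGMEXEC:    %#lx\n", segmexec_task_size);
        return 0;
}
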
11847 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11848 index dcfde52..dbfea06 100644
11849 --- a/arch/x86/include/asm/ptrace.h
11850 +++ b/arch/x86/include/asm/ptrace.h
11851 @@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11852 }
11853
11854 /*
11855 - * user_mode_vm(regs) determines whether a register set came from user mode.
11856 + * user_mode(regs) determines whether a register set came from user mode.
11857 * This is true if V8086 mode was enabled OR if the register set was from
11858 * protected mode with RPL-3 CS value. This tricky test checks that with
11859 * one comparison. Many places in the kernel can bypass this full check
11860 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11861 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11862 + * be used.
11863 */
11864 -static inline int user_mode(struct pt_regs *regs)
11865 +static inline int user_mode_novm(struct pt_regs *regs)
11866 {
11867 #ifdef CONFIG_X86_32
11868 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11869 #else
11870 - return !!(regs->cs & 3);
11871 + return !!(regs->cs & SEGMENT_RPL_MASK);
11872 #endif
11873 }
11874
11875 -static inline int user_mode_vm(struct pt_regs *regs)
11876 +static inline int user_mode(struct pt_regs *regs)
11877 {
11878 #ifdef CONFIG_X86_32
11879 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11880 USER_RPL;
11881 #else
11882 - return user_mode(regs);
11883 + return user_mode_novm(regs);
11884 #endif
11885 }
11886
11887 @@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11888 #ifdef CONFIG_X86_64
11889 static inline bool user_64bit_mode(struct pt_regs *regs)
11890 {
11891 + unsigned long cs = regs->cs & 0xffff;
11892 #ifndef CONFIG_PARAVIRT
11893 /*
11894 * On non-paravirt systems, this is the only long mode CPL 3
11895 * selector. We do not allow long mode selectors in the LDT.
11896 */
11897 - return regs->cs == __USER_CS;
11898 + return cs == __USER_CS;
11899 #else
11900 /* Headers are too twisted for this to go in paravirt.h. */
11901 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11902 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11903 #endif
11904 }
11905 #endif
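
The renamed user_mode() above keeps the original one-comparison trick: on 32-bit, a register set counts as user mode if the saved CS has RPL 3 or the saved EFLAGS has the VM bit set, and OR-ing the two masks lets a single >= USER_RPL test cover both cases. A stand-alone version of just that predicate (constants mirror the kernel's: RPL mask 3, USER_RPL 3, EFLAGS.VM is bit 17):

/* Sketch: the combined CPL-3-or-V8086 test used by user_mode() on 32-bit. */
#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      (1UL << 17)                    /* EFLAGS.VM */

static int user_mode32(unsigned long cs, unsigned long eflags)
{
        return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
        printf("%d\n", user_mode32(0x73, 0));           /* ring-3 selector -> 1 */
        printf("%d\n", user_mode32(0x10, 0));           /* kernel selector -> 0 */
        printf("%d\n", user_mode32(0x10, X86_VM_MASK)); /* vm86 frame      -> 1 */
        return 0;
}
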
11906 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11907 index 92f29706..a79cbbb 100644
11908 --- a/arch/x86/include/asm/reboot.h
11909 +++ b/arch/x86/include/asm/reboot.h
11910 @@ -6,19 +6,19 @@
11911 struct pt_regs;
11912
11913 struct machine_ops {
11914 - void (*restart)(char *cmd);
11915 - void (*halt)(void);
11916 - void (*power_off)(void);
11917 + void (* __noreturn restart)(char *cmd);
11918 + void (* __noreturn halt)(void);
11919 + void (* __noreturn power_off)(void);
11920 void (*shutdown)(void);
11921 void (*crash_shutdown)(struct pt_regs *);
11922 - void (*emergency_restart)(void);
11923 -};
11924 + void (* __noreturn emergency_restart)(void);
11925 +} __no_const;
11926
11927 extern struct machine_ops machine_ops;
11928
11929 void native_machine_crash_shutdown(struct pt_regs *regs);
11930 void native_machine_shutdown(void);
11931 -void machine_real_restart(unsigned int type);
11932 +void machine_real_restart(unsigned int type) __noreturn;
11933 /* These must match dispatch_table in reboot_32.S */
11934 #define MRR_BIOS 0
11935 #define MRR_APM 1
11936 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11937 index 2dbe4a7..ce1db00 100644
11938 --- a/arch/x86/include/asm/rwsem.h
11939 +++ b/arch/x86/include/asm/rwsem.h
11940 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11941 {
11942 asm volatile("# beginning down_read\n\t"
11943 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11944 +
11945 +#ifdef CONFIG_PAX_REFCOUNT
11946 + "jno 0f\n"
11947 + LOCK_PREFIX _ASM_DEC "(%1)\n"
11948 + "int $4\n0:\n"
11949 + _ASM_EXTABLE(0b, 0b)
11950 +#endif
11951 +
11952 /* adds 0x00000001 */
11953 " jns 1f\n"
11954 " call call_rwsem_down_read_failed\n"
11955 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11956 "1:\n\t"
11957 " mov %1,%2\n\t"
11958 " add %3,%2\n\t"
11959 +
11960 +#ifdef CONFIG_PAX_REFCOUNT
11961 + "jno 0f\n"
11962 + "sub %3,%2\n"
11963 + "int $4\n0:\n"
11964 + _ASM_EXTABLE(0b, 0b)
11965 +#endif
11966 +
11967 " jle 2f\n\t"
11968 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11969 " jnz 1b\n\t"
11970 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11971 long tmp;
11972 asm volatile("# beginning down_write\n\t"
11973 LOCK_PREFIX " xadd %1,(%2)\n\t"
11974 +
11975 +#ifdef CONFIG_PAX_REFCOUNT
11976 + "jno 0f\n"
11977 + "mov %1,(%2)\n"
11978 + "int $4\n0:\n"
11979 + _ASM_EXTABLE(0b, 0b)
11980 +#endif
11981 +
11982 /* adds 0xffff0001, returns the old value */
11983 " test %1,%1\n\t"
11984 /* was the count 0 before? */
11985 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11986 long tmp;
11987 asm volatile("# beginning __up_read\n\t"
11988 LOCK_PREFIX " xadd %1,(%2)\n\t"
11989 +
11990 +#ifdef CONFIG_PAX_REFCOUNT
11991 + "jno 0f\n"
11992 + "mov %1,(%2)\n"
11993 + "int $4\n0:\n"
11994 + _ASM_EXTABLE(0b, 0b)
11995 +#endif
11996 +
11997 /* subtracts 1, returns the old value */
11998 " jns 1f\n\t"
11999 " call call_rwsem_wake\n" /* expects old value in %edx */
12000 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12001 long tmp;
12002 asm volatile("# beginning __up_write\n\t"
12003 LOCK_PREFIX " xadd %1,(%2)\n\t"
12004 +
12005 +#ifdef CONFIG_PAX_REFCOUNT
12006 + "jno 0f\n"
12007 + "mov %1,(%2)\n"
12008 + "int $4\n0:\n"
12009 + _ASM_EXTABLE(0b, 0b)
12010 +#endif
12011 +
12012 /* subtracts 0xffff0001, returns the old value */
12013 " jns 1f\n\t"
12014 " call call_rwsem_wake\n" /* expects old value in %edx */
12015 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12016 {
12017 asm volatile("# beginning __downgrade_write\n\t"
12018 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12019 +
12020 +#ifdef CONFIG_PAX_REFCOUNT
12021 + "jno 0f\n"
12022 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12023 + "int $4\n0:\n"
12024 + _ASM_EXTABLE(0b, 0b)
12025 +#endif
12026 +
12027 /*
12028 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12029 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12030 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12031 */
12032 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12033 {
12034 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12035 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12036 +
12037 +#ifdef CONFIG_PAX_REFCOUNT
12038 + "jno 0f\n"
12039 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
12040 + "int $4\n0:\n"
12041 + _ASM_EXTABLE(0b, 0b)
12042 +#endif
12043 +
12044 : "+m" (sem->count)
12045 : "er" (delta));
12046 }
12047 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12048 */
12049 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12050 {
12051 - return delta + xadd(&sem->count, delta);
12052 + return delta + xadd_check_overflow(&sem->count, delta);
12053 }
12054
12055 #endif /* __KERNEL__ */
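
The CONFIG_PAX_REFCOUNT blocks added to the rwsem primitives above all follow one pattern: perform the locked arithmetic, branch past the slow path with "jno" if the signed result did not overflow, otherwise undo the operation and raise "int $4" (the overflow trap), with an exception-table entry steering execution back to the label after the check. A user-space sketch of the same detect-and-undo idea for a plain 64-bit counter, assuming GCC inline asm on x86-64 and reporting through a flag instead of a trap:

/* Sketch: detect-and-undo overflow checking in the style of the hunks above,
 * for x86-64 with GCC inline asm; a flag replaces the kernel's "int $4". */
#include <stdio.h>
#include <limits.h>

static int checked_inc(long *counter)
{
        int overflowed = 0;

        asm volatile("lock incq %0\n\t"
                     "jno 1f\n\t"
                     "lock decq %0\n\t"      /* undo the increment, as the patch does */
                     "movl $1, %1\n"
                     "1:\n"
                     : "+m" (*counter), "+r" (overflowed)
                     :
                     : "memory", "cc");
        return overflowed;
}

int main(void)
{
        long c = LONG_MAX;                   /* the next increment overflows */
        printf("overflowed=%d counter=%ld\n", checked_inc(&c), c);
        c = 0;
        printf("overflowed=%d counter=%ld\n", checked_inc(&c), c);
        return 0;
}
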
12056 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12057 index 1654662..5af4157 100644
12058 --- a/arch/x86/include/asm/segment.h
12059 +++ b/arch/x86/include/asm/segment.h
12060 @@ -64,10 +64,15 @@
12061 * 26 - ESPFIX small SS
12062 * 27 - per-cpu [ offset to per-cpu data area ]
12063 * 28 - stack_canary-20 [ for stack protector ]
12064 - * 29 - unused
12065 - * 30 - unused
12066 + * 29 - PCI BIOS CS
12067 + * 30 - PCI BIOS DS
12068 * 31 - TSS for double fault handler
12069 */
12070 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12071 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12072 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12073 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12074 +
12075 #define GDT_ENTRY_TLS_MIN 6
12076 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12077
12078 @@ -79,6 +84,8 @@
12079
12080 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12081
12082 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12083 +
12084 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12085
12086 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12087 @@ -104,6 +111,12 @@
12088 #define __KERNEL_STACK_CANARY 0
12089 #endif
12090
12091 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12092 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12093 +
12094 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12095 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12096 +
12097 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12098
12099 /*
12100 @@ -141,7 +154,7 @@
12101 */
12102
12103 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12104 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12105 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12106
12107
12108 #else
12109 @@ -165,6 +178,8 @@
12110 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12111 #define __USER32_DS __USER_DS
12112
12113 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12114 +
12115 #define GDT_ENTRY_TSS 8 /* needs two entries */
12116 #define GDT_ENTRY_LDT 10 /* needs two entries */
12117 #define GDT_ENTRY_TLS_MIN 12
12118 @@ -185,6 +200,7 @@
12119 #endif
12120
12121 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12122 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12123 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12124 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12125 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12126 @@ -263,7 +279,7 @@ static inline unsigned long get_limit(unsigned long segment)
12127 {
12128 unsigned long __limit;
12129 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12130 - return __limit + 1;
12131 + return __limit;
12132 }
12133
12134 #endif /* !__ASSEMBLY__ */
12135 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12136 index 0434c40..1714bf0 100644
12137 --- a/arch/x86/include/asm/smp.h
12138 +++ b/arch/x86/include/asm/smp.h
12139 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12140 /* cpus sharing the last level cache: */
12141 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12142 DECLARE_PER_CPU(u16, cpu_llc_id);
12143 -DECLARE_PER_CPU(int, cpu_number);
12144 +DECLARE_PER_CPU(unsigned int, cpu_number);
12145
12146 static inline struct cpumask *cpu_sibling_mask(int cpu)
12147 {
12148 @@ -77,7 +77,7 @@ struct smp_ops {
12149
12150 void (*send_call_func_ipi)(const struct cpumask *mask);
12151 void (*send_call_func_single_ipi)(int cpu);
12152 -};
12153 +} __no_const;
12154
12155 /* Globals due to paravirt */
12156 extern void set_cpu_sibling_map(int cpu);
12157 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12158 extern int safe_smp_processor_id(void);
12159
12160 #elif defined(CONFIG_X86_64_SMP)
12161 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12162 -
12163 -#define stack_smp_processor_id() \
12164 -({ \
12165 - struct thread_info *ti; \
12166 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12167 - ti->cpu; \
12168 -})
12169 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12170 +#define stack_smp_processor_id() raw_smp_processor_id()
12171 #define safe_smp_processor_id() smp_processor_id()
12172
12173 #endif
12174 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12175 index 76bfa2c..12d3fe7 100644
12176 --- a/arch/x86/include/asm/spinlock.h
12177 +++ b/arch/x86/include/asm/spinlock.h
12178 @@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12179 static inline void arch_read_lock(arch_rwlock_t *rw)
12180 {
12181 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12182 +
12183 +#ifdef CONFIG_PAX_REFCOUNT
12184 + "jno 0f\n"
12185 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12186 + "int $4\n0:\n"
12187 + _ASM_EXTABLE(0b, 0b)
12188 +#endif
12189 +
12190 "jns 1f\n"
12191 "call __read_lock_failed\n\t"
12192 "1:\n"
12193 @@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12194 static inline void arch_write_lock(arch_rwlock_t *rw)
12195 {
12196 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12197 +
12198 +#ifdef CONFIG_PAX_REFCOUNT
12199 + "jno 0f\n"
12200 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12201 + "int $4\n0:\n"
12202 + _ASM_EXTABLE(0b, 0b)
12203 +#endif
12204 +
12205 "jz 1f\n"
12206 "call __write_lock_failed\n\t"
12207 "1:\n"
12208 @@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12209
12210 static inline void arch_read_unlock(arch_rwlock_t *rw)
12211 {
12212 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12213 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12214 +
12215 +#ifdef CONFIG_PAX_REFCOUNT
12216 + "jno 0f\n"
12217 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12218 + "int $4\n0:\n"
12219 + _ASM_EXTABLE(0b, 0b)
12220 +#endif
12221 +
12222 :"+m" (rw->lock) : : "memory");
12223 }
12224
12225 static inline void arch_write_unlock(arch_rwlock_t *rw)
12226 {
12227 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12228 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12229 +
12230 +#ifdef CONFIG_PAX_REFCOUNT
12231 + "jno 0f\n"
12232 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12233 + "int $4\n0:\n"
12234 + _ASM_EXTABLE(0b, 0b)
12235 +#endif
12236 +
12237 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12238 }
12239
12240 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12241 index b5d9533..41655fa 100644
12242 --- a/arch/x86/include/asm/stackprotector.h
12243 +++ b/arch/x86/include/asm/stackprotector.h
12244 @@ -47,7 +47,7 @@
12245 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12246 */
12247 #define GDT_STACK_CANARY_INIT \
12248 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12249 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12250
12251 /*
12252 * Initialize the stackprotector canary value.
12253 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
12254
12255 static inline void load_stack_canary_segment(void)
12256 {
12257 -#ifdef CONFIG_X86_32
12258 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12259 asm volatile ("mov %0, %%gs" : : "r" (0));
12260 #endif
12261 }
12262 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12263 index 70bbe39..4ae2bd4 100644
12264 --- a/arch/x86/include/asm/stacktrace.h
12265 +++ b/arch/x86/include/asm/stacktrace.h
12266 @@ -11,28 +11,20 @@
12267
12268 extern int kstack_depth_to_print;
12269
12270 -struct thread_info;
12271 +struct task_struct;
12272 struct stacktrace_ops;
12273
12274 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12275 - unsigned long *stack,
12276 - unsigned long bp,
12277 - const struct stacktrace_ops *ops,
12278 - void *data,
12279 - unsigned long *end,
12280 - int *graph);
12281 +typedef unsigned long walk_stack_t(struct task_struct *task,
12282 + void *stack_start,
12283 + unsigned long *stack,
12284 + unsigned long bp,
12285 + const struct stacktrace_ops *ops,
12286 + void *data,
12287 + unsigned long *end,
12288 + int *graph);
12289
12290 -extern unsigned long
12291 -print_context_stack(struct thread_info *tinfo,
12292 - unsigned long *stack, unsigned long bp,
12293 - const struct stacktrace_ops *ops, void *data,
12294 - unsigned long *end, int *graph);
12295 -
12296 -extern unsigned long
12297 -print_context_stack_bp(struct thread_info *tinfo,
12298 - unsigned long *stack, unsigned long bp,
12299 - const struct stacktrace_ops *ops, void *data,
12300 - unsigned long *end, int *graph);
12301 +extern walk_stack_t print_context_stack;
12302 +extern walk_stack_t print_context_stack_bp;
12303
12304 /* Generic stack tracer with callbacks */
12305
12306 @@ -40,7 +32,7 @@ struct stacktrace_ops {
12307 void (*address)(void *data, unsigned long address, int reliable);
12308 /* On negative return stop dumping */
12309 int (*stack)(void *data, char *name);
12310 - walk_stack_t walk_stack;
12311 + walk_stack_t *walk_stack;
12312 };
12313
12314 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
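
A small C idiom carries the stacktrace.h hunk: walk_stack_t changes from a pointer-to-function typedef to a plain function typedef, which is why print_context_stack can now be declared simply as "extern walk_stack_t print_context_stack;" and why the struct member gains an explicit "*". A stand-alone illustration of the same idiom (the names here are made up):

/* Sketch: declaring a function through a function typedef, and forming the
 * pointer explicitly in the struct, as the hunk above does. */
#include <stdio.h>

typedef unsigned long walk_fn(unsigned long *stack, void *data);

walk_fn my_walk;                   /* declares: unsigned long my_walk(...)   */

struct ops {
        walk_fn *walk;             /* the member is now spelled as a pointer */
};

unsigned long my_walk(unsigned long *stack, void *data)
{
        (void)data;
        return stack[0];
}

int main(void)
{
        unsigned long frame[1] = { 42UL };
        struct ops o = { .walk = my_walk };  /* function name decays to a pointer */

        printf("top of stack: %lu\n", o.walk(frame, NULL));
        return 0;
}
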
12315 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12316 index 4ec45b3..a4f0a8a 100644
12317 --- a/arch/x86/include/asm/switch_to.h
12318 +++ b/arch/x86/include/asm/switch_to.h
12319 @@ -108,7 +108,7 @@ do { \
12320 "call __switch_to\n\t" \
12321 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12322 __switch_canary \
12323 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12324 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12325 "movq %%rax,%%rdi\n\t" \
12326 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12327 "jnz ret_from_fork\n\t" \
12328 @@ -119,7 +119,7 @@ do { \
12329 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12330 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12331 [_tif_fork] "i" (_TIF_FORK), \
12332 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12333 + [thread_info] "m" (current_tinfo), \
12334 [current_task] "m" (current_task) \
12335 __switch_canary_iparam \
12336 : "memory", "cc" __EXTRA_CLOBBER)
12337 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12338 index 3fda9db4..4ca1c61 100644
12339 --- a/arch/x86/include/asm/sys_ia32.h
12340 +++ b/arch/x86/include/asm/sys_ia32.h
12341 @@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12342 struct old_sigaction32 __user *);
12343 asmlinkage long sys32_alarm(unsigned int);
12344
12345 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12346 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12347 asmlinkage long sys32_sysfs(int, u32, u32);
12348
12349 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12350 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12351 index ad6df8c..5e0cf6e 100644
12352 --- a/arch/x86/include/asm/thread_info.h
12353 +++ b/arch/x86/include/asm/thread_info.h
12354 @@ -10,6 +10,7 @@
12355 #include <linux/compiler.h>
12356 #include <asm/page.h>
12357 #include <asm/types.h>
12358 +#include <asm/percpu.h>
12359
12360 /*
12361 * low level task data that entry.S needs immediate access to
12362 @@ -24,7 +25,6 @@ struct exec_domain;
12363 #include <linux/atomic.h>
12364
12365 struct thread_info {
12366 - struct task_struct *task; /* main task structure */
12367 struct exec_domain *exec_domain; /* execution domain */
12368 __u32 flags; /* low level flags */
12369 __u32 status; /* thread synchronous flags */
12370 @@ -34,19 +34,13 @@ struct thread_info {
12371 mm_segment_t addr_limit;
12372 struct restart_block restart_block;
12373 void __user *sysenter_return;
12374 -#ifdef CONFIG_X86_32
12375 - unsigned long previous_esp; /* ESP of the previous stack in
12376 - case of nested (IRQ) stacks
12377 - */
12378 - __u8 supervisor_stack[0];
12379 -#endif
12380 + unsigned long lowest_stack;
12381 unsigned int sig_on_uaccess_error:1;
12382 unsigned int uaccess_err:1; /* uaccess failed */
12383 };
12384
12385 -#define INIT_THREAD_INFO(tsk) \
12386 +#define INIT_THREAD_INFO \
12387 { \
12388 - .task = &tsk, \
12389 .exec_domain = &default_exec_domain, \
12390 .flags = 0, \
12391 .cpu = 0, \
12392 @@ -57,7 +51,7 @@ struct thread_info {
12393 }, \
12394 }
12395
12396 -#define init_thread_info (init_thread_union.thread_info)
12397 +#define init_thread_info (init_thread_union.stack)
12398 #define init_stack (init_thread_union.stack)
12399
12400 #else /* !__ASSEMBLY__ */
12401 @@ -97,6 +91,7 @@ struct thread_info {
12402 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12403 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12404 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12405 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
12406
12407 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12408 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12409 @@ -120,16 +115,18 @@ struct thread_info {
12410 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12411 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12412 #define _TIF_X32 (1 << TIF_X32)
12413 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12414
12415 /* work to do in syscall_trace_enter() */
12416 #define _TIF_WORK_SYSCALL_ENTRY \
12417 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12418 - _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12419 + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12420 + _TIF_GRSEC_SETXID)
12421
12422 /* work to do in syscall_trace_leave() */
12423 #define _TIF_WORK_SYSCALL_EXIT \
12424 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12425 - _TIF_SYSCALL_TRACEPOINT)
12426 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12427
12428 /* work to do on interrupt/exception return */
12429 #define _TIF_WORK_MASK \
12430 @@ -139,7 +136,8 @@ struct thread_info {
12431
12432 /* work to do on any return to user space */
12433 #define _TIF_ALLWORK_MASK \
12434 - ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12435 + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12436 + _TIF_GRSEC_SETXID)
12437
12438 /* Only used for 64 bit */
12439 #define _TIF_DO_NOTIFY_MASK \
12440 @@ -173,45 +171,40 @@ struct thread_info {
12441 ret; \
12442 })
12443
12444 -#ifdef CONFIG_X86_32
12445 -
12446 -#define STACK_WARN (THREAD_SIZE/8)
12447 -/*
12448 - * macros/functions for gaining access to the thread information structure
12449 - *
12450 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12451 - */
12452 -#ifndef __ASSEMBLY__
12453 -
12454 -
12455 -/* how to get the current stack pointer from C */
12456 -register unsigned long current_stack_pointer asm("esp") __used;
12457 -
12458 -/* how to get the thread information struct from C */
12459 -static inline struct thread_info *current_thread_info(void)
12460 -{
12461 - return (struct thread_info *)
12462 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12463 -}
12464 -
12465 -#else /* !__ASSEMBLY__ */
12466 -
12467 +#ifdef __ASSEMBLY__
12468 /* how to get the thread information struct from ASM */
12469 #define GET_THREAD_INFO(reg) \
12470 - movl $-THREAD_SIZE, reg; \
12471 - andl %esp, reg
12472 + mov PER_CPU_VAR(current_tinfo), reg
12473
12474 /* use this one if reg already contains %esp */
12475 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12476 - andl $-THREAD_SIZE, reg
12477 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12478 +#else
12479 +/* how to get the thread information struct from C */
12480 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12481 +
12482 +static __always_inline struct thread_info *current_thread_info(void)
12483 +{
12484 + return percpu_read_stable(current_tinfo);
12485 +}
12486 +#endif
12487 +
12488 +#ifdef CONFIG_X86_32
12489 +
12490 +#define STACK_WARN (THREAD_SIZE/8)
12491 +/*
12492 + * macros/functions for gaining access to the thread information structure
12493 + *
12494 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12495 + */
12496 +#ifndef __ASSEMBLY__
12497 +
12498 +/* how to get the current stack pointer from C */
12499 +register unsigned long current_stack_pointer asm("esp") __used;
12500
12501 #endif
12502
12503 #else /* X86_32 */
12504
12505 -#include <asm/percpu.h>
12506 -#define KERNEL_STACK_OFFSET (5*8)
12507 -
12508 /*
12509 * macros/functions for gaining access to the thread information structure
12510 * preempt_count needs to be 1 initially, until the scheduler is functional.
12511 @@ -219,27 +212,8 @@ static inline struct thread_info *current_thread_info(void)
12512 #ifndef __ASSEMBLY__
12513 DECLARE_PER_CPU(unsigned long, kernel_stack);
12514
12515 -static inline struct thread_info *current_thread_info(void)
12516 -{
12517 - struct thread_info *ti;
12518 - ti = (void *)(percpu_read_stable(kernel_stack) +
12519 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12520 - return ti;
12521 -}
12522 -
12523 -#else /* !__ASSEMBLY__ */
12524 -
12525 -/* how to get the thread information struct from ASM */
12526 -#define GET_THREAD_INFO(reg) \
12527 - movq PER_CPU_VAR(kernel_stack),reg ; \
12528 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12529 -
12530 -/*
12531 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12532 - * a certain register (to be used in assembler memory operands).
12533 - */
12534 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12535 -
12536 +/* how to get the current stack pointer from C */
12537 +register unsigned long current_stack_pointer asm("rsp") __used;
12538 #endif
12539
12540 #endif /* !X86_32 */
12541 @@ -285,5 +259,16 @@ extern void arch_task_cache_init(void);
12542 extern void free_thread_info(struct thread_info *ti);
12543 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12544 #define arch_task_cache_init arch_task_cache_init
12545 +
12546 +#define __HAVE_THREAD_FUNCTIONS
12547 +#define task_thread_info(task) (&(task)->tinfo)
12548 +#define task_stack_page(task) ((task)->stack)
12549 +#define setup_thread_stack(p, org) do {} while (0)
12550 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12551 +
12552 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12553 +extern struct task_struct *alloc_task_struct_node(int node);
12554 +extern void free_task_struct(struct task_struct *);
12555 +
12556 #endif
12557 #endif /* _ASM_X86_THREAD_INFO_H */
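
For context on the thread_info.h rework above: the deleted 32-bit current_thread_info() recovered the structure by masking the stack pointer down to the THREAD_SIZE boundary, since thread_info used to sit at the bottom of the kernel stack, while the replacement simply reads the new per-CPU current_tinfo pointer. The arithmetic the old code relied on, as a stand-alone demo (THREAD_SIZE of 8 KiB is the usual 32-bit value):

/* Sketch: the stack-masking lookup that the hunk above removes. */
#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL                       /* two 4 KiB pages */

int main(void)
{
        uintptr_t sp = 0xc1234abcUL;             /* some address inside the stack */
        uintptr_t ti = sp & ~(THREAD_SIZE - 1);  /* bottom of that stack region   */

        printf("sp=%#lx -> thread_info at %#lx\n",
               (unsigned long)sp, (unsigned long)ti);
        return 0;
}
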
12558 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12559 index e054459..14bc8a7 100644
12560 --- a/arch/x86/include/asm/uaccess.h
12561 +++ b/arch/x86/include/asm/uaccess.h
12562 @@ -7,12 +7,15 @@
12563 #include <linux/compiler.h>
12564 #include <linux/thread_info.h>
12565 #include <linux/string.h>
12566 +#include <linux/sched.h>
12567 #include <asm/asm.h>
12568 #include <asm/page.h>
12569
12570 #define VERIFY_READ 0
12571 #define VERIFY_WRITE 1
12572
12573 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12574 +
12575 /*
12576 * The fs value determines whether argument validity checking should be
12577 * performed or not. If get_fs() == USER_DS, checking is performed, with
12578 @@ -28,7 +31,12 @@
12579
12580 #define get_ds() (KERNEL_DS)
12581 #define get_fs() (current_thread_info()->addr_limit)
12582 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12583 +void __set_fs(mm_segment_t x);
12584 +void set_fs(mm_segment_t x);
12585 +#else
12586 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12587 +#endif
12588
12589 #define segment_eq(a, b) ((a).seg == (b).seg)
12590
12591 @@ -76,7 +84,33 @@
12592 * checks that the pointer is in the user space range - after calling
12593 * this function, memory access functions may still return -EFAULT.
12594 */
12595 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12596 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12597 +#define access_ok(type, addr, size) \
12598 +({ \
12599 + long __size = size; \
12600 + unsigned long __addr = (unsigned long)addr; \
12601 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12602 + unsigned long __end_ao = __addr + __size - 1; \
12603 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12604 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12605 + while(__addr_ao <= __end_ao) { \
12606 + char __c_ao; \
12607 + __addr_ao += PAGE_SIZE; \
12608 + if (__size > PAGE_SIZE) \
12609 + cond_resched(); \
12610 + if (__get_user(__c_ao, (char __user *)__addr)) \
12611 + break; \
12612 + if (type != VERIFY_WRITE) { \
12613 + __addr = __addr_ao; \
12614 + continue; \
12615 + } \
12616 + if (__put_user(__c_ao, (char __user *)__addr)) \
12617 + break; \
12618 + __addr = __addr_ao; \
12619 + } \
12620 + } \
12621 + __ret_ao; \
12622 +})
12623
12624 /*
12625 * The exception table consists of pairs of addresses: the first is the
12626 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12627 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12628 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12629
12630 -
12631 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12632 +#define __copyuser_seg "gs;"
12633 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12634 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12635 +#else
12636 +#define __copyuser_seg
12637 +#define __COPYUSER_SET_ES
12638 +#define __COPYUSER_RESTORE_ES
12639 +#endif
12640
12641 #ifdef CONFIG_X86_32
12642 #define __put_user_asm_u64(x, addr, err, errret) \
12643 - asm volatile("1: movl %%eax,0(%2)\n" \
12644 - "2: movl %%edx,4(%2)\n" \
12645 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12646 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12647 "3:\n" \
12648 ".section .fixup,\"ax\"\n" \
12649 "4: movl %3,%0\n" \
12650 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12651 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12652
12653 #define __put_user_asm_ex_u64(x, addr) \
12654 - asm volatile("1: movl %%eax,0(%1)\n" \
12655 - "2: movl %%edx,4(%1)\n" \
12656 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12657 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12658 "3:\n" \
12659 _ASM_EXTABLE(1b, 2b - 1b) \
12660 _ASM_EXTABLE(2b, 3b - 2b) \
12661 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12662 __typeof__(*(ptr)) __pu_val; \
12663 __chk_user_ptr(ptr); \
12664 might_fault(); \
12665 - __pu_val = x; \
12666 + __pu_val = (x); \
12667 switch (sizeof(*(ptr))) { \
12668 case 1: \
12669 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12670 @@ -373,7 +415,7 @@ do { \
12671 } while (0)
12672
12673 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12674 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12675 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12676 "2:\n" \
12677 ".section .fixup,\"ax\"\n" \
12678 "3: mov %3,%0\n" \
12679 @@ -381,7 +423,7 @@ do { \
12680 " jmp 2b\n" \
12681 ".previous\n" \
12682 _ASM_EXTABLE(1b, 3b) \
12683 - : "=r" (err), ltype(x) \
12684 + : "=r" (err), ltype (x) \
12685 : "m" (__m(addr)), "i" (errret), "0" (err))
12686
12687 #define __get_user_size_ex(x, ptr, size) \
12688 @@ -406,7 +448,7 @@ do { \
12689 } while (0)
12690
12691 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12692 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12693 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12694 "2:\n" \
12695 _ASM_EXTABLE(1b, 2b - 1b) \
12696 : ltype(x) : "m" (__m(addr)))
12697 @@ -423,13 +465,24 @@ do { \
12698 int __gu_err; \
12699 unsigned long __gu_val; \
12700 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12701 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12702 + (x) = (__typeof__(*(ptr)))__gu_val; \
12703 __gu_err; \
12704 })
12705
12706 /* FIXME: this hack is definitely wrong -AK */
12707 struct __large_struct { unsigned long buf[100]; };
12708 -#define __m(x) (*(struct __large_struct __user *)(x))
12709 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12710 +#define ____m(x) \
12711 +({ \
12712 + unsigned long ____x = (unsigned long)(x); \
12713 + if (____x < PAX_USER_SHADOW_BASE) \
12714 + ____x += PAX_USER_SHADOW_BASE; \
12715 + (void __user *)____x; \
12716 +})
12717 +#else
12718 +#define ____m(x) (x)
12719 +#endif
12720 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12721
12722 /*
12723 * Tell gcc we read from memory instead of writing: this is because
12724 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12725 * aliasing issues.
12726 */
12727 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12728 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12729 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12730 "2:\n" \
12731 ".section .fixup,\"ax\"\n" \
12732 "3: mov %3,%0\n" \
12733 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12734 ".previous\n" \
12735 _ASM_EXTABLE(1b, 3b) \
12736 : "=r"(err) \
12737 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12738 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12739
12740 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12741 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12742 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12743 "2:\n" \
12744 _ASM_EXTABLE(1b, 2b - 1b) \
12745 : : ltype(x), "m" (__m(addr)))
12746 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12747 * On error, the variable @x is set to zero.
12748 */
12749
12750 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12751 +#define __get_user(x, ptr) get_user((x), (ptr))
12752 +#else
12753 #define __get_user(x, ptr) \
12754 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12755 +#endif
12756
12757 /**
12758 * __put_user: - Write a simple value into user space, with less checking.
12759 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12760 * Returns zero on success, or -EFAULT on error.
12761 */
12762
12763 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12764 +#define __put_user(x, ptr) put_user((x), (ptr))
12765 +#else
12766 #define __put_user(x, ptr) \
12767 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12768 +#endif
12769
12770 #define __get_user_unaligned __get_user
12771 #define __put_user_unaligned __put_user
12772 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12773 #define get_user_ex(x, ptr) do { \
12774 unsigned long __gue_val; \
12775 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12776 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12777 + (x) = (__typeof__(*(ptr)))__gue_val; \
12778 } while (0)
12779
12780 #ifdef CONFIG_X86_WP_WORKS_OK
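
The expanded access_ok() above does more than the old range check: when the range spans more than one page, it walks it a page at a time, reading one byte per page (and writing the byte back for VERIFY_WRITE) so that every page of the user range is touched before the caller proceeds. A stand-alone analogue of that probe loop over an ordinary buffer (PAGE_SIZE is hard-coded here purely for the sketch):

/* Sketch: probe one byte per page across a range, in the spirit of the
 * page-walking loop added to access_ok() above. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static void probe_range(char *addr, unsigned long size, int for_write)
{
        unsigned long a = (unsigned long)addr;
        unsigned long page = a & PAGE_MASK;
        unsigned long end = a + size - 1;

        while (page <= end) {
                char c;

                page += PAGE_SIZE;               /* next page boundary             */
                c = *(volatile char *)a;         /* read a byte on this page       */
                if (for_write)
                        *(volatile char *)a = c; /* write it back for write checks */
                a = page;                        /* continue at the next page      */
        }
}

int main(void)
{
        static char buf[3 * 4096];

        probe_range(buf, sizeof(buf), 1);
        printf("probed %zu bytes\n", sizeof(buf));
        return 0;
}
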
12781 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12782 index 8084bc7..cc139cb 100644
12783 --- a/arch/x86/include/asm/uaccess_32.h
12784 +++ b/arch/x86/include/asm/uaccess_32.h
12785 @@ -11,15 +11,15 @@
12786 #include <asm/page.h>
12787
12788 unsigned long __must_check __copy_to_user_ll
12789 - (void __user *to, const void *from, unsigned long n);
12790 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12791 unsigned long __must_check __copy_from_user_ll
12792 - (void *to, const void __user *from, unsigned long n);
12793 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12794 unsigned long __must_check __copy_from_user_ll_nozero
12795 - (void *to, const void __user *from, unsigned long n);
12796 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12797 unsigned long __must_check __copy_from_user_ll_nocache
12798 - (void *to, const void __user *from, unsigned long n);
12799 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12800 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12801 - (void *to, const void __user *from, unsigned long n);
12802 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12803
12804 /**
12805 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12806 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12807 static __always_inline unsigned long __must_check
12808 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12809 {
12810 + if ((long)n < 0)
12811 + return n;
12812 +
12813 if (__builtin_constant_p(n)) {
12814 unsigned long ret;
12815
12816 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12817 return ret;
12818 }
12819 }
12820 + if (!__builtin_constant_p(n))
12821 + check_object_size(from, n, true);
12822 return __copy_to_user_ll(to, from, n);
12823 }
12824
12825 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12826 __copy_to_user(void __user *to, const void *from, unsigned long n)
12827 {
12828 might_fault();
12829 +
12830 return __copy_to_user_inatomic(to, from, n);
12831 }
12832
12833 static __always_inline unsigned long
12834 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12835 {
12836 + if ((long)n < 0)
12837 + return n;
12838 +
12839 /* Avoid zeroing the tail if the copy fails..
12840 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12841 * but as the zeroing behaviour is only significant when n is not
12842 @@ -137,6 +146,10 @@ static __always_inline unsigned long
12843 __copy_from_user(void *to, const void __user *from, unsigned long n)
12844 {
12845 might_fault();
12846 +
12847 + if ((long)n < 0)
12848 + return n;
12849 +
12850 if (__builtin_constant_p(n)) {
12851 unsigned long ret;
12852
12853 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12854 return ret;
12855 }
12856 }
12857 + if (!__builtin_constant_p(n))
12858 + check_object_size(to, n, false);
12859 return __copy_from_user_ll(to, from, n);
12860 }
12861
12862 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12863 const void __user *from, unsigned long n)
12864 {
12865 might_fault();
12866 +
12867 + if ((long)n < 0)
12868 + return n;
12869 +
12870 if (__builtin_constant_p(n)) {
12871 unsigned long ret;
12872
12873 @@ -181,15 +200,19 @@ static __always_inline unsigned long
12874 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12875 unsigned long n)
12876 {
12877 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12878 + if ((long)n < 0)
12879 + return n;
12880 +
12881 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12882 }
12883
12884 -unsigned long __must_check copy_to_user(void __user *to,
12885 - const void *from, unsigned long n);
12886 -unsigned long __must_check _copy_from_user(void *to,
12887 - const void __user *from,
12888 - unsigned long n);
12889 -
12890 +extern void copy_to_user_overflow(void)
12891 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12892 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12893 +#else
12894 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12895 +#endif
12896 +;
12897
12898 extern void copy_from_user_overflow(void)
12899 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12900 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12901 #endif
12902 ;
12903
12904 -static inline unsigned long __must_check copy_from_user(void *to,
12905 - const void __user *from,
12906 - unsigned long n)
12907 +/**
12908 + * copy_to_user: - Copy a block of data into user space.
12909 + * @to: Destination address, in user space.
12910 + * @from: Source address, in kernel space.
12911 + * @n: Number of bytes to copy.
12912 + *
12913 + * Context: User context only. This function may sleep.
12914 + *
12915 + * Copy data from kernel space to user space.
12916 + *
12917 + * Returns number of bytes that could not be copied.
12918 + * On success, this will be zero.
12919 + */
12920 +static inline unsigned long __must_check
12921 +copy_to_user(void __user *to, const void *from, unsigned long n)
12922 +{
12923 + int sz = __compiletime_object_size(from);
12924 +
12925 + if (unlikely(sz != -1 && sz < n))
12926 + copy_to_user_overflow();
12927 + else if (access_ok(VERIFY_WRITE, to, n))
12928 + n = __copy_to_user(to, from, n);
12929 + return n;
12930 +}
12931 +
12932 +/**
12933 + * copy_from_user: - Copy a block of data from user space.
12934 + * @to: Destination address, in kernel space.
12935 + * @from: Source address, in user space.
12936 + * @n: Number of bytes to copy.
12937 + *
12938 + * Context: User context only. This function may sleep.
12939 + *
12940 + * Copy data from user space to kernel space.
12941 + *
12942 + * Returns number of bytes that could not be copied.
12943 + * On success, this will be zero.
12944 + *
12945 + * If some data could not be copied, this function will pad the copied
12946 + * data to the requested size using zero bytes.
12947 + */
12948 +static inline unsigned long __must_check
12949 +copy_from_user(void *to, const void __user *from, unsigned long n)
12950 {
12951 int sz = __compiletime_object_size(to);
12952
12953 - if (likely(sz == -1 || sz >= n))
12954 - n = _copy_from_user(to, from, n);
12955 - else
12956 + if (unlikely(sz != -1 && sz < n))
12957 copy_from_user_overflow();
12958 -
12959 + else if (access_ok(VERIFY_READ, from, n))
12960 + n = __copy_from_user(to, from, n);
12961 + else if ((long)n > 0) {
12962 + if (!__builtin_constant_p(n))
12963 + check_object_size(to, n, false);
12964 + memset(to, 0, n);
12965 + }
12966 return n;
12967 }
12968
12969 @@ -230,7 +297,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
12970 #define strlen_user(str) strnlen_user(str, LONG_MAX)
12971
12972 long strnlen_user(const char __user *str, long n);
12973 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
12974 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
12975 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12976 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12977
12978 #endif /* _ASM_X86_UACCESS_32_H */
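
The rewritten copy_from_user() above is a three-way ladder: if the compiler can prove the destination object is smaller than n, it triggers copy_from_user_overflow(); otherwise, if access_ok() passes, it copies; otherwise it zeroes the destination so a failed copy never hands stale kernel memory back to the caller. Leaving the compile-time part aside (it needs __compiletime_object_size()), the runtime part looks roughly like this stand-alone analogue, with a made-up range_ok() standing in for access_ok():

/* Sketch: check-then-copy-else-zero, in the spirit of the copy_from_user()
 * rewrite above.  range_ok() is a made-up stand-in for access_ok(). */
#include <stdio.h>
#include <string.h>

static int range_ok(const void *from, unsigned long n)
{
        (void)from;
        return n <= 64;                    /* pretend only small ranges are valid */
}

static unsigned long
copy_from_user_sketch(void *to, const void *from, unsigned long n)
{
        if (range_ok(from, n)) {
                memcpy(to, from, n);       /* __copy_from_user()                  */
                return 0;                  /* nothing left uncopied               */
        }
        memset(to, 0, n);                  /* failed check: zero the destination  */
        return n;                          /* report everything as uncopied       */
}

int main(void)
{
        char src[8] = "secret";
        char dst[128];
        unsigned long left;

        left = copy_from_user_sketch(dst, src, 7);
        printf("left=%lu dst=%s\n", left, dst);

        left = copy_from_user_sketch(dst, src, 100);   /* fails the range check */
        printf("left=%lu dst[0]=%d\n", left, (int)dst[0]);
        return 0;
}
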
12979 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12980 index fcd4b6f..f4631a0 100644
12981 --- a/arch/x86/include/asm/uaccess_64.h
12982 +++ b/arch/x86/include/asm/uaccess_64.h
12983 @@ -10,6 +10,9 @@
12984 #include <asm/alternative.h>
12985 #include <asm/cpufeature.h>
12986 #include <asm/page.h>
12987 +#include <asm/pgtable.h>
12988 +
12989 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12990
12991 /*
12992 * Copy To/From Userspace
12993 @@ -17,12 +20,14 @@
12994
12995 /* Handles exceptions in both to and from, but doesn't do access_ok */
12996 __must_check unsigned long
12997 -copy_user_generic_string(void *to, const void *from, unsigned len);
12998 +copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
12999 __must_check unsigned long
13000 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13001 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13002
13003 static __always_inline __must_check unsigned long
13004 -copy_user_generic(void *to, const void *from, unsigned len)
13005 +copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13006 +static __always_inline __must_check unsigned long
13007 +copy_user_generic(void *to, const void *from, unsigned long len)
13008 {
13009 unsigned ret;
13010
13011 @@ -32,142 +37,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
13012 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13013 "=d" (len)),
13014 "1" (to), "2" (from), "3" (len)
13015 - : "memory", "rcx", "r8", "r9", "r10", "r11");
13016 + : "memory", "rcx", "r8", "r9", "r11");
13017 return ret;
13018 }
13019
13020 +static __always_inline __must_check unsigned long
13021 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13022 +static __always_inline __must_check unsigned long
13023 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13024 __must_check unsigned long
13025 -_copy_to_user(void __user *to, const void *from, unsigned len);
13026 -__must_check unsigned long
13027 -_copy_from_user(void *to, const void __user *from, unsigned len);
13028 -__must_check unsigned long
13029 -copy_in_user(void __user *to, const void __user *from, unsigned len);
13030 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13031
13032 static inline unsigned long __must_check copy_from_user(void *to,
13033 const void __user *from,
13034 unsigned long n)
13035 {
13036 - int sz = __compiletime_object_size(to);
13037 -
13038 might_fault();
13039 - if (likely(sz == -1 || sz >= n))
13040 - n = _copy_from_user(to, from, n);
13041 -#ifdef CONFIG_DEBUG_VM
13042 - else
13043 - WARN(1, "Buffer overflow detected!\n");
13044 -#endif
13045 +
13046 + if (access_ok(VERIFY_READ, from, n))
13047 + n = __copy_from_user(to, from, n);
13048 + else if (n < INT_MAX) {
13049 + if (!__builtin_constant_p(n))
13050 + check_object_size(to, n, false);
13051 + memset(to, 0, n);
13052 + }
13053 return n;
13054 }
13055
13056 static __always_inline __must_check
13057 -int copy_to_user(void __user *dst, const void *src, unsigned size)
13058 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
13059 {
13060 might_fault();
13061
13062 - return _copy_to_user(dst, src, size);
13063 + if (access_ok(VERIFY_WRITE, dst, size))
13064 + size = __copy_to_user(dst, src, size);
13065 + return size;
13066 }
13067
13068 static __always_inline __must_check
13069 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
13070 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13071 {
13072 - int ret = 0;
13073 + int sz = __compiletime_object_size(dst);
13074 + unsigned ret = 0;
13075
13076 might_fault();
13077 - if (!__builtin_constant_p(size))
13078 - return copy_user_generic(dst, (__force void *)src, size);
13079 +
13080 + if (size > INT_MAX)
13081 + return size;
13082 +
13083 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13084 + if (!__access_ok(VERIFY_READ, src, size))
13085 + return size;
13086 +#endif
13087 +
13088 + if (unlikely(sz != -1 && sz < size)) {
13089 +#ifdef CONFIG_DEBUG_VM
13090 + WARN(1, "Buffer overflow detected!\n");
13091 +#endif
13092 + return size;
13093 + }
13094 +
13095 + if (!__builtin_constant_p(size)) {
13096 + check_object_size(dst, size, false);
13097 +
13098 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13099 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13100 + src += PAX_USER_SHADOW_BASE;
13101 +#endif
13102 +
13103 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13104 + }
13105 switch (size) {
13106 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13107 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13108 ret, "b", "b", "=q", 1);
13109 return ret;
13110 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13111 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13112 ret, "w", "w", "=r", 2);
13113 return ret;
13114 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13115 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13116 ret, "l", "k", "=r", 4);
13117 return ret;
13118 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13119 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13120 ret, "q", "", "=r", 8);
13121 return ret;
13122 case 10:
13123 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13124 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13125 ret, "q", "", "=r", 10);
13126 if (unlikely(ret))
13127 return ret;
13128 __get_user_asm(*(u16 *)(8 + (char *)dst),
13129 - (u16 __user *)(8 + (char __user *)src),
13130 + (const u16 __user *)(8 + (const char __user *)src),
13131 ret, "w", "w", "=r", 2);
13132 return ret;
13133 case 16:
13134 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13135 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13136 ret, "q", "", "=r", 16);
13137 if (unlikely(ret))
13138 return ret;
13139 __get_user_asm(*(u64 *)(8 + (char *)dst),
13140 - (u64 __user *)(8 + (char __user *)src),
13141 + (const u64 __user *)(8 + (const char __user *)src),
13142 ret, "q", "", "=r", 8);
13143 return ret;
13144 default:
13145 - return copy_user_generic(dst, (__force void *)src, size);
13146 +
13147 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13148 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13149 + src += PAX_USER_SHADOW_BASE;
13150 +#endif
13151 +
13152 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13153 }
13154 }
13155
13156 static __always_inline __must_check
13157 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
13158 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13159 {
13160 - int ret = 0;
13161 + int sz = __compiletime_object_size(src);
13162 + unsigned ret = 0;
13163
13164 might_fault();
13165 - if (!__builtin_constant_p(size))
13166 - return copy_user_generic((__force void *)dst, src, size);
13167 +
13168 + if (size > INT_MAX)
13169 + return size;
13170 +
13171 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13172 + if (!__access_ok(VERIFY_WRITE, dst, size))
13173 + return size;
13174 +#endif
13175 +
13176 + if (unlikely(sz != -1 && sz < size)) {
13177 +#ifdef CONFIG_DEBUG_VM
13178 + WARN(1, "Buffer overflow detected!\n");
13179 +#endif
13180 + return size;
13181 + }
13182 +
13183 + if (!__builtin_constant_p(size)) {
13184 + check_object_size(src, size, true);
13185 +
13186 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13187 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13188 + dst += PAX_USER_SHADOW_BASE;
13189 +#endif
13190 +
13191 + return copy_user_generic((__force_kernel void *)dst, src, size);
13192 + }
13193 switch (size) {
13194 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13195 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13196 ret, "b", "b", "iq", 1);
13197 return ret;
13198 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13199 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13200 ret, "w", "w", "ir", 2);
13201 return ret;
13202 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13203 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13204 ret, "l", "k", "ir", 4);
13205 return ret;
13206 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13207 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13208 ret, "q", "", "er", 8);
13209 return ret;
13210 case 10:
13211 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13212 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13213 ret, "q", "", "er", 10);
13214 if (unlikely(ret))
13215 return ret;
13216 asm("":::"memory");
13217 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13218 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13219 ret, "w", "w", "ir", 2);
13220 return ret;
13221 case 16:
13222 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13223 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13224 ret, "q", "", "er", 16);
13225 if (unlikely(ret))
13226 return ret;
13227 asm("":::"memory");
13228 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13229 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13230 ret, "q", "", "er", 8);
13231 return ret;
13232 default:
13233 - return copy_user_generic((__force void *)dst, src, size);
13234 +
13235 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13236 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13237 + dst += PAX_USER_SHADOW_BASE;
13238 +#endif
13239 +
13240 + return copy_user_generic((__force_kernel void *)dst, src, size);
13241 }
13242 }
13243
13244 static __always_inline __must_check
13245 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13246 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13247 {
13248 - int ret = 0;
13249 + unsigned ret = 0;
13250
13251 might_fault();
13252 - if (!__builtin_constant_p(size))
13253 - return copy_user_generic((__force void *)dst,
13254 - (__force void *)src, size);
13255 +
13256 + if (size > INT_MAX)
13257 + return size;
13258 +
13259 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13260 + if (!__access_ok(VERIFY_READ, src, size))
13261 + return size;
13262 + if (!__access_ok(VERIFY_WRITE, dst, size))
13263 + return size;
13264 +#endif
13265 +
13266 + if (!__builtin_constant_p(size)) {
13267 +
13268 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13269 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13270 + src += PAX_USER_SHADOW_BASE;
13271 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13272 + dst += PAX_USER_SHADOW_BASE;
13273 +#endif
13274 +
13275 + return copy_user_generic((__force_kernel void *)dst,
13276 + (__force_kernel const void *)src, size);
13277 + }
13278 switch (size) {
13279 case 1: {
13280 u8 tmp;
13281 - __get_user_asm(tmp, (u8 __user *)src,
13282 + __get_user_asm(tmp, (const u8 __user *)src,
13283 ret, "b", "b", "=q", 1);
13284 if (likely(!ret))
13285 __put_user_asm(tmp, (u8 __user *)dst,
13286 @@ -176,7 +265,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13287 }
13288 case 2: {
13289 u16 tmp;
13290 - __get_user_asm(tmp, (u16 __user *)src,
13291 + __get_user_asm(tmp, (const u16 __user *)src,
13292 ret, "w", "w", "=r", 2);
13293 if (likely(!ret))
13294 __put_user_asm(tmp, (u16 __user *)dst,
13295 @@ -186,7 +275,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13296
13297 case 4: {
13298 u32 tmp;
13299 - __get_user_asm(tmp, (u32 __user *)src,
13300 + __get_user_asm(tmp, (const u32 __user *)src,
13301 ret, "l", "k", "=r", 4);
13302 if (likely(!ret))
13303 __put_user_asm(tmp, (u32 __user *)dst,
13304 @@ -195,7 +284,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13305 }
13306 case 8: {
13307 u64 tmp;
13308 - __get_user_asm(tmp, (u64 __user *)src,
13309 + __get_user_asm(tmp, (const u64 __user *)src,
13310 ret, "q", "", "=r", 8);
13311 if (likely(!ret))
13312 __put_user_asm(tmp, (u64 __user *)dst,
13313 @@ -203,47 +292,92 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13314 return ret;
13315 }
13316 default:
13317 - return copy_user_generic((__force void *)dst,
13318 - (__force void *)src, size);
13319 +
13320 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13321 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13322 + src += PAX_USER_SHADOW_BASE;
13323 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13324 + dst += PAX_USER_SHADOW_BASE;
13325 +#endif
13326 +
13327 + return copy_user_generic((__force_kernel void *)dst,
13328 + (__force_kernel const void *)src, size);
13329 }
13330 }
13331
13332 __must_check long strnlen_user(const char __user *str, long n);
13333 __must_check long __strnlen_user(const char __user *str, long n);
13334 __must_check long strlen_user(const char __user *str);
13335 -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13336 -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13337 +__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13338 +__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13339
13340 static __must_check __always_inline int
13341 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13342 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13343 {
13344 - return copy_user_generic(dst, (__force const void *)src, size);
13345 + if (size > INT_MAX)
13346 + return size;
13347 +
13348 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13349 + if (!__access_ok(VERIFY_READ, src, size))
13350 + return size;
13351 +
13352 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13353 + src += PAX_USER_SHADOW_BASE;
13354 +#endif
13355 +
13356 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13357 }
13358
13359 -static __must_check __always_inline int
13360 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13361 +static __must_check __always_inline unsigned long
13362 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13363 {
13364 - return copy_user_generic((__force void *)dst, src, size);
13365 + if (size > INT_MAX)
13366 + return size;
13367 +
13368 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13369 + if (!__access_ok(VERIFY_WRITE, dst, size))
13370 + return size;
13371 +
13372 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13373 + dst += PAX_USER_SHADOW_BASE;
13374 +#endif
13375 +
13376 + return copy_user_generic((__force_kernel void *)dst, src, size);
13377 }
13378
13379 -extern long __copy_user_nocache(void *dst, const void __user *src,
13380 - unsigned size, int zerorest);
13381 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13382 + unsigned long size, int zerorest) __size_overflow(3);
13383
13384 -static inline int
13385 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13386 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13387 {
13388 might_sleep();
13389 +
13390 + if (size > INT_MAX)
13391 + return size;
13392 +
13393 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13394 + if (!__access_ok(VERIFY_READ, src, size))
13395 + return size;
13396 +#endif
13397 +
13398 return __copy_user_nocache(dst, src, size, 1);
13399 }
13400
13401 -static inline int
13402 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13403 - unsigned size)
13404 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13405 + unsigned long size)
13406 {
13407 + if (size > INT_MAX)
13408 + return size;
13409 +
13410 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13411 + if (!__access_ok(VERIFY_READ, src, size))
13412 + return size;
13413 +#endif
13414 +
13415 return __copy_user_nocache(dst, src, size, 0);
13416 }
13417
13418 -unsigned long
13419 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13420 +extern unsigned long
13421 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13422
13423 #endif /* _ASM_X86_UACCESS_64_H */
13424 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13425 index bb05228..d763d5b 100644
13426 --- a/arch/x86/include/asm/vdso.h
13427 +++ b/arch/x86/include/asm/vdso.h
13428 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13429 #define VDSO32_SYMBOL(base, name) \
13430 ({ \
13431 extern const char VDSO32_##name[]; \
13432 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13433 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13434 })
13435 #endif
13436
13437 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13438 index 764b66a..ad3cfc8 100644
13439 --- a/arch/x86/include/asm/x86_init.h
13440 +++ b/arch/x86/include/asm/x86_init.h
13441 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
13442 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13443 void (*find_smp_config)(void);
13444 void (*get_smp_config)(unsigned int early);
13445 -};
13446 +} __no_const;
13447
13448 /**
13449 * struct x86_init_resources - platform specific resource related ops
13450 @@ -43,7 +43,7 @@ struct x86_init_resources {
13451 void (*probe_roms)(void);
13452 void (*reserve_resources)(void);
13453 char *(*memory_setup)(void);
13454 -};
13455 +} __no_const;
13456
13457 /**
13458 * struct x86_init_irqs - platform specific interrupt setup
13459 @@ -56,7 +56,7 @@ struct x86_init_irqs {
13460 void (*pre_vector_init)(void);
13461 void (*intr_init)(void);
13462 void (*trap_init)(void);
13463 -};
13464 +} __no_const;
13465
13466 /**
13467 * struct x86_init_oem - oem platform specific customizing functions
13468 @@ -66,7 +66,7 @@ struct x86_init_irqs {
13469 struct x86_init_oem {
13470 void (*arch_setup)(void);
13471 void (*banner)(void);
13472 -};
13473 +} __no_const;
13474
13475 /**
13476 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13477 @@ -77,7 +77,7 @@ struct x86_init_oem {
13478 */
13479 struct x86_init_mapping {
13480 void (*pagetable_reserve)(u64 start, u64 end);
13481 -};
13482 +} __no_const;
13483
13484 /**
13485 * struct x86_init_paging - platform specific paging functions
13486 @@ -87,7 +87,7 @@ struct x86_init_mapping {
13487 struct x86_init_paging {
13488 void (*pagetable_setup_start)(pgd_t *base);
13489 void (*pagetable_setup_done)(pgd_t *base);
13490 -};
13491 +} __no_const;
13492
13493 /**
13494 * struct x86_init_timers - platform specific timer setup
13495 @@ -102,7 +102,7 @@ struct x86_init_timers {
13496 void (*tsc_pre_init)(void);
13497 void (*timer_init)(void);
13498 void (*wallclock_init)(void);
13499 -};
13500 +} __no_const;
13501
13502 /**
13503 * struct x86_init_iommu - platform specific iommu setup
13504 @@ -110,7 +110,7 @@ struct x86_init_timers {
13505 */
13506 struct x86_init_iommu {
13507 int (*iommu_init)(void);
13508 -};
13509 +} __no_const;
13510
13511 /**
13512 * struct x86_init_pci - platform specific pci init functions
13513 @@ -124,7 +124,7 @@ struct x86_init_pci {
13514 int (*init)(void);
13515 void (*init_irq)(void);
13516 void (*fixup_irqs)(void);
13517 -};
13518 +} __no_const;
13519
13520 /**
13521 * struct x86_init_ops - functions for platform specific setup
13522 @@ -140,7 +140,7 @@ struct x86_init_ops {
13523 struct x86_init_timers timers;
13524 struct x86_init_iommu iommu;
13525 struct x86_init_pci pci;
13526 -};
13527 +} __no_const;
13528
13529 /**
13530 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13531 @@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
13532 void (*setup_percpu_clockev)(void);
13533 void (*early_percpu_clock_init)(void);
13534 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13535 -};
13536 +} __no_const;
13537
13538 /**
13539 * struct x86_platform_ops - platform specific runtime functions
13540 @@ -177,7 +177,7 @@ struct x86_platform_ops {
13541 int (*i8042_detect)(void);
13542 void (*save_sched_clock_state)(void);
13543 void (*restore_sched_clock_state)(void);
13544 -};
13545 +} __no_const;
13546
13547 struct pci_dev;
13548
13549 @@ -186,7 +186,7 @@ struct x86_msi_ops {
13550 void (*teardown_msi_irq)(unsigned int irq);
13551 void (*teardown_msi_irqs)(struct pci_dev *dev);
13552 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13553 -};
13554 +} __no_const;
13555
13556 extern struct x86_init_ops x86_init;
13557 extern struct x86_cpuinit_ops x86_cpuinit;
13558 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13559 index c6ce245..ffbdab7 100644
13560 --- a/arch/x86/include/asm/xsave.h
13561 +++ b/arch/x86/include/asm/xsave.h
13562 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13563 {
13564 int err;
13565
13566 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13567 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13568 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13569 +#endif
13570 +
13571 /*
13572 * Clear the xsave header first, so that reserved fields are
13573 * initialized to zero.
13574 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13575 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13576 {
13577 int err;
13578 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13579 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13580 u32 lmask = mask;
13581 u32 hmask = mask >> 32;
13582
13583 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13584 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13585 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13586 +#endif
13587 +
13588 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13589 "2:\n"
13590 ".section .fixup,\"ax\"\n"
13591 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13592 index 6a564ac..9b1340c 100644
13593 --- a/arch/x86/kernel/acpi/realmode/Makefile
13594 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13595 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13596 $(call cc-option, -fno-stack-protector) \
13597 $(call cc-option, -mpreferred-stack-boundary=2)
13598 KBUILD_CFLAGS += $(call cc-option, -m32)
13599 +ifdef CONSTIFY_PLUGIN
13600 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13601 +endif
13602 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13603 GCOV_PROFILE := n
13604
13605 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13606 index b4fd836..4358fe3 100644
13607 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13608 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13609 @@ -108,6 +108,9 @@ wakeup_code:
13610 /* Do any other stuff... */
13611
13612 #ifndef CONFIG_64BIT
13613 +	/* Recheck NX bit overrides (64bit path does this in trampoline) */
13614 + call verify_cpu
13615 +
13616 /* This could also be done in C code... */
13617 movl pmode_cr3, %eax
13618 movl %eax, %cr3
13619 @@ -131,6 +134,7 @@ wakeup_code:
13620 movl pmode_cr0, %eax
13621 movl %eax, %cr0
13622 jmp pmode_return
13623 +# include "../../verify_cpu.S"
13624 #else
13625 pushw $0
13626 pushw trampoline_segment
13627 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13628 index 146a49c..1b5338b 100644
13629 --- a/arch/x86/kernel/acpi/sleep.c
13630 +++ b/arch/x86/kernel/acpi/sleep.c
13631 @@ -98,8 +98,12 @@ int acpi_suspend_lowlevel(void)
13632 header->trampoline_segment = trampoline_address() >> 4;
13633 #ifdef CONFIG_SMP
13634 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13635 +
13636 + pax_open_kernel();
13637 early_gdt_descr.address =
13638 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13639 + pax_close_kernel();
13640 +
13641 initial_gs = per_cpu_offset(smp_processor_id());
13642 #endif
13643 initial_code = (unsigned long)wakeup_long64;
13644 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13645 index 7261083..5c12053 100644
13646 --- a/arch/x86/kernel/acpi/wakeup_32.S
13647 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13648 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13649 # and restore the stack ... but you need gdt for this to work
13650 movl saved_context_esp, %esp
13651
13652 - movl %cs:saved_magic, %eax
13653 - cmpl $0x12345678, %eax
13654 + cmpl $0x12345678, saved_magic
13655 jne bogus_magic
13656
13657 # jump to place where we left off
13658 - movl saved_eip, %eax
13659 - jmp *%eax
13660 + jmp *(saved_eip)
13661
13662 bogus_magic:
13663 jmp bogus_magic
13664 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13665 index 1f84794..e23f862 100644
13666 --- a/arch/x86/kernel/alternative.c
13667 +++ b/arch/x86/kernel/alternative.c
13668 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13669 */
13670 for (a = start; a < end; a++) {
13671 instr = (u8 *)&a->instr_offset + a->instr_offset;
13672 +
13673 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13674 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13675 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13676 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13677 +#endif
13678 +
13679 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13680 BUG_ON(a->replacementlen > a->instrlen);
13681 BUG_ON(a->instrlen > sizeof(insnbuf));
13682 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13683 for (poff = start; poff < end; poff++) {
13684 u8 *ptr = (u8 *)poff + *poff;
13685
13686 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13687 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13688 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13689 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13690 +#endif
13691 +
13692 if (!*poff || ptr < text || ptr >= text_end)
13693 continue;
13694 /* turn DS segment override prefix into lock prefix */
13695 - if (*ptr == 0x3e)
13696 + if (*ktla_ktva(ptr) == 0x3e)
13697 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13698 };
13699 mutex_unlock(&text_mutex);
13700 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13701 for (poff = start; poff < end; poff++) {
13702 u8 *ptr = (u8 *)poff + *poff;
13703
13704 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13705 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13706 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13707 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13708 +#endif
13709 +
13710 if (!*poff || ptr < text || ptr >= text_end)
13711 continue;
13712 /* turn lock prefix into DS segment override prefix */
13713 - if (*ptr == 0xf0)
13714 + if (*ktla_ktva(ptr) == 0xf0)
13715 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13716 };
13717 mutex_unlock(&text_mutex);
13718 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13719
13720 BUG_ON(p->len > MAX_PATCH_LEN);
13721 /* prep the buffer with the original instructions */
13722 - memcpy(insnbuf, p->instr, p->len);
13723 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13724 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13725 (unsigned long)p->instr, p->len);
13726
13727 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13728 if (smp_alt_once)
13729 free_init_pages("SMP alternatives",
13730 (unsigned long)__smp_locks,
13731 - (unsigned long)__smp_locks_end);
13732 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13733
13734 restart_nmi();
13735 }
13736 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13737 * instructions. And on the local CPU you need to be protected again NMI or MCE
13738 * handlers seeing an inconsistent instruction while you patch.
13739 */
13740 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13741 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13742 size_t len)
13743 {
13744 unsigned long flags;
13745 local_irq_save(flags);
13746 - memcpy(addr, opcode, len);
13747 +
13748 + pax_open_kernel();
13749 + memcpy(ktla_ktva(addr), opcode, len);
13750 sync_core();
13751 + pax_close_kernel();
13752 +
13753 local_irq_restore(flags);
13754 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13755 that causes hangs on some VIA CPUs. */
13756 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13757 */
13758 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13759 {
13760 - unsigned long flags;
13761 - char *vaddr;
13762 + unsigned char *vaddr = ktla_ktva(addr);
13763 struct page *pages[2];
13764 - int i;
13765 + size_t i;
13766
13767 if (!core_kernel_text((unsigned long)addr)) {
13768 - pages[0] = vmalloc_to_page(addr);
13769 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13770 + pages[0] = vmalloc_to_page(vaddr);
13771 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13772 } else {
13773 - pages[0] = virt_to_page(addr);
13774 + pages[0] = virt_to_page(vaddr);
13775 WARN_ON(!PageReserved(pages[0]));
13776 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13777 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13778 }
13779 BUG_ON(!pages[0]);
13780 - local_irq_save(flags);
13781 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13782 - if (pages[1])
13783 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13784 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13785 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13786 - clear_fixmap(FIX_TEXT_POKE0);
13787 - if (pages[1])
13788 - clear_fixmap(FIX_TEXT_POKE1);
13789 - local_flush_tlb();
13790 - sync_core();
13791 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13792 - that causes hangs on some VIA CPUs. */
13793 + text_poke_early(addr, opcode, len);
13794 for (i = 0; i < len; i++)
13795 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13796 - local_irq_restore(flags);
13797 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13798 return addr;
13799 }
13800
13801 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13802 index edc2448..553e7c5 100644
13803 --- a/arch/x86/kernel/apic/apic.c
13804 +++ b/arch/x86/kernel/apic/apic.c
13805 @@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13806 /*
13807 * Debug level, exported for io_apic.c
13808 */
13809 -unsigned int apic_verbosity;
13810 +int apic_verbosity;
13811
13812 int pic_mode;
13813
13814 @@ -1917,7 +1917,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13815 apic_write(APIC_ESR, 0);
13816 v1 = apic_read(APIC_ESR);
13817 ack_APIC_irq();
13818 - atomic_inc(&irq_err_count);
13819 + atomic_inc_unchecked(&irq_err_count);
13820
13821 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13822 smp_processor_id(), v0 , v1);
13823 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13824 index e88300d..cd5a87a 100644
13825 --- a/arch/x86/kernel/apic/io_apic.c
13826 +++ b/arch/x86/kernel/apic/io_apic.c
13827 @@ -83,7 +83,9 @@ static struct io_apic_ops io_apic_ops = {
13828
13829 void __init set_io_apic_ops(const struct io_apic_ops *ops)
13830 {
13831 - io_apic_ops = *ops;
13832 + pax_open_kernel();
13833 + memcpy((void*)&io_apic_ops, ops, sizeof io_apic_ops);
13834 + pax_close_kernel();
13835 }
13836
13837 /*
13838 @@ -1135,7 +1137,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13839 }
13840 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13841
13842 -void lock_vector_lock(void)
13843 +void lock_vector_lock(void) __acquires(vector_lock)
13844 {
13845 /* Used to the online set of cpus does not change
13846 * during assign_irq_vector.
13847 @@ -1143,7 +1145,7 @@ void lock_vector_lock(void)
13848 raw_spin_lock(&vector_lock);
13849 }
13850
13851 -void unlock_vector_lock(void)
13852 +void unlock_vector_lock(void) __releases(vector_lock)
13853 {
13854 raw_spin_unlock(&vector_lock);
13855 }
13856 @@ -2549,7 +2551,7 @@ static void ack_apic_edge(struct irq_data *data)
13857 ack_APIC_irq();
13858 }
13859
13860 -atomic_t irq_mis_count;
13861 +atomic_unchecked_t irq_mis_count;
13862
13863 #ifdef CONFIG_GENERIC_PENDING_IRQ
13864 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
13865 @@ -2667,7 +2669,7 @@ static void ack_apic_level(struct irq_data *data)
13866 * at the cpu.
13867 */
13868 if (!(v & (1 << (i & 0x1f)))) {
13869 - atomic_inc(&irq_mis_count);
13870 + atomic_inc_unchecked(&irq_mis_count);
13871
13872 eoi_ioapic_irq(irq, cfg);
13873 }
13874 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13875 index 459e78c..f037006 100644
13876 --- a/arch/x86/kernel/apm_32.c
13877 +++ b/arch/x86/kernel/apm_32.c
13878 @@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
13879 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13880 * even though they are called in protected mode.
13881 */
13882 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13883 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13884 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13885
13886 static const char driver_version[] = "1.16ac"; /* no spaces */
13887 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13888 BUG_ON(cpu != 0);
13889 gdt = get_cpu_gdt_table(cpu);
13890 save_desc_40 = gdt[0x40 / 8];
13891 +
13892 + pax_open_kernel();
13893 gdt[0x40 / 8] = bad_bios_desc;
13894 + pax_close_kernel();
13895
13896 apm_irq_save(flags);
13897 APM_DO_SAVE_SEGS;
13898 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13899 &call->esi);
13900 APM_DO_RESTORE_SEGS;
13901 apm_irq_restore(flags);
13902 +
13903 + pax_open_kernel();
13904 gdt[0x40 / 8] = save_desc_40;
13905 + pax_close_kernel();
13906 +
13907 put_cpu();
13908
13909 return call->eax & 0xff;
13910 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13911 BUG_ON(cpu != 0);
13912 gdt = get_cpu_gdt_table(cpu);
13913 save_desc_40 = gdt[0x40 / 8];
13914 +
13915 + pax_open_kernel();
13916 gdt[0x40 / 8] = bad_bios_desc;
13917 + pax_close_kernel();
13918
13919 apm_irq_save(flags);
13920 APM_DO_SAVE_SEGS;
13921 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13922 &call->eax);
13923 APM_DO_RESTORE_SEGS;
13924 apm_irq_restore(flags);
13925 +
13926 + pax_open_kernel();
13927 gdt[0x40 / 8] = save_desc_40;
13928 + pax_close_kernel();
13929 +
13930 put_cpu();
13931 return error;
13932 }
13933 @@ -2345,12 +2359,15 @@ static int __init apm_init(void)
13934 * code to that CPU.
13935 */
13936 gdt = get_cpu_gdt_table(0);
13937 +
13938 + pax_open_kernel();
13939 set_desc_base(&gdt[APM_CS >> 3],
13940 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13941 set_desc_base(&gdt[APM_CS_16 >> 3],
13942 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13943 set_desc_base(&gdt[APM_DS >> 3],
13944 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13945 + pax_close_kernel();
13946
13947 proc_create("apm", 0, NULL, &apm_file_ops);
13948
13949 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13950 index 68de2dc..1f3c720 100644
13951 --- a/arch/x86/kernel/asm-offsets.c
13952 +++ b/arch/x86/kernel/asm-offsets.c
13953 @@ -33,6 +33,8 @@ void common(void) {
13954 OFFSET(TI_status, thread_info, status);
13955 OFFSET(TI_addr_limit, thread_info, addr_limit);
13956 OFFSET(TI_preempt_count, thread_info, preempt_count);
13957 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13958 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13959
13960 BLANK();
13961 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
13962 @@ -53,8 +55,26 @@ void common(void) {
13963 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13964 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13965 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13966 +
13967 +#ifdef CONFIG_PAX_KERNEXEC
13968 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13969 #endif
13970
13971 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13972 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13973 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13974 +#ifdef CONFIG_X86_64
13975 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13976 +#endif
13977 +#endif
13978 +
13979 +#endif
13980 +
13981 + BLANK();
13982 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13983 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13984 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13985 +
13986 #ifdef CONFIG_XEN
13987 BLANK();
13988 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13989 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13990 index 1b4754f..fbb4227 100644
13991 --- a/arch/x86/kernel/asm-offsets_64.c
13992 +++ b/arch/x86/kernel/asm-offsets_64.c
13993 @@ -76,6 +76,7 @@ int main(void)
13994 BLANK();
13995 #undef ENTRY
13996
13997 + DEFINE(TSS_size, sizeof(struct tss_struct));
13998 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
13999 BLANK();
14000
14001 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14002 index 6ab6aa2..8f71507 100644
14003 --- a/arch/x86/kernel/cpu/Makefile
14004 +++ b/arch/x86/kernel/cpu/Makefile
14005 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14006 CFLAGS_REMOVE_perf_event.o = -pg
14007 endif
14008
14009 -# Make sure load_percpu_segment has no stackprotector
14010 -nostackp := $(call cc-option, -fno-stack-protector)
14011 -CFLAGS_common.o := $(nostackp)
14012 -
14013 obj-y := intel_cacheinfo.o scattered.o topology.o
14014 obj-y += proc.o capflags.o powerflags.o common.o
14015 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14016 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14017 index 146bb62..ac9c74a 100644
14018 --- a/arch/x86/kernel/cpu/amd.c
14019 +++ b/arch/x86/kernel/cpu/amd.c
14020 @@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14021 unsigned int size)
14022 {
14023 /* AMD errata T13 (order #21922) */
14024 - if ((c->x86 == 6)) {
14025 + if (c->x86 == 6) {
14026 /* Duron Rev A0 */
14027 if (c->x86_model == 3 && c->x86_mask == 0)
14028 size = 64;
14029 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14030 index cf79302..b1b28ae 100644
14031 --- a/arch/x86/kernel/cpu/common.c
14032 +++ b/arch/x86/kernel/cpu/common.c
14033 @@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14034
14035 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14036
14037 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14038 -#ifdef CONFIG_X86_64
14039 - /*
14040 - * We need valid kernel segments for data and code in long mode too
14041 - * IRET will check the segment types kkeil 2000/10/28
14042 - * Also sysret mandates a special GDT layout
14043 - *
14044 - * TLS descriptors are currently at a different place compared to i386.
14045 - * Hopefully nobody expects them at a fixed place (Wine?)
14046 - */
14047 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14048 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14049 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14050 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14051 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14052 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14053 -#else
14054 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14055 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14056 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14057 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14058 - /*
14059 - * Segments used for calling PnP BIOS have byte granularity.
14060 - * They code segments and data segments have fixed 64k limits,
14061 - * the transfer segment sizes are set at run time.
14062 - */
14063 - /* 32-bit code */
14064 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14065 - /* 16-bit code */
14066 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14067 - /* 16-bit data */
14068 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14069 - /* 16-bit data */
14070 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14071 - /* 16-bit data */
14072 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14073 - /*
14074 - * The APM segments have byte granularity and their bases
14075 - * are set at run time. All have 64k limits.
14076 - */
14077 - /* 32-bit code */
14078 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14079 - /* 16-bit code */
14080 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14081 - /* data */
14082 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14083 -
14084 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14085 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14086 - GDT_STACK_CANARY_INIT
14087 -#endif
14088 -} };
14089 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14090 -
14091 static int __init x86_xsave_setup(char *s)
14092 {
14093 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14094 @@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
14095 {
14096 struct desc_ptr gdt_descr;
14097
14098 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14099 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14100 gdt_descr.size = GDT_SIZE - 1;
14101 load_gdt(&gdt_descr);
14102 /* Reload the per-cpu base */
14103 @@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14104 /* Filter out anything that depends on CPUID levels we don't have */
14105 filter_cpuid_features(c, true);
14106
14107 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14108 + setup_clear_cpu_cap(X86_FEATURE_SEP);
14109 +#endif
14110 +
14111 /* If the model name is still unset, do table lookup. */
14112 if (!c->x86_model_id[0]) {
14113 const char *p;
14114 @@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(char *arg)
14115 }
14116 __setup("clearcpuid=", setup_disablecpuid);
14117
14118 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14119 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
14120 +
14121 #ifdef CONFIG_X86_64
14122 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14123 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14124 - (unsigned long) nmi_idt_table };
14125 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14126
14127 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14128 irq_stack_union) __aligned(PAGE_SIZE);
14129 @@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14130 EXPORT_PER_CPU_SYMBOL(current_task);
14131
14132 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14133 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14134 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14135 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14136
14137 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14138 @@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14139 {
14140 memset(regs, 0, sizeof(struct pt_regs));
14141 regs->fs = __KERNEL_PERCPU;
14142 - regs->gs = __KERNEL_STACK_CANARY;
14143 + savesegment(gs, regs->gs);
14144
14145 return regs;
14146 }
14147 @@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14148 int i;
14149
14150 cpu = stack_smp_processor_id();
14151 - t = &per_cpu(init_tss, cpu);
14152 + t = init_tss + cpu;
14153 oist = &per_cpu(orig_ist, cpu);
14154
14155 #ifdef CONFIG_NUMA
14156 @@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14157 switch_to_new_gdt(cpu);
14158 loadsegment(fs, 0);
14159
14160 - load_idt((const struct desc_ptr *)&idt_descr);
14161 + load_idt(&idt_descr);
14162
14163 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14164 syscall_init();
14165 @@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14166 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14167 barrier();
14168
14169 - x86_configure_nx();
14170 if (cpu != 0)
14171 enable_x2apic();
14172
14173 @@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14174 {
14175 int cpu = smp_processor_id();
14176 struct task_struct *curr = current;
14177 - struct tss_struct *t = &per_cpu(init_tss, cpu);
14178 + struct tss_struct *t = init_tss + cpu;
14179 struct thread_struct *thread = &curr->thread;
14180
14181 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14182 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14183 index 3e6ff6c..54b4992 100644
14184 --- a/arch/x86/kernel/cpu/intel.c
14185 +++ b/arch/x86/kernel/cpu/intel.c
14186 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14187 * Update the IDT descriptor and reload the IDT so that
14188 * it uses the read-only mapped virtual address.
14189 */
14190 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14191 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14192 load_idt(&idt_descr);
14193 }
14194 #endif
14195 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14196 index 61604ae..98250a5 100644
14197 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14198 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14199 @@ -42,6 +42,7 @@
14200 #include <asm/processor.h>
14201 #include <asm/mce.h>
14202 #include <asm/msr.h>
14203 +#include <asm/local.h>
14204
14205 #include "mce-internal.h"
14206
14207 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14208 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14209 m->cs, m->ip);
14210
14211 - if (m->cs == __KERNEL_CS)
14212 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14213 print_symbol("{%s}", m->ip);
14214 pr_cont("\n");
14215 }
14216 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14217
14218 #define PANIC_TIMEOUT 5 /* 5 seconds */
14219
14220 -static atomic_t mce_paniced;
14221 +static atomic_unchecked_t mce_paniced;
14222
14223 static int fake_panic;
14224 -static atomic_t mce_fake_paniced;
14225 +static atomic_unchecked_t mce_fake_paniced;
14226
14227 /* Panic in progress. Enable interrupts and wait for final IPI */
14228 static void wait_for_panic(void)
14229 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14230 /*
14231 * Make sure only one CPU runs in machine check panic
14232 */
14233 - if (atomic_inc_return(&mce_paniced) > 1)
14234 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14235 wait_for_panic();
14236 barrier();
14237
14238 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14239 console_verbose();
14240 } else {
14241 /* Don't log too much for fake panic */
14242 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14243 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14244 return;
14245 }
14246 /* First print corrected ones that are still unlogged */
14247 @@ -684,7 +685,7 @@ static int mce_timed_out(u64 *t)
14248 * might have been modified by someone else.
14249 */
14250 rmb();
14251 - if (atomic_read(&mce_paniced))
14252 + if (atomic_read_unchecked(&mce_paniced))
14253 wait_for_panic();
14254 if (!monarch_timeout)
14255 goto out;
14256 @@ -1535,7 +1536,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14257 }
14258
14259 /* Call the installed machine check handler for this CPU setup. */
14260 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14261 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14262 unexpected_machine_check;
14263
14264 /*
14265 @@ -1558,7 +1559,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14266 return;
14267 }
14268
14269 + pax_open_kernel();
14270 machine_check_vector = do_machine_check;
14271 + pax_close_kernel();
14272
14273 __mcheck_cpu_init_generic();
14274 __mcheck_cpu_init_vendor(c);
14275 @@ -1572,7 +1575,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14276 */
14277
14278 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14279 -static int mce_chrdev_open_count; /* #times opened */
14280 +static local_t mce_chrdev_open_count; /* #times opened */
14281 static int mce_chrdev_open_exclu; /* already open exclusive? */
14282
14283 static int mce_chrdev_open(struct inode *inode, struct file *file)
14284 @@ -1580,7 +1583,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14285 spin_lock(&mce_chrdev_state_lock);
14286
14287 if (mce_chrdev_open_exclu ||
14288 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14289 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14290 spin_unlock(&mce_chrdev_state_lock);
14291
14292 return -EBUSY;
14293 @@ -1588,7 +1591,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14294
14295 if (file->f_flags & O_EXCL)
14296 mce_chrdev_open_exclu = 1;
14297 - mce_chrdev_open_count++;
14298 + local_inc(&mce_chrdev_open_count);
14299
14300 spin_unlock(&mce_chrdev_state_lock);
14301
14302 @@ -1599,7 +1602,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14303 {
14304 spin_lock(&mce_chrdev_state_lock);
14305
14306 - mce_chrdev_open_count--;
14307 + local_dec(&mce_chrdev_open_count);
14308 mce_chrdev_open_exclu = 0;
14309
14310 spin_unlock(&mce_chrdev_state_lock);
14311 @@ -2324,7 +2327,7 @@ struct dentry *mce_get_debugfs_dir(void)
14312 static void mce_reset(void)
14313 {
14314 cpu_missing = 0;
14315 - atomic_set(&mce_fake_paniced, 0);
14316 + atomic_set_unchecked(&mce_fake_paniced, 0);
14317 atomic_set(&mce_executing, 0);
14318 atomic_set(&mce_callin, 0);
14319 atomic_set(&global_nwo, 0);
14320 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14321 index 2d5454c..51987eb 100644
14322 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14323 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14324 @@ -11,6 +11,7 @@
14325 #include <asm/processor.h>
14326 #include <asm/mce.h>
14327 #include <asm/msr.h>
14328 +#include <asm/pgtable.h>
14329
14330 /* By default disabled */
14331 int mce_p5_enabled __read_mostly;
14332 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14333 if (!cpu_has(c, X86_FEATURE_MCE))
14334 return;
14335
14336 + pax_open_kernel();
14337 machine_check_vector = pentium_machine_check;
14338 + pax_close_kernel();
14339 /* Make sure the vector pointer is visible before we enable MCEs: */
14340 wmb();
14341
14342 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14343 index 2d7998f..17c9de1 100644
14344 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14345 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14346 @@ -10,6 +10,7 @@
14347 #include <asm/processor.h>
14348 #include <asm/mce.h>
14349 #include <asm/msr.h>
14350 +#include <asm/pgtable.h>
14351
14352 /* Machine check handler for WinChip C6: */
14353 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14354 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14355 {
14356 u32 lo, hi;
14357
14358 + pax_open_kernel();
14359 machine_check_vector = winchip_machine_check;
14360 + pax_close_kernel();
14361 /* Make sure the vector pointer is visible before we enable MCEs: */
14362 wmb();
14363
14364 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14365 index 6b96110..0da73eb 100644
14366 --- a/arch/x86/kernel/cpu/mtrr/main.c
14367 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14368 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14369 u64 size_or_mask, size_and_mask;
14370 static bool mtrr_aps_delayed_init;
14371
14372 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14373 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14374
14375 const struct mtrr_ops *mtrr_if;
14376
14377 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14378 index df5e41f..816c719 100644
14379 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14380 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14381 @@ -25,7 +25,7 @@ struct mtrr_ops {
14382 int (*validate_add_page)(unsigned long base, unsigned long size,
14383 unsigned int type);
14384 int (*have_wrcomb)(void);
14385 -};
14386 +} __do_const;
14387
14388 extern int generic_get_free_region(unsigned long base, unsigned long size,
14389 int replace_reg);
14390 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14391 index bb8e034..fb9020b 100644
14392 --- a/arch/x86/kernel/cpu/perf_event.c
14393 +++ b/arch/x86/kernel/cpu/perf_event.c
14394 @@ -1835,7 +1835,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14395 break;
14396
14397 perf_callchain_store(entry, frame.return_address);
14398 - fp = frame.next_frame;
14399 + fp = (const void __force_user *)frame.next_frame;
14400 }
14401 }
14402
14403 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14404 index 13ad899..f642b9a 100644
14405 --- a/arch/x86/kernel/crash.c
14406 +++ b/arch/x86/kernel/crash.c
14407 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14408 {
14409 #ifdef CONFIG_X86_32
14410 struct pt_regs fixed_regs;
14411 -#endif
14412
14413 -#ifdef CONFIG_X86_32
14414 - if (!user_mode_vm(regs)) {
14415 + if (!user_mode(regs)) {
14416 crash_fixup_ss_esp(&fixed_regs, regs);
14417 regs = &fixed_regs;
14418 }
14419 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14420 index 37250fe..bf2ec74 100644
14421 --- a/arch/x86/kernel/doublefault_32.c
14422 +++ b/arch/x86/kernel/doublefault_32.c
14423 @@ -11,7 +11,7 @@
14424
14425 #define DOUBLEFAULT_STACKSIZE (1024)
14426 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14427 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14428 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14429
14430 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14431
14432 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14433 unsigned long gdt, tss;
14434
14435 store_gdt(&gdt_desc);
14436 - gdt = gdt_desc.address;
14437 + gdt = (unsigned long)gdt_desc.address;
14438
14439 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14440
14441 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14442 /* 0x2 bit is always set */
14443 .flags = X86_EFLAGS_SF | 0x2,
14444 .sp = STACK_START,
14445 - .es = __USER_DS,
14446 + .es = __KERNEL_DS,
14447 .cs = __KERNEL_CS,
14448 .ss = __KERNEL_DS,
14449 - .ds = __USER_DS,
14450 + .ds = __KERNEL_DS,
14451 .fs = __KERNEL_PERCPU,
14452
14453 .__cr3 = __pa_nodebug(swapper_pg_dir),
14454 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14455 index 1b81839..0b4e7b0 100644
14456 --- a/arch/x86/kernel/dumpstack.c
14457 +++ b/arch/x86/kernel/dumpstack.c
14458 @@ -2,6 +2,9 @@
14459 * Copyright (C) 1991, 1992 Linus Torvalds
14460 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14461 */
14462 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14463 +#define __INCLUDED_BY_HIDESYM 1
14464 +#endif
14465 #include <linux/kallsyms.h>
14466 #include <linux/kprobes.h>
14467 #include <linux/uaccess.h>
14468 @@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14469 static void
14470 print_ftrace_graph_addr(unsigned long addr, void *data,
14471 const struct stacktrace_ops *ops,
14472 - struct thread_info *tinfo, int *graph)
14473 + struct task_struct *task, int *graph)
14474 {
14475 - struct task_struct *task;
14476 unsigned long ret_addr;
14477 int index;
14478
14479 if (addr != (unsigned long)return_to_handler)
14480 return;
14481
14482 - task = tinfo->task;
14483 index = task->curr_ret_stack;
14484
14485 if (!task->ret_stack || index < *graph)
14486 @@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14487 static inline void
14488 print_ftrace_graph_addr(unsigned long addr, void *data,
14489 const struct stacktrace_ops *ops,
14490 - struct thread_info *tinfo, int *graph)
14491 + struct task_struct *task, int *graph)
14492 { }
14493 #endif
14494
14495 @@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14496 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14497 */
14498
14499 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14500 - void *p, unsigned int size, void *end)
14501 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14502 {
14503 - void *t = tinfo;
14504 if (end) {
14505 if (p < end && p >= (end-THREAD_SIZE))
14506 return 1;
14507 @@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14508 }
14509
14510 unsigned long
14511 -print_context_stack(struct thread_info *tinfo,
14512 +print_context_stack(struct task_struct *task, void *stack_start,
14513 unsigned long *stack, unsigned long bp,
14514 const struct stacktrace_ops *ops, void *data,
14515 unsigned long *end, int *graph)
14516 {
14517 struct stack_frame *frame = (struct stack_frame *)bp;
14518
14519 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14520 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14521 unsigned long addr;
14522
14523 addr = *stack;
14524 @@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14525 } else {
14526 ops->address(data, addr, 0);
14527 }
14528 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14529 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14530 }
14531 stack++;
14532 }
14533 @@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14534 EXPORT_SYMBOL_GPL(print_context_stack);
14535
14536 unsigned long
14537 -print_context_stack_bp(struct thread_info *tinfo,
14538 +print_context_stack_bp(struct task_struct *task, void *stack_start,
14539 unsigned long *stack, unsigned long bp,
14540 const struct stacktrace_ops *ops, void *data,
14541 unsigned long *end, int *graph)
14542 @@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14543 struct stack_frame *frame = (struct stack_frame *)bp;
14544 unsigned long *ret_addr = &frame->return_address;
14545
14546 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14547 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14548 unsigned long addr = *ret_addr;
14549
14550 if (!__kernel_text_address(addr))
14551 @@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14552 ops->address(data, addr, 1);
14553 frame = frame->next_frame;
14554 ret_addr = &frame->return_address;
14555 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14556 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14557 }
14558
14559 return (unsigned long)frame;
14560 @@ -189,7 +188,7 @@ void dump_stack(void)
14561
14562 bp = stack_frame(current, NULL);
14563 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14564 - current->pid, current->comm, print_tainted(),
14565 + task_pid_nr(current), current->comm, print_tainted(),
14566 init_utsname()->release,
14567 (int)strcspn(init_utsname()->version, " "),
14568 init_utsname()->version);
14569 @@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14570 }
14571 EXPORT_SYMBOL_GPL(oops_begin);
14572
14573 +extern void gr_handle_kernel_exploit(void);
14574 +
14575 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14576 {
14577 if (regs && kexec_should_crash(current))
14578 @@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14579 panic("Fatal exception in interrupt");
14580 if (panic_on_oops)
14581 panic("Fatal exception");
14582 - do_exit(signr);
14583 +
14584 + gr_handle_kernel_exploit();
14585 +
14586 + do_group_exit(signr);
14587 }
14588
14589 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14590 @@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14591
14592 show_registers(regs);
14593 #ifdef CONFIG_X86_32
14594 - if (user_mode_vm(regs)) {
14595 + if (user_mode(regs)) {
14596 sp = regs->sp;
14597 ss = regs->ss & 0xffff;
14598 } else {
14599 @@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14600 unsigned long flags = oops_begin();
14601 int sig = SIGSEGV;
14602
14603 - if (!user_mode_vm(regs))
14604 + if (!user_mode(regs))
14605 report_bug(regs->ip, regs);
14606
14607 if (__die(str, regs, err))
14608 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14609 index 88ec912..e95e935 100644
14610 --- a/arch/x86/kernel/dumpstack_32.c
14611 +++ b/arch/x86/kernel/dumpstack_32.c
14612 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14613 bp = stack_frame(task, regs);
14614
14615 for (;;) {
14616 - struct thread_info *context;
14617 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14618
14619 - context = (struct thread_info *)
14620 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14621 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14622 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14623
14624 - stack = (unsigned long *)context->previous_esp;
14625 - if (!stack)
14626 + if (stack_start == task_stack_page(task))
14627 break;
14628 + stack = *(unsigned long **)stack_start;
14629 if (ops->stack(data, "IRQ") < 0)
14630 break;
14631 touch_nmi_watchdog();
14632 @@ -87,7 +85,7 @@ void show_registers(struct pt_regs *regs)
14633 int i;
14634
14635 print_modules();
14636 - __show_regs(regs, !user_mode_vm(regs));
14637 + __show_regs(regs, !user_mode(regs));
14638
14639 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14640 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14641 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14642 * When in-kernel, we also print out the stack and code at the
14643 * time of the fault..
14644 */
14645 - if (!user_mode_vm(regs)) {
14646 + if (!user_mode(regs)) {
14647 unsigned int code_prologue = code_bytes * 43 / 64;
14648 unsigned int code_len = code_bytes;
14649 unsigned char c;
14650 u8 *ip;
14651 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14652
14653 printk(KERN_EMERG "Stack:\n");
14654 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14655
14656 printk(KERN_EMERG "Code: ");
14657
14658 - ip = (u8 *)regs->ip - code_prologue;
14659 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14660 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14661 /* try starting at IP */
14662 - ip = (u8 *)regs->ip;
14663 + ip = (u8 *)regs->ip + cs_base;
14664 code_len = code_len - code_prologue + 1;
14665 }
14666 for (i = 0; i < code_len; i++, ip++) {
14667 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14668 printk(KERN_CONT " Bad EIP value.");
14669 break;
14670 }
14671 - if (ip == (u8 *)regs->ip)
14672 + if (ip == (u8 *)regs->ip + cs_base)
14673 printk(KERN_CONT "<%02x> ", c);
14674 else
14675 printk(KERN_CONT "%02x ", c);
14676 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14677 {
14678 unsigned short ud2;
14679
14680 + ip = ktla_ktva(ip);
14681 if (ip < PAGE_OFFSET)
14682 return 0;
14683 if (probe_kernel_address((unsigned short *)ip, ud2))
14684 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14685
14686 return ud2 == 0x0b0f;
14687 }
14688 +
14689 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14690 +void pax_check_alloca(unsigned long size)
14691 +{
14692 + unsigned long sp = (unsigned long)&sp, stack_left;
14693 +
14694 + /* all kernel stacks are of the same size */
14695 + stack_left = sp & (THREAD_SIZE - 1);
14696 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14697 +}
14698 +EXPORT_SYMBOL(pax_check_alloca);
14699 +#endif
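
For context on the 32-bit pax_check_alloca() hook added at the end of dumpstack_32.c above: under CONFIG_PAX_MEMORY_STACKLEAK the compiler plugin shipped with this patch set is expected to emit a call to it before alloca()/variable-length stack allocations, so an oversized request hits the BUG_ON() instead of silently running off the kernel stack. A minimal sketch of what such an instrumented call site amounts to (illustrative only; the function and buffer names are hypothetical, and the real check is inserted by the plugin, not written by hand):

    #include <linux/string.h>	/* memset() */

    void pax_check_alloca(unsigned long size);	/* hook from the hunk above */

    static void demo_variable_frame(unsigned long n)
    {
    	pax_check_alloca(n);		/* plugin-style check before the allocation */
    	{
    		char buf[n];		/* VLA living on the kernel stack */
    		memset(buf, 0, n);
    	}
    }
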
14700 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14701 index 17107bd..9623722 100644
14702 --- a/arch/x86/kernel/dumpstack_64.c
14703 +++ b/arch/x86/kernel/dumpstack_64.c
14704 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14705 unsigned long *irq_stack_end =
14706 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14707 unsigned used = 0;
14708 - struct thread_info *tinfo;
14709 int graph = 0;
14710 unsigned long dummy;
14711 + void *stack_start;
14712
14713 if (!task)
14714 task = current;
14715 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14716 * current stack address. If the stacks consist of nested
14717 * exceptions
14718 */
14719 - tinfo = task_thread_info(task);
14720 for (;;) {
14721 char *id;
14722 unsigned long *estack_end;
14723 +
14724 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14725 &used, &id);
14726
14727 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14728 if (ops->stack(data, id) < 0)
14729 break;
14730
14731 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14732 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14733 data, estack_end, &graph);
14734 ops->stack(data, "<EOE>");
14735 /*
14736 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14737 * second-to-last pointer (index -2 to end) in the
14738 * exception stack:
14739 */
14740 + if ((u16)estack_end[-1] != __KERNEL_DS)
14741 + goto out;
14742 stack = (unsigned long *) estack_end[-2];
14743 continue;
14744 }
14745 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14746 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14747 if (ops->stack(data, "IRQ") < 0)
14748 break;
14749 - bp = ops->walk_stack(tinfo, stack, bp,
14750 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14751 ops, data, irq_stack_end, &graph);
14752 /*
14753 * We link to the next stack (which would be
14754 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14755 /*
14756 * This handles the process stack:
14757 */
14758 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14759 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14760 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14761 +out:
14762 put_cpu();
14763 }
14764 EXPORT_SYMBOL(dump_trace);
14765 @@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14766
14767 return ud2 == 0x0b0f;
14768 }
14769 +
14770 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14771 +void pax_check_alloca(unsigned long size)
14772 +{
14773 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14774 + unsigned cpu, used;
14775 + char *id;
14776 +
14777 + /* check the process stack first */
14778 + stack_start = (unsigned long)task_stack_page(current);
14779 + stack_end = stack_start + THREAD_SIZE;
14780 + if (likely(stack_start <= sp && sp < stack_end)) {
14781 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14782 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14783 + return;
14784 + }
14785 +
14786 + cpu = get_cpu();
14787 +
14788 + /* check the irq stacks */
14789 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14790 + stack_start = stack_end - IRQ_STACK_SIZE;
14791 + if (stack_start <= sp && sp < stack_end) {
14792 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14793 + put_cpu();
14794 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14795 + return;
14796 + }
14797 +
14798 + /* check the exception stacks */
14799 + used = 0;
14800 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14801 + stack_start = stack_end - EXCEPTION_STKSZ;
14802 + if (stack_end && stack_start <= sp && sp < stack_end) {
14803 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14804 + put_cpu();
14805 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14806 + return;
14807 + }
14808 +
14809 + put_cpu();
14810 +
14811 + /* unknown stack */
14812 + BUG();
14813 +}
14814 +EXPORT_SYMBOL(pax_check_alloca);
14815 +#endif
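
The three checks in the 64-bit pax_check_alloca() above all rely on the same arithmetic: because each stack (process, IRQ, exception) is size-aligned, masking the current stack pointer with (size - 1) gives its offset within that stack, i.e. roughly how many bytes remain before it reaches the bottom, and the BUG_ON() keeps a 256-byte guard band below the deepest allowed allocation. A small stand-alone illustration of that mask with made-up numbers (THREAD_SIZE = 8192 is the usual x86_64 value, but treat it as an assumption here):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long long thread_size = 8192;			/* assumed THREAD_SIZE */
    	unsigned long long sp = 0xffff8800331d1c40ULL;		/* made-up stack pointer */
    	unsigned long long stack_left = sp & (thread_size - 1);

    	/* mirrors: BUG_ON(stack_left < 256 || size >= stack_left - 256) */
    	printf("stack_left = %llu, largest allowed allocation < %llu\n",
    	       stack_left, stack_left - 256);
    	return 0;
    }
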
14816 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14817 index 9b9f18b..9fcaa04 100644
14818 --- a/arch/x86/kernel/early_printk.c
14819 +++ b/arch/x86/kernel/early_printk.c
14820 @@ -7,6 +7,7 @@
14821 #include <linux/pci_regs.h>
14822 #include <linux/pci_ids.h>
14823 #include <linux/errno.h>
14824 +#include <linux/sched.h>
14825 #include <asm/io.h>
14826 #include <asm/processor.h>
14827 #include <asm/fcntl.h>
14828 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14829 index 7b784f4..db6b628 100644
14830 --- a/arch/x86/kernel/entry_32.S
14831 +++ b/arch/x86/kernel/entry_32.S
14832 @@ -179,13 +179,146 @@
14833 /*CFI_REL_OFFSET gs, PT_GS*/
14834 .endm
14835 .macro SET_KERNEL_GS reg
14836 +
14837 +#ifdef CONFIG_CC_STACKPROTECTOR
14838 movl $(__KERNEL_STACK_CANARY), \reg
14839 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14840 + movl $(__USER_DS), \reg
14841 +#else
14842 + xorl \reg, \reg
14843 +#endif
14844 +
14845 movl \reg, %gs
14846 .endm
14847
14848 #endif /* CONFIG_X86_32_LAZY_GS */
14849
14850 -.macro SAVE_ALL
14851 +.macro pax_enter_kernel
14852 +#ifdef CONFIG_PAX_KERNEXEC
14853 + call pax_enter_kernel
14854 +#endif
14855 +.endm
14856 +
14857 +.macro pax_exit_kernel
14858 +#ifdef CONFIG_PAX_KERNEXEC
14859 + call pax_exit_kernel
14860 +#endif
14861 +.endm
14862 +
14863 +#ifdef CONFIG_PAX_KERNEXEC
14864 +ENTRY(pax_enter_kernel)
14865 +#ifdef CONFIG_PARAVIRT
14866 + pushl %eax
14867 + pushl %ecx
14868 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14869 + mov %eax, %esi
14870 +#else
14871 + mov %cr0, %esi
14872 +#endif
14873 + bts $16, %esi
14874 + jnc 1f
14875 + mov %cs, %esi
14876 + cmp $__KERNEL_CS, %esi
14877 + jz 3f
14878 + ljmp $__KERNEL_CS, $3f
14879 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14880 +2:
14881 +#ifdef CONFIG_PARAVIRT
14882 + mov %esi, %eax
14883 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14884 +#else
14885 + mov %esi, %cr0
14886 +#endif
14887 +3:
14888 +#ifdef CONFIG_PARAVIRT
14889 + popl %ecx
14890 + popl %eax
14891 +#endif
14892 + ret
14893 +ENDPROC(pax_enter_kernel)
14894 +
14895 +ENTRY(pax_exit_kernel)
14896 +#ifdef CONFIG_PARAVIRT
14897 + pushl %eax
14898 + pushl %ecx
14899 +#endif
14900 + mov %cs, %esi
14901 + cmp $__KERNEXEC_KERNEL_CS, %esi
14902 + jnz 2f
14903 +#ifdef CONFIG_PARAVIRT
14904 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14905 + mov %eax, %esi
14906 +#else
14907 + mov %cr0, %esi
14908 +#endif
14909 + btr $16, %esi
14910 + ljmp $__KERNEL_CS, $1f
14911 +1:
14912 +#ifdef CONFIG_PARAVIRT
14913 + mov %esi, %eax
14914 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14915 +#else
14916 + mov %esi, %cr0
14917 +#endif
14918 +2:
14919 +#ifdef CONFIG_PARAVIRT
14920 + popl %ecx
14921 + popl %eax
14922 +#endif
14923 + ret
14924 +ENDPROC(pax_exit_kernel)
14925 +#endif
14926 +
14927 +.macro pax_erase_kstack
14928 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14929 + call pax_erase_kstack
14930 +#endif
14931 +.endm
14932 +
14933 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14934 +/*
14935 + * ebp: thread_info
14936 + * ecx, edx: can be clobbered
14937 + */
14938 +ENTRY(pax_erase_kstack)
14939 + pushl %edi
14940 + pushl %eax
14941 +
14942 + mov TI_lowest_stack(%ebp), %edi
14943 + mov $-0xBEEF, %eax
14944 + std
14945 +
14946 +1: mov %edi, %ecx
14947 + and $THREAD_SIZE_asm - 1, %ecx
14948 + shr $2, %ecx
14949 + repne scasl
14950 + jecxz 2f
14951 +
14952 + cmp $2*16, %ecx
14953 + jc 2f
14954 +
14955 + mov $2*16, %ecx
14956 + repe scasl
14957 + jecxz 2f
14958 + jne 1b
14959 +
14960 +2: cld
14961 + mov %esp, %ecx
14962 + sub %edi, %ecx
14963 + shr $2, %ecx
14964 + rep stosl
14965 +
14966 + mov TI_task_thread_sp0(%ebp), %edi
14967 + sub $128, %edi
14968 + mov %edi, TI_lowest_stack(%ebp)
14969 +
14970 + popl %eax
14971 + popl %edi
14972 + ret
14973 +ENDPROC(pax_erase_kstack)
14974 +#endif
14975 +
14976 +.macro __SAVE_ALL _DS
14977 cld
14978 PUSH_GS
14979 pushl_cfi %fs
14980 @@ -208,7 +341,7 @@
14981 CFI_REL_OFFSET ecx, 0
14982 pushl_cfi %ebx
14983 CFI_REL_OFFSET ebx, 0
14984 - movl $(__USER_DS), %edx
14985 + movl $\_DS, %edx
14986 movl %edx, %ds
14987 movl %edx, %es
14988 movl $(__KERNEL_PERCPU), %edx
14989 @@ -216,6 +349,15 @@
14990 SET_KERNEL_GS %edx
14991 .endm
14992
14993 +.macro SAVE_ALL
14994 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14995 + __SAVE_ALL __KERNEL_DS
14996 + pax_enter_kernel
14997 +#else
14998 + __SAVE_ALL __USER_DS
14999 +#endif
15000 +.endm
15001 +
15002 .macro RESTORE_INT_REGS
15003 popl_cfi %ebx
15004 CFI_RESTORE ebx
15005 @@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
15006 popfl_cfi
15007 jmp syscall_exit
15008 CFI_ENDPROC
15009 -END(ret_from_fork)
15010 +ENDPROC(ret_from_fork)
15011
15012 /*
15013 * Interrupt exit functions should be protected against kprobes
15014 @@ -335,7 +477,15 @@ resume_userspace_sig:
15015 andl $SEGMENT_RPL_MASK, %eax
15016 #endif
15017 cmpl $USER_RPL, %eax
15018 +
15019 +#ifdef CONFIG_PAX_KERNEXEC
15020 + jae resume_userspace
15021 +
15022 + pax_exit_kernel
15023 + jmp resume_kernel
15024 +#else
15025 jb resume_kernel # not returning to v8086 or userspace
15026 +#endif
15027
15028 ENTRY(resume_userspace)
15029 LOCKDEP_SYS_EXIT
15030 @@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15031 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15032 # int/exception return?
15033 jne work_pending
15034 - jmp restore_all
15035 -END(ret_from_exception)
15036 + jmp restore_all_pax
15037 +ENDPROC(ret_from_exception)
15038
15039 #ifdef CONFIG_PREEMPT
15040 ENTRY(resume_kernel)
15041 @@ -363,7 +513,7 @@ need_resched:
15042 jz restore_all
15043 call preempt_schedule_irq
15044 jmp need_resched
15045 -END(resume_kernel)
15046 +ENDPROC(resume_kernel)
15047 #endif
15048 CFI_ENDPROC
15049 /*
15050 @@ -397,23 +547,34 @@ sysenter_past_esp:
15051 /*CFI_REL_OFFSET cs, 0*/
15052 /*
15053 * Push current_thread_info()->sysenter_return to the stack.
15054 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15055 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15056 */
15057 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15058 + pushl_cfi $0
15059 CFI_REL_OFFSET eip, 0
15060
15061 pushl_cfi %eax
15062 SAVE_ALL
15063 + GET_THREAD_INFO(%ebp)
15064 + movl TI_sysenter_return(%ebp),%ebp
15065 + movl %ebp,PT_EIP(%esp)
15066 ENABLE_INTERRUPTS(CLBR_NONE)
15067
15068 /*
15069 * Load the potential sixth argument from user stack.
15070 * Careful about security.
15071 */
15072 + movl PT_OLDESP(%esp),%ebp
15073 +
15074 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15075 + mov PT_OLDSS(%esp),%ds
15076 +1: movl %ds:(%ebp),%ebp
15077 + push %ss
15078 + pop %ds
15079 +#else
15080 cmpl $__PAGE_OFFSET-3,%ebp
15081 jae syscall_fault
15082 1: movl (%ebp),%ebp
15083 +#endif
15084 +
15085 movl %ebp,PT_EBP(%esp)
15086 .section __ex_table,"a"
15087 .align 4
15088 @@ -436,12 +597,24 @@ sysenter_do_call:
15089 testl $_TIF_ALLWORK_MASK, %ecx
15090 jne sysexit_audit
15091 sysenter_exit:
15092 +
15093 +#ifdef CONFIG_PAX_RANDKSTACK
15094 + pushl_cfi %eax
15095 + movl %esp, %eax
15096 + call pax_randomize_kstack
15097 + popl_cfi %eax
15098 +#endif
15099 +
15100 + pax_erase_kstack
15101 +
15102 /* if something modifies registers it must also disable sysexit */
15103 movl PT_EIP(%esp), %edx
15104 movl PT_OLDESP(%esp), %ecx
15105 xorl %ebp,%ebp
15106 TRACE_IRQS_ON
15107 1: mov PT_FS(%esp), %fs
15108 +2: mov PT_DS(%esp), %ds
15109 +3: mov PT_ES(%esp), %es
15110 PTGS_TO_GS
15111 ENABLE_INTERRUPTS_SYSEXIT
15112
15113 @@ -458,6 +631,9 @@ sysenter_audit:
15114 movl %eax,%edx /* 2nd arg: syscall number */
15115 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15116 call __audit_syscall_entry
15117 +
15118 + pax_erase_kstack
15119 +
15120 pushl_cfi %ebx
15121 movl PT_EAX(%esp),%eax /* reload syscall number */
15122 jmp sysenter_do_call
15123 @@ -483,11 +659,17 @@ sysexit_audit:
15124
15125 CFI_ENDPROC
15126 .pushsection .fixup,"ax"
15127 -2: movl $0,PT_FS(%esp)
15128 +4: movl $0,PT_FS(%esp)
15129 + jmp 1b
15130 +5: movl $0,PT_DS(%esp)
15131 + jmp 1b
15132 +6: movl $0,PT_ES(%esp)
15133 jmp 1b
15134 .section __ex_table,"a"
15135 .align 4
15136 - .long 1b,2b
15137 + .long 1b,4b
15138 + .long 2b,5b
15139 + .long 3b,6b
15140 .popsection
15141 PTGS_TO_GS_EX
15142 ENDPROC(ia32_sysenter_target)
15143 @@ -520,6 +702,15 @@ syscall_exit:
15144 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15145 jne syscall_exit_work
15146
15147 +restore_all_pax:
15148 +
15149 +#ifdef CONFIG_PAX_RANDKSTACK
15150 + movl %esp, %eax
15151 + call pax_randomize_kstack
15152 +#endif
15153 +
15154 + pax_erase_kstack
15155 +
15156 restore_all:
15157 TRACE_IRQS_IRET
15158 restore_all_notrace:
15159 @@ -579,14 +770,34 @@ ldt_ss:
15160 * compensating for the offset by changing to the ESPFIX segment with
15161 * a base address that matches for the difference.
15162 */
15163 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15164 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15165 mov %esp, %edx /* load kernel esp */
15166 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15167 mov %dx, %ax /* eax: new kernel esp */
15168 sub %eax, %edx /* offset (low word is 0) */
15169 +#ifdef CONFIG_SMP
15170 + movl PER_CPU_VAR(cpu_number), %ebx
15171 + shll $PAGE_SHIFT_asm, %ebx
15172 + addl $cpu_gdt_table, %ebx
15173 +#else
15174 + movl $cpu_gdt_table, %ebx
15175 +#endif
15176 shr $16, %edx
15177 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15178 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15179 +
15180 +#ifdef CONFIG_PAX_KERNEXEC
15181 + mov %cr0, %esi
15182 + btr $16, %esi
15183 + mov %esi, %cr0
15184 +#endif
15185 +
15186 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15187 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15188 +
15189 +#ifdef CONFIG_PAX_KERNEXEC
15190 + bts $16, %esi
15191 + mov %esi, %cr0
15192 +#endif
15193 +
15194 pushl_cfi $__ESPFIX_SS
15195 pushl_cfi %eax /* new kernel esp */
15196 /* Disable interrupts, but do not irqtrace this section: we
15197 @@ -615,38 +826,30 @@ work_resched:
15198 movl TI_flags(%ebp), %ecx
15199 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15200 # than syscall tracing?
15201 - jz restore_all
15202 + jz restore_all_pax
15203 testb $_TIF_NEED_RESCHED, %cl
15204 jnz work_resched
15205
15206 work_notifysig: # deal with pending signals and
15207 # notify-resume requests
15208 + movl %esp, %eax
15209 #ifdef CONFIG_VM86
15210 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15211 - movl %esp, %eax
15212 - jne work_notifysig_v86 # returning to kernel-space or
15213 + jz 1f # returning to kernel-space or
15214 # vm86-space
15215 - TRACE_IRQS_ON
15216 - ENABLE_INTERRUPTS(CLBR_NONE)
15217 - xorl %edx, %edx
15218 - call do_notify_resume
15219 - jmp resume_userspace_sig
15220
15221 - ALIGN
15222 -work_notifysig_v86:
15223 pushl_cfi %ecx # save ti_flags for do_notify_resume
15224 call save_v86_state # %eax contains pt_regs pointer
15225 popl_cfi %ecx
15226 movl %eax, %esp
15227 -#else
15228 - movl %esp, %eax
15229 +1:
15230 #endif
15231 TRACE_IRQS_ON
15232 ENABLE_INTERRUPTS(CLBR_NONE)
15233 xorl %edx, %edx
15234 call do_notify_resume
15235 jmp resume_userspace_sig
15236 -END(work_pending)
15237 +ENDPROC(work_pending)
15238
15239 # perform syscall exit tracing
15240 ALIGN
15241 @@ -654,11 +857,14 @@ syscall_trace_entry:
15242 movl $-ENOSYS,PT_EAX(%esp)
15243 movl %esp, %eax
15244 call syscall_trace_enter
15245 +
15246 + pax_erase_kstack
15247 +
15248 /* What it returned is what we'll actually use. */
15249 cmpl $(NR_syscalls), %eax
15250 jnae syscall_call
15251 jmp syscall_exit
15252 -END(syscall_trace_entry)
15253 +ENDPROC(syscall_trace_entry)
15254
15255 # perform syscall exit tracing
15256 ALIGN
15257 @@ -671,20 +877,24 @@ syscall_exit_work:
15258 movl %esp, %eax
15259 call syscall_trace_leave
15260 jmp resume_userspace
15261 -END(syscall_exit_work)
15262 +ENDPROC(syscall_exit_work)
15263 CFI_ENDPROC
15264
15265 RING0_INT_FRAME # can't unwind into user space anyway
15266 syscall_fault:
15267 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15268 + push %ss
15269 + pop %ds
15270 +#endif
15271 GET_THREAD_INFO(%ebp)
15272 movl $-EFAULT,PT_EAX(%esp)
15273 jmp resume_userspace
15274 -END(syscall_fault)
15275 +ENDPROC(syscall_fault)
15276
15277 syscall_badsys:
15278 movl $-ENOSYS,PT_EAX(%esp)
15279 jmp resume_userspace
15280 -END(syscall_badsys)
15281 +ENDPROC(syscall_badsys)
15282 CFI_ENDPROC
15283 /*
15284 * End of kprobes section
15285 @@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15286 CFI_ENDPROC
15287 ENDPROC(ptregs_clone)
15288
15289 + ALIGN;
15290 +ENTRY(kernel_execve)
15291 + CFI_STARTPROC
15292 + pushl_cfi %ebp
15293 + sub $PT_OLDSS+4,%esp
15294 + pushl_cfi %edi
15295 + pushl_cfi %ecx
15296 + pushl_cfi %eax
15297 + lea 3*4(%esp),%edi
15298 + mov $PT_OLDSS/4+1,%ecx
15299 + xorl %eax,%eax
15300 + rep stosl
15301 + popl_cfi %eax
15302 + popl_cfi %ecx
15303 + popl_cfi %edi
15304 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15305 + pushl_cfi %esp
15306 + call sys_execve
15307 + add $4,%esp
15308 + CFI_ADJUST_CFA_OFFSET -4
15309 + GET_THREAD_INFO(%ebp)
15310 + test %eax,%eax
15311 + jz syscall_exit
15312 + add $PT_OLDSS+4,%esp
15313 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15314 + popl_cfi %ebp
15315 + ret
15316 + CFI_ENDPROC
15317 +ENDPROC(kernel_execve)
15318 +
15319 .macro FIXUP_ESPFIX_STACK
15320 /*
15321 * Switch back for ESPFIX stack to the normal zerobased stack
15322 @@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15323 * normal stack and adjusts ESP with the matching offset.
15324 */
15325 /* fixup the stack */
15326 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15327 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15328 +#ifdef CONFIG_SMP
15329 + movl PER_CPU_VAR(cpu_number), %ebx
15330 + shll $PAGE_SHIFT_asm, %ebx
15331 + addl $cpu_gdt_table, %ebx
15332 +#else
15333 + movl $cpu_gdt_table, %ebx
15334 +#endif
15335 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15336 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15337 shl $16, %eax
15338 addl %esp, %eax /* the adjusted stack pointer */
15339 pushl_cfi $__KERNEL_DS
15340 @@ -819,7 +1066,7 @@ vector=vector+1
15341 .endr
15342 2: jmp common_interrupt
15343 .endr
15344 -END(irq_entries_start)
15345 +ENDPROC(irq_entries_start)
15346
15347 .previous
15348 END(interrupt)
15349 @@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15350 pushl_cfi $do_coprocessor_error
15351 jmp error_code
15352 CFI_ENDPROC
15353 -END(coprocessor_error)
15354 +ENDPROC(coprocessor_error)
15355
15356 ENTRY(simd_coprocessor_error)
15357 RING0_INT_FRAME
15358 @@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15359 #endif
15360 jmp error_code
15361 CFI_ENDPROC
15362 -END(simd_coprocessor_error)
15363 +ENDPROC(simd_coprocessor_error)
15364
15365 ENTRY(device_not_available)
15366 RING0_INT_FRAME
15367 @@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15368 pushl_cfi $do_device_not_available
15369 jmp error_code
15370 CFI_ENDPROC
15371 -END(device_not_available)
15372 +ENDPROC(device_not_available)
15373
15374 #ifdef CONFIG_PARAVIRT
15375 ENTRY(native_iret)
15376 @@ -905,12 +1152,12 @@ ENTRY(native_iret)
15377 .align 4
15378 .long native_iret, iret_exc
15379 .previous
15380 -END(native_iret)
15381 +ENDPROC(native_iret)
15382
15383 ENTRY(native_irq_enable_sysexit)
15384 sti
15385 sysexit
15386 -END(native_irq_enable_sysexit)
15387 +ENDPROC(native_irq_enable_sysexit)
15388 #endif
15389
15390 ENTRY(overflow)
15391 @@ -919,7 +1166,7 @@ ENTRY(overflow)
15392 pushl_cfi $do_overflow
15393 jmp error_code
15394 CFI_ENDPROC
15395 -END(overflow)
15396 +ENDPROC(overflow)
15397
15398 ENTRY(bounds)
15399 RING0_INT_FRAME
15400 @@ -927,7 +1174,7 @@ ENTRY(bounds)
15401 pushl_cfi $do_bounds
15402 jmp error_code
15403 CFI_ENDPROC
15404 -END(bounds)
15405 +ENDPROC(bounds)
15406
15407 ENTRY(invalid_op)
15408 RING0_INT_FRAME
15409 @@ -935,7 +1182,7 @@ ENTRY(invalid_op)
15410 pushl_cfi $do_invalid_op
15411 jmp error_code
15412 CFI_ENDPROC
15413 -END(invalid_op)
15414 +ENDPROC(invalid_op)
15415
15416 ENTRY(coprocessor_segment_overrun)
15417 RING0_INT_FRAME
15418 @@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
15419 pushl_cfi $do_coprocessor_segment_overrun
15420 jmp error_code
15421 CFI_ENDPROC
15422 -END(coprocessor_segment_overrun)
15423 +ENDPROC(coprocessor_segment_overrun)
15424
15425 ENTRY(invalid_TSS)
15426 RING0_EC_FRAME
15427 pushl_cfi $do_invalid_TSS
15428 jmp error_code
15429 CFI_ENDPROC
15430 -END(invalid_TSS)
15431 +ENDPROC(invalid_TSS)
15432
15433 ENTRY(segment_not_present)
15434 RING0_EC_FRAME
15435 pushl_cfi $do_segment_not_present
15436 jmp error_code
15437 CFI_ENDPROC
15438 -END(segment_not_present)
15439 +ENDPROC(segment_not_present)
15440
15441 ENTRY(stack_segment)
15442 RING0_EC_FRAME
15443 pushl_cfi $do_stack_segment
15444 jmp error_code
15445 CFI_ENDPROC
15446 -END(stack_segment)
15447 +ENDPROC(stack_segment)
15448
15449 ENTRY(alignment_check)
15450 RING0_EC_FRAME
15451 pushl_cfi $do_alignment_check
15452 jmp error_code
15453 CFI_ENDPROC
15454 -END(alignment_check)
15455 +ENDPROC(alignment_check)
15456
15457 ENTRY(divide_error)
15458 RING0_INT_FRAME
15459 @@ -979,7 +1226,7 @@ ENTRY(divide_error)
15460 pushl_cfi $do_divide_error
15461 jmp error_code
15462 CFI_ENDPROC
15463 -END(divide_error)
15464 +ENDPROC(divide_error)
15465
15466 #ifdef CONFIG_X86_MCE
15467 ENTRY(machine_check)
15468 @@ -988,7 +1235,7 @@ ENTRY(machine_check)
15469 pushl_cfi machine_check_vector
15470 jmp error_code
15471 CFI_ENDPROC
15472 -END(machine_check)
15473 +ENDPROC(machine_check)
15474 #endif
15475
15476 ENTRY(spurious_interrupt_bug)
15477 @@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15478 pushl_cfi $do_spurious_interrupt_bug
15479 jmp error_code
15480 CFI_ENDPROC
15481 -END(spurious_interrupt_bug)
15482 +ENDPROC(spurious_interrupt_bug)
15483 /*
15484 * End of kprobes section
15485 */
15486 @@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15487
15488 ENTRY(mcount)
15489 ret
15490 -END(mcount)
15491 +ENDPROC(mcount)
15492
15493 ENTRY(ftrace_caller)
15494 cmpl $0, function_trace_stop
15495 @@ -1141,7 +1388,7 @@ ftrace_graph_call:
15496 .globl ftrace_stub
15497 ftrace_stub:
15498 ret
15499 -END(ftrace_caller)
15500 +ENDPROC(ftrace_caller)
15501
15502 #else /* ! CONFIG_DYNAMIC_FTRACE */
15503
15504 @@ -1177,7 +1424,7 @@ trace:
15505 popl %ecx
15506 popl %eax
15507 jmp ftrace_stub
15508 -END(mcount)
15509 +ENDPROC(mcount)
15510 #endif /* CONFIG_DYNAMIC_FTRACE */
15511 #endif /* CONFIG_FUNCTION_TRACER */
15512
15513 @@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15514 popl %ecx
15515 popl %eax
15516 ret
15517 -END(ftrace_graph_caller)
15518 +ENDPROC(ftrace_graph_caller)
15519
15520 .globl return_to_handler
15521 return_to_handler:
15522 @@ -1253,15 +1500,18 @@ error_code:
15523 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15524 REG_TO_PTGS %ecx
15525 SET_KERNEL_GS %ecx
15526 - movl $(__USER_DS), %ecx
15527 + movl $(__KERNEL_DS), %ecx
15528 movl %ecx, %ds
15529 movl %ecx, %es
15530 +
15531 + pax_enter_kernel
15532 +
15533 TRACE_IRQS_OFF
15534 movl %esp,%eax # pt_regs pointer
15535 call *%edi
15536 jmp ret_from_exception
15537 CFI_ENDPROC
15538 -END(page_fault)
15539 +ENDPROC(page_fault)
15540
15541 /*
15542 * Debug traps and NMI can happen at the one SYSENTER instruction
15543 @@ -1303,7 +1553,7 @@ debug_stack_correct:
15544 call do_debug
15545 jmp ret_from_exception
15546 CFI_ENDPROC
15547 -END(debug)
15548 +ENDPROC(debug)
15549
15550 /*
15551 * NMI is doubly nasty. It can happen _while_ we're handling
15552 @@ -1340,6 +1590,9 @@ nmi_stack_correct:
15553 xorl %edx,%edx # zero error code
15554 movl %esp,%eax # pt_regs pointer
15555 call do_nmi
15556 +
15557 + pax_exit_kernel
15558 +
15559 jmp restore_all_notrace
15560 CFI_ENDPROC
15561
15562 @@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15563 FIXUP_ESPFIX_STACK # %eax == %esp
15564 xorl %edx,%edx # zero error code
15565 call do_nmi
15566 +
15567 + pax_exit_kernel
15568 +
15569 RESTORE_REGS
15570 lss 12+4(%esp), %esp # back to espfix stack
15571 CFI_ADJUST_CFA_OFFSET -24
15572 jmp irq_return
15573 CFI_ENDPROC
15574 -END(nmi)
15575 +ENDPROC(nmi)
15576
15577 ENTRY(int3)
15578 RING0_INT_FRAME
15579 @@ -1393,14 +1649,14 @@ ENTRY(int3)
15580 call do_int3
15581 jmp ret_from_exception
15582 CFI_ENDPROC
15583 -END(int3)
15584 +ENDPROC(int3)
15585
15586 ENTRY(general_protection)
15587 RING0_EC_FRAME
15588 pushl_cfi $do_general_protection
15589 jmp error_code
15590 CFI_ENDPROC
15591 -END(general_protection)
15592 +ENDPROC(general_protection)
15593
15594 #ifdef CONFIG_KVM_GUEST
15595 ENTRY(async_page_fault)
15596 @@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15597 pushl_cfi $do_async_page_fault
15598 jmp error_code
15599 CFI_ENDPROC
15600 -END(async_page_fault)
15601 +ENDPROC(async_page_fault)
15602 #endif
15603
15604 /*
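
One recurring detail in the entry_32.S hunks above (both the ldt_ss/ESPFIX path and FIXUP_ESPFIX_STACK) is that the single PER_CPU_VAR(gdt_page) reference is replaced by an explicit lookup in a cpu_gdt_table array: %ebx is loaded with cpu_number, shifted left by PAGE_SHIFT_asm and added to cpu_gdt_table, so the patch assumes one page-sized GDT per CPU. A C sketch of that address computation (illustrative only; the helper name is made up and PAGE_SIZE is an assumption of the sketch):

    #define PAGE_SIZE 4096U				/* x86; assumption for the sketch */

    struct desc_struct { unsigned long long raw; };	/* one 8-byte GDT entry */

    /* matches: movl PER_CPU_VAR(cpu_number),%ebx; shll $PAGE_SHIFT_asm,%ebx;
     *          addl $cpu_gdt_table,%ebx */
    static struct desc_struct *per_cpu_gdt(struct desc_struct *cpu_gdt_table,
    				       unsigned int cpu)
    {
    	return (struct desc_struct *)
    		((char *)cpu_gdt_table + (unsigned long)cpu * PAGE_SIZE);
    }
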
15605 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15606 index cdc79b5..4710a75 100644
15607 --- a/arch/x86/kernel/entry_64.S
15608 +++ b/arch/x86/kernel/entry_64.S
15609 @@ -56,6 +56,8 @@
15610 #include <asm/ftrace.h>
15611 #include <asm/percpu.h>
15612 #include <linux/err.h>
15613 +#include <asm/pgtable.h>
15614 +#include <asm/alternative-asm.h>
15615
15616 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15617 #include <linux/elf-em.h>
15618 @@ -69,8 +71,9 @@
15619 #ifdef CONFIG_FUNCTION_TRACER
15620 #ifdef CONFIG_DYNAMIC_FTRACE
15621 ENTRY(mcount)
15622 + pax_force_retaddr
15623 retq
15624 -END(mcount)
15625 +ENDPROC(mcount)
15626
15627 ENTRY(ftrace_caller)
15628 cmpl $0, function_trace_stop
15629 @@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15630 #endif
15631
15632 GLOBAL(ftrace_stub)
15633 + pax_force_retaddr
15634 retq
15635 -END(ftrace_caller)
15636 +ENDPROC(ftrace_caller)
15637
15638 #else /* ! CONFIG_DYNAMIC_FTRACE */
15639 ENTRY(mcount)
15640 @@ -113,6 +117,7 @@ ENTRY(mcount)
15641 #endif
15642
15643 GLOBAL(ftrace_stub)
15644 + pax_force_retaddr
15645 retq
15646
15647 trace:
15648 @@ -122,12 +127,13 @@ trace:
15649 movq 8(%rbp), %rsi
15650 subq $MCOUNT_INSN_SIZE, %rdi
15651
15652 + pax_force_fptr ftrace_trace_function
15653 call *ftrace_trace_function
15654
15655 MCOUNT_RESTORE_FRAME
15656
15657 jmp ftrace_stub
15658 -END(mcount)
15659 +ENDPROC(mcount)
15660 #endif /* CONFIG_DYNAMIC_FTRACE */
15661 #endif /* CONFIG_FUNCTION_TRACER */
15662
15663 @@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15664
15665 MCOUNT_RESTORE_FRAME
15666
15667 + pax_force_retaddr
15668 retq
15669 -END(ftrace_graph_caller)
15670 +ENDPROC(ftrace_graph_caller)
15671
15672 GLOBAL(return_to_handler)
15673 subq $24, %rsp
15674 @@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15675 movq 8(%rsp), %rdx
15676 movq (%rsp), %rax
15677 addq $24, %rsp
15678 + pax_force_fptr %rdi
15679 jmp *%rdi
15680 #endif
15681
15682 @@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15683 ENDPROC(native_usergs_sysret64)
15684 #endif /* CONFIG_PARAVIRT */
15685
15686 + .macro ljmpq sel, off
15687 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15688 + .byte 0x48; ljmp *1234f(%rip)
15689 + .pushsection .rodata
15690 + .align 16
15691 + 1234: .quad \off; .word \sel
15692 + .popsection
15693 +#else
15694 + pushq $\sel
15695 + pushq $\off
15696 + lretq
15697 +#endif
15698 + .endm
15699 +
15700 + .macro pax_enter_kernel
15701 + pax_set_fptr_mask
15702 +#ifdef CONFIG_PAX_KERNEXEC
15703 + call pax_enter_kernel
15704 +#endif
15705 + .endm
15706 +
15707 + .macro pax_exit_kernel
15708 +#ifdef CONFIG_PAX_KERNEXEC
15709 + call pax_exit_kernel
15710 +#endif
15711 + .endm
15712 +
15713 +#ifdef CONFIG_PAX_KERNEXEC
15714 +ENTRY(pax_enter_kernel)
15715 + pushq %rdi
15716 +
15717 +#ifdef CONFIG_PARAVIRT
15718 + PV_SAVE_REGS(CLBR_RDI)
15719 +#endif
15720 +
15721 + GET_CR0_INTO_RDI
15722 + bts $16,%rdi
15723 + jnc 3f
15724 + mov %cs,%edi
15725 + cmp $__KERNEL_CS,%edi
15726 + jnz 2f
15727 +1:
15728 +
15729 +#ifdef CONFIG_PARAVIRT
15730 + PV_RESTORE_REGS(CLBR_RDI)
15731 +#endif
15732 +
15733 + popq %rdi
15734 + pax_force_retaddr
15735 + retq
15736 +
15737 +2: ljmpq __KERNEL_CS,1f
15738 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15739 +4: SET_RDI_INTO_CR0
15740 + jmp 1b
15741 +ENDPROC(pax_enter_kernel)
15742 +
15743 +ENTRY(pax_exit_kernel)
15744 + pushq %rdi
15745 +
15746 +#ifdef CONFIG_PARAVIRT
15747 + PV_SAVE_REGS(CLBR_RDI)
15748 +#endif
15749 +
15750 + mov %cs,%rdi
15751 + cmp $__KERNEXEC_KERNEL_CS,%edi
15752 + jz 2f
15753 +1:
15754 +
15755 +#ifdef CONFIG_PARAVIRT
15756 + PV_RESTORE_REGS(CLBR_RDI);
15757 +#endif
15758 +
15759 + popq %rdi
15760 + pax_force_retaddr
15761 + retq
15762 +
15763 +2: GET_CR0_INTO_RDI
15764 + btr $16,%rdi
15765 + ljmpq __KERNEL_CS,3f
15766 +3: SET_RDI_INTO_CR0
15767 + jmp 1b
15768 +#ifdef CONFIG_PARAVIRT
15769 + PV_RESTORE_REGS(CLBR_RDI);
15770 +#endif
15771 +
15772 + popq %rdi
15773 + pax_force_retaddr
15774 + retq
15775 +ENDPROC(pax_exit_kernel)
15776 +#endif
15777 +
15778 + .macro pax_enter_kernel_user
15779 + pax_set_fptr_mask
15780 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15781 + call pax_enter_kernel_user
15782 +#endif
15783 + .endm
15784 +
15785 + .macro pax_exit_kernel_user
15786 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15787 + call pax_exit_kernel_user
15788 +#endif
15789 +#ifdef CONFIG_PAX_RANDKSTACK
15790 + pushq %rax
15791 + call pax_randomize_kstack
15792 + popq %rax
15793 +#endif
15794 + .endm
15795 +
15796 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15797 +ENTRY(pax_enter_kernel_user)
15798 + pushq %rdi
15799 + pushq %rbx
15800 +
15801 +#ifdef CONFIG_PARAVIRT
15802 + PV_SAVE_REGS(CLBR_RDI)
15803 +#endif
15804 +
15805 + GET_CR3_INTO_RDI
15806 + mov %rdi,%rbx
15807 + add $__START_KERNEL_map,%rbx
15808 + sub phys_base(%rip),%rbx
15809 +
15810 +#ifdef CONFIG_PARAVIRT
15811 + pushq %rdi
15812 + cmpl $0, pv_info+PARAVIRT_enabled
15813 + jz 1f
15814 + i = 0
15815 + .rept USER_PGD_PTRS
15816 + mov i*8(%rbx),%rsi
15817 + mov $0,%sil
15818 + lea i*8(%rbx),%rdi
15819 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15820 + i = i + 1
15821 + .endr
15822 + jmp 2f
15823 +1:
15824 +#endif
15825 +
15826 + i = 0
15827 + .rept USER_PGD_PTRS
15828 + movb $0,i*8(%rbx)
15829 + i = i + 1
15830 + .endr
15831 +
15832 +#ifdef CONFIG_PARAVIRT
15833 +2: popq %rdi
15834 +#endif
15835 + SET_RDI_INTO_CR3
15836 +
15837 +#ifdef CONFIG_PAX_KERNEXEC
15838 + GET_CR0_INTO_RDI
15839 + bts $16,%rdi
15840 + SET_RDI_INTO_CR0
15841 +#endif
15842 +
15843 +#ifdef CONFIG_PARAVIRT
15844 + PV_RESTORE_REGS(CLBR_RDI)
15845 +#endif
15846 +
15847 + popq %rbx
15848 + popq %rdi
15849 + pax_force_retaddr
15850 + retq
15851 +ENDPROC(pax_enter_kernel_user)
15852 +
15853 +ENTRY(pax_exit_kernel_user)
15854 + push %rdi
15855 +
15856 +#ifdef CONFIG_PARAVIRT
15857 + pushq %rbx
15858 + PV_SAVE_REGS(CLBR_RDI)
15859 +#endif
15860 +
15861 +#ifdef CONFIG_PAX_KERNEXEC
15862 + GET_CR0_INTO_RDI
15863 + btr $16,%rdi
15864 + SET_RDI_INTO_CR0
15865 +#endif
15866 +
15867 + GET_CR3_INTO_RDI
15868 + add $__START_KERNEL_map,%rdi
15869 + sub phys_base(%rip),%rdi
15870 +
15871 +#ifdef CONFIG_PARAVIRT
15872 + cmpl $0, pv_info+PARAVIRT_enabled
15873 + jz 1f
15874 + mov %rdi,%rbx
15875 + i = 0
15876 + .rept USER_PGD_PTRS
15877 + mov i*8(%rbx),%rsi
15878 + mov $0x67,%sil
15879 + lea i*8(%rbx),%rdi
15880 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15881 + i = i + 1
15882 + .endr
15883 + jmp 2f
15884 +1:
15885 +#endif
15886 +
15887 + i = 0
15888 + .rept USER_PGD_PTRS
15889 + movb $0x67,i*8(%rdi)
15890 + i = i + 1
15891 + .endr
15892 +
15893 +#ifdef CONFIG_PARAVIRT
15894 +2: PV_RESTORE_REGS(CLBR_RDI)
15895 + popq %rbx
15896 +#endif
15897 +
15898 + popq %rdi
15899 + pax_force_retaddr
15900 + retq
15901 +ENDPROC(pax_exit_kernel_user)
15902 +#endif
15903 +
15904 +.macro pax_erase_kstack
15905 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15906 + call pax_erase_kstack
15907 +#endif
15908 +.endm
15909 +
15910 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15911 +/*
15912 + * r11: thread_info
15913 + * rcx, rdx: can be clobbered
15914 + */
15915 +ENTRY(pax_erase_kstack)
15916 + pushq %rdi
15917 + pushq %rax
15918 + pushq %r11
15919 +
15920 + GET_THREAD_INFO(%r11)
15921 + mov TI_lowest_stack(%r11), %rdi
15922 + mov $-0xBEEF, %rax
15923 + std
15924 +
15925 +1: mov %edi, %ecx
15926 + and $THREAD_SIZE_asm - 1, %ecx
15927 + shr $3, %ecx
15928 + repne scasq
15929 + jecxz 2f
15930 +
15931 + cmp $2*8, %ecx
15932 + jc 2f
15933 +
15934 + mov $2*8, %ecx
15935 + repe scasq
15936 + jecxz 2f
15937 + jne 1b
15938 +
15939 +2: cld
15940 + mov %esp, %ecx
15941 + sub %edi, %ecx
15942 +
15943 + cmp $THREAD_SIZE_asm, %rcx
15944 + jb 3f
15945 + ud2
15946 +3:
15947 +
15948 + shr $3, %ecx
15949 + rep stosq
15950 +
15951 + mov TI_task_thread_sp0(%r11), %rdi
15952 + sub $256, %rdi
15953 + mov %rdi, TI_lowest_stack(%r11)
15954 +
15955 + popq %r11
15956 + popq %rax
15957 + popq %rdi
15958 + pax_force_retaddr
15959 + ret
15960 +ENDPROC(pax_erase_kstack)
15961 +#endif
15962
15963 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15964 #ifdef CONFIG_TRACE_IRQFLAGS
15965 @@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
15966 .endm
15967
15968 .macro UNFAKE_STACK_FRAME
15969 - addq $8*6, %rsp
15970 - CFI_ADJUST_CFA_OFFSET -(6*8)
15971 + addq $8*6 + ARG_SKIP, %rsp
15972 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15973 .endm
15974
15975 /*
15976 @@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
15977 movq %rsp, %rsi
15978
15979 leaq -RBP(%rsp),%rdi /* arg1 for handler */
15980 - testl $3, CS-RBP(%rsi)
15981 + testb $3, CS-RBP(%rsi)
15982 je 1f
15983 SWAPGS
15984 /*
15985 @@ -355,9 +639,10 @@ ENTRY(save_rest)
15986 movq_cfi r15, R15+16
15987 movq %r11, 8(%rsp) /* return address */
15988 FIXUP_TOP_OF_STACK %r11, 16
15989 + pax_force_retaddr
15990 ret
15991 CFI_ENDPROC
15992 -END(save_rest)
15993 +ENDPROC(save_rest)
15994
15995 /* save complete stack frame */
15996 .pushsection .kprobes.text, "ax"
15997 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
15998 js 1f /* negative -> in kernel */
15999 SWAPGS
16000 xorl %ebx,%ebx
16001 -1: ret
16002 +1: pax_force_retaddr_bts
16003 + ret
16004 CFI_ENDPROC
16005 -END(save_paranoid)
16006 +ENDPROC(save_paranoid)
16007 .popsection
16008
16009 /*
16010 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
16011
16012 RESTORE_REST
16013
16014 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16015 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16016 jz retint_restore_args
16017
16018 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16019 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
16020 jmp ret_from_sys_call # go to the SYSRET fastpath
16021
16022 CFI_ENDPROC
16023 -END(ret_from_fork)
16024 +ENDPROC(ret_from_fork)
16025
16026 /*
16027 * System call entry. Up to 6 arguments in registers are supported.
16028 @@ -456,7 +742,7 @@ END(ret_from_fork)
16029 ENTRY(system_call)
16030 CFI_STARTPROC simple
16031 CFI_SIGNAL_FRAME
16032 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16033 + CFI_DEF_CFA rsp,0
16034 CFI_REGISTER rip,rcx
16035 /*CFI_REGISTER rflags,r11*/
16036 SWAPGS_UNSAFE_STACK
16037 @@ -469,16 +755,18 @@ GLOBAL(system_call_after_swapgs)
16038
16039 movq %rsp,PER_CPU_VAR(old_rsp)
16040 movq PER_CPU_VAR(kernel_stack),%rsp
16041 + SAVE_ARGS 8*6,0
16042 + pax_enter_kernel_user
16043 /*
16044 * No need to follow this irqs off/on section - it's straight
16045 * and short:
16046 */
16047 ENABLE_INTERRUPTS(CLBR_NONE)
16048 - SAVE_ARGS 8,0
16049 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16050 movq %rcx,RIP-ARGOFFSET(%rsp)
16051 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16052 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16053 + GET_THREAD_INFO(%rcx)
16054 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16055 jnz tracesys
16056 system_call_fastpath:
16057 #if __SYSCALL_MASK == ~0
16058 @@ -488,7 +776,7 @@ system_call_fastpath:
16059 cmpl $__NR_syscall_max,%eax
16060 #endif
16061 ja badsys
16062 - movq %r10,%rcx
16063 + movq R10-ARGOFFSET(%rsp),%rcx
16064 call *sys_call_table(,%rax,8) # XXX: rip relative
16065 movq %rax,RAX-ARGOFFSET(%rsp)
16066 /*
16067 @@ -502,10 +790,13 @@ sysret_check:
16068 LOCKDEP_SYS_EXIT
16069 DISABLE_INTERRUPTS(CLBR_NONE)
16070 TRACE_IRQS_OFF
16071 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16072 + GET_THREAD_INFO(%rcx)
16073 + movl TI_flags(%rcx),%edx
16074 andl %edi,%edx
16075 jnz sysret_careful
16076 CFI_REMEMBER_STATE
16077 + pax_exit_kernel_user
16078 + pax_erase_kstack
16079 /*
16080 * sysretq will re-enable interrupts:
16081 */
16082 @@ -557,14 +848,18 @@ badsys:
16083 * jump back to the normal fast path.
16084 */
16085 auditsys:
16086 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16087 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16088 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16089 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16090 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16091 movq %rax,%rsi /* 2nd arg: syscall number */
16092 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16093 call __audit_syscall_entry
16094 +
16095 + pax_erase_kstack
16096 +
16097 LOAD_ARGS 0 /* reload call-clobbered registers */
16098 + pax_set_fptr_mask
16099 jmp system_call_fastpath
16100
16101 /*
16102 @@ -585,7 +880,7 @@ sysret_audit:
16103 /* Do syscall tracing */
16104 tracesys:
16105 #ifdef CONFIG_AUDITSYSCALL
16106 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16107 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16108 jz auditsys
16109 #endif
16110 SAVE_REST
16111 @@ -593,12 +888,16 @@ tracesys:
16112 FIXUP_TOP_OF_STACK %rdi
16113 movq %rsp,%rdi
16114 call syscall_trace_enter
16115 +
16116 + pax_erase_kstack
16117 +
16118 /*
16119 * Reload arg registers from stack in case ptrace changed them.
16120 * We don't reload %rax because syscall_trace_enter() returned
16121 * the value it wants us to use in the table lookup.
16122 */
16123 LOAD_ARGS ARGOFFSET, 1
16124 + pax_set_fptr_mask
16125 RESTORE_REST
16126 #if __SYSCALL_MASK == ~0
16127 cmpq $__NR_syscall_max,%rax
16128 @@ -607,7 +906,7 @@ tracesys:
16129 cmpl $__NR_syscall_max,%eax
16130 #endif
16131 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16132 - movq %r10,%rcx /* fixup for C */
16133 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16134 call *sys_call_table(,%rax,8)
16135 movq %rax,RAX-ARGOFFSET(%rsp)
16136 /* Use IRET because user could have changed frame */
16137 @@ -628,6 +927,7 @@ GLOBAL(int_with_check)
16138 andl %edi,%edx
16139 jnz int_careful
16140 andl $~TS_COMPAT,TI_status(%rcx)
16141 + pax_erase_kstack
16142 jmp retint_swapgs
16143
16144 /* Either reschedule or signal or syscall exit tracking needed. */
16145 @@ -674,7 +974,7 @@ int_restore_rest:
16146 TRACE_IRQS_OFF
16147 jmp int_with_check
16148 CFI_ENDPROC
16149 -END(system_call)
16150 +ENDPROC(system_call)
16151
16152 /*
16153 * Certain special system calls that need to save a complete full stack frame.
16154 @@ -690,7 +990,7 @@ ENTRY(\label)
16155 call \func
16156 jmp ptregscall_common
16157 CFI_ENDPROC
16158 -END(\label)
16159 +ENDPROC(\label)
16160 .endm
16161
16162 PTREGSCALL stub_clone, sys_clone, %r8
16163 @@ -708,9 +1008,10 @@ ENTRY(ptregscall_common)
16164 movq_cfi_restore R12+8, r12
16165 movq_cfi_restore RBP+8, rbp
16166 movq_cfi_restore RBX+8, rbx
16167 + pax_force_retaddr
16168 ret $REST_SKIP /* pop extended registers */
16169 CFI_ENDPROC
16170 -END(ptregscall_common)
16171 +ENDPROC(ptregscall_common)
16172
16173 ENTRY(stub_execve)
16174 CFI_STARTPROC
16175 @@ -725,7 +1026,7 @@ ENTRY(stub_execve)
16176 RESTORE_REST
16177 jmp int_ret_from_sys_call
16178 CFI_ENDPROC
16179 -END(stub_execve)
16180 +ENDPROC(stub_execve)
16181
16182 /*
16183 * sigreturn is special because it needs to restore all registers on return.
16184 @@ -743,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16185 RESTORE_REST
16186 jmp int_ret_from_sys_call
16187 CFI_ENDPROC
16188 -END(stub_rt_sigreturn)
16189 +ENDPROC(stub_rt_sigreturn)
16190
16191 #ifdef CONFIG_X86_X32_ABI
16192 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16193 @@ -812,7 +1113,7 @@ vector=vector+1
16194 2: jmp common_interrupt
16195 .endr
16196 CFI_ENDPROC
16197 -END(irq_entries_start)
16198 +ENDPROC(irq_entries_start)
16199
16200 .previous
16201 END(interrupt)
16202 @@ -832,6 +1133,16 @@ END(interrupt)
16203 subq $ORIG_RAX-RBP, %rsp
16204 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16205 SAVE_ARGS_IRQ
16206 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16207 + testb $3, CS(%rdi)
16208 + jnz 1f
16209 + pax_enter_kernel
16210 + jmp 2f
16211 +1: pax_enter_kernel_user
16212 +2:
16213 +#else
16214 + pax_enter_kernel
16215 +#endif
16216 call \func
16217 .endm
16218
16219 @@ -863,7 +1174,7 @@ ret_from_intr:
16220
16221 exit_intr:
16222 GET_THREAD_INFO(%rcx)
16223 - testl $3,CS-ARGOFFSET(%rsp)
16224 + testb $3,CS-ARGOFFSET(%rsp)
16225 je retint_kernel
16226
16227 /* Interrupt came from user space */
16228 @@ -885,12 +1196,15 @@ retint_swapgs: /* return to user-space */
16229 * The iretq could re-enable interrupts:
16230 */
16231 DISABLE_INTERRUPTS(CLBR_ANY)
16232 + pax_exit_kernel_user
16233 TRACE_IRQS_IRETQ
16234 SWAPGS
16235 jmp restore_args
16236
16237 retint_restore_args: /* return to kernel space */
16238 DISABLE_INTERRUPTS(CLBR_ANY)
16239 + pax_exit_kernel
16240 + pax_force_retaddr RIP-ARGOFFSET
16241 /*
16242 * The iretq could re-enable interrupts:
16243 */
16244 @@ -979,7 +1293,7 @@ ENTRY(retint_kernel)
16245 #endif
16246
16247 CFI_ENDPROC
16248 -END(common_interrupt)
16249 +ENDPROC(common_interrupt)
16250 /*
16251 * End of kprobes section
16252 */
16253 @@ -996,7 +1310,7 @@ ENTRY(\sym)
16254 interrupt \do_sym
16255 jmp ret_from_intr
16256 CFI_ENDPROC
16257 -END(\sym)
16258 +ENDPROC(\sym)
16259 .endm
16260
16261 #ifdef CONFIG_SMP
16262 @@ -1069,12 +1383,22 @@ ENTRY(\sym)
16263 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16264 call error_entry
16265 DEFAULT_FRAME 0
16266 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16267 + testb $3, CS(%rsp)
16268 + jnz 1f
16269 + pax_enter_kernel
16270 + jmp 2f
16271 +1: pax_enter_kernel_user
16272 +2:
16273 +#else
16274 + pax_enter_kernel
16275 +#endif
16276 movq %rsp,%rdi /* pt_regs pointer */
16277 xorl %esi,%esi /* no error code */
16278 call \do_sym
16279 jmp error_exit /* %ebx: no swapgs flag */
16280 CFI_ENDPROC
16281 -END(\sym)
16282 +ENDPROC(\sym)
16283 .endm
16284
16285 .macro paranoidzeroentry sym do_sym
16286 @@ -1086,15 +1410,25 @@ ENTRY(\sym)
16287 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16288 call save_paranoid
16289 TRACE_IRQS_OFF
16290 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16291 + testb $3, CS(%rsp)
16292 + jnz 1f
16293 + pax_enter_kernel
16294 + jmp 2f
16295 +1: pax_enter_kernel_user
16296 +2:
16297 +#else
16298 + pax_enter_kernel
16299 +#endif
16300 movq %rsp,%rdi /* pt_regs pointer */
16301 xorl %esi,%esi /* no error code */
16302 call \do_sym
16303 jmp paranoid_exit /* %ebx: no swapgs flag */
16304 CFI_ENDPROC
16305 -END(\sym)
16306 +ENDPROC(\sym)
16307 .endm
16308
16309 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16310 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16311 .macro paranoidzeroentry_ist sym do_sym ist
16312 ENTRY(\sym)
16313 INTR_FRAME
16314 @@ -1104,14 +1438,30 @@ ENTRY(\sym)
16315 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16316 call save_paranoid
16317 TRACE_IRQS_OFF
16318 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16319 + testb $3, CS(%rsp)
16320 + jnz 1f
16321 + pax_enter_kernel
16322 + jmp 2f
16323 +1: pax_enter_kernel_user
16324 +2:
16325 +#else
16326 + pax_enter_kernel
16327 +#endif
16328 movq %rsp,%rdi /* pt_regs pointer */
16329 xorl %esi,%esi /* no error code */
16330 +#ifdef CONFIG_SMP
16331 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16332 + lea init_tss(%r12), %r12
16333 +#else
16334 + lea init_tss(%rip), %r12
16335 +#endif
16336 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16337 call \do_sym
16338 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16339 jmp paranoid_exit /* %ebx: no swapgs flag */
16340 CFI_ENDPROC
16341 -END(\sym)
16342 +ENDPROC(\sym)
16343 .endm
16344
16345 .macro errorentry sym do_sym
16346 @@ -1122,13 +1472,23 @@ ENTRY(\sym)
16347 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16348 call error_entry
16349 DEFAULT_FRAME 0
16350 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16351 + testb $3, CS(%rsp)
16352 + jnz 1f
16353 + pax_enter_kernel
16354 + jmp 2f
16355 +1: pax_enter_kernel_user
16356 +2:
16357 +#else
16358 + pax_enter_kernel
16359 +#endif
16360 movq %rsp,%rdi /* pt_regs pointer */
16361 movq ORIG_RAX(%rsp),%rsi /* get error code */
16362 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16363 call \do_sym
16364 jmp error_exit /* %ebx: no swapgs flag */
16365 CFI_ENDPROC
16366 -END(\sym)
16367 +ENDPROC(\sym)
16368 .endm
16369
16370 /* error code is on the stack already */
16371 @@ -1141,13 +1501,23 @@ ENTRY(\sym)
16372 call save_paranoid
16373 DEFAULT_FRAME 0
16374 TRACE_IRQS_OFF
16375 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16376 + testb $3, CS(%rsp)
16377 + jnz 1f
16378 + pax_enter_kernel
16379 + jmp 2f
16380 +1: pax_enter_kernel_user
16381 +2:
16382 +#else
16383 + pax_enter_kernel
16384 +#endif
16385 movq %rsp,%rdi /* pt_regs pointer */
16386 movq ORIG_RAX(%rsp),%rsi /* get error code */
16387 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16388 call \do_sym
16389 jmp paranoid_exit /* %ebx: no swapgs flag */
16390 CFI_ENDPROC
16391 -END(\sym)
16392 +ENDPROC(\sym)
16393 .endm
16394
16395 zeroentry divide_error do_divide_error
16396 @@ -1177,9 +1547,10 @@ gs_change:
16397 2: mfence /* workaround */
16398 SWAPGS
16399 popfq_cfi
16400 + pax_force_retaddr
16401 ret
16402 CFI_ENDPROC
16403 -END(native_load_gs_index)
16404 +ENDPROC(native_load_gs_index)
16405
16406 .section __ex_table,"a"
16407 .align 8
16408 @@ -1201,13 +1572,14 @@ ENTRY(kernel_thread_helper)
16409 * Here we are in the child and the registers are set as they were
16410 * at kernel_thread() invocation in the parent.
16411 */
16412 + pax_force_fptr %rsi
16413 call *%rsi
16414 # exit
16415 mov %eax, %edi
16416 call do_exit
16417 ud2 # padding for call trace
16418 CFI_ENDPROC
16419 -END(kernel_thread_helper)
16420 +ENDPROC(kernel_thread_helper)
16421
16422 /*
16423 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16424 @@ -1234,11 +1606,11 @@ ENTRY(kernel_execve)
16425 RESTORE_REST
16426 testq %rax,%rax
16427 je int_ret_from_sys_call
16428 - RESTORE_ARGS
16429 UNFAKE_STACK_FRAME
16430 + pax_force_retaddr
16431 ret
16432 CFI_ENDPROC
16433 -END(kernel_execve)
16434 +ENDPROC(kernel_execve)
16435
16436 /* Call softirq on interrupt stack. Interrupts are off. */
16437 ENTRY(call_softirq)
16438 @@ -1256,9 +1628,10 @@ ENTRY(call_softirq)
16439 CFI_DEF_CFA_REGISTER rsp
16440 CFI_ADJUST_CFA_OFFSET -8
16441 decl PER_CPU_VAR(irq_count)
16442 + pax_force_retaddr
16443 ret
16444 CFI_ENDPROC
16445 -END(call_softirq)
16446 +ENDPROC(call_softirq)
16447
16448 #ifdef CONFIG_XEN
16449 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16450 @@ -1296,7 +1669,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16451 decl PER_CPU_VAR(irq_count)
16452 jmp error_exit
16453 CFI_ENDPROC
16454 -END(xen_do_hypervisor_callback)
16455 +ENDPROC(xen_do_hypervisor_callback)
16456
16457 /*
16458 * Hypervisor uses this for application faults while it executes.
16459 @@ -1355,7 +1728,7 @@ ENTRY(xen_failsafe_callback)
16460 SAVE_ALL
16461 jmp error_exit
16462 CFI_ENDPROC
16463 -END(xen_failsafe_callback)
16464 +ENDPROC(xen_failsafe_callback)
16465
16466 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16467 xen_hvm_callback_vector xen_evtchn_do_upcall
16468 @@ -1404,16 +1777,31 @@ ENTRY(paranoid_exit)
16469 TRACE_IRQS_OFF
16470 testl %ebx,%ebx /* swapgs needed? */
16471 jnz paranoid_restore
16472 - testl $3,CS(%rsp)
16473 + testb $3,CS(%rsp)
16474 jnz paranoid_userspace
16475 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16476 + pax_exit_kernel
16477 + TRACE_IRQS_IRETQ 0
16478 + SWAPGS_UNSAFE_STACK
16479 + RESTORE_ALL 8
16480 + pax_force_retaddr_bts
16481 + jmp irq_return
16482 +#endif
16483 paranoid_swapgs:
16484 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16485 + pax_exit_kernel_user
16486 +#else
16487 + pax_exit_kernel
16488 +#endif
16489 TRACE_IRQS_IRETQ 0
16490 SWAPGS_UNSAFE_STACK
16491 RESTORE_ALL 8
16492 jmp irq_return
16493 paranoid_restore:
16494 + pax_exit_kernel
16495 TRACE_IRQS_IRETQ 0
16496 RESTORE_ALL 8
16497 + pax_force_retaddr_bts
16498 jmp irq_return
16499 paranoid_userspace:
16500 GET_THREAD_INFO(%rcx)
16501 @@ -1442,7 +1830,7 @@ paranoid_schedule:
16502 TRACE_IRQS_OFF
16503 jmp paranoid_userspace
16504 CFI_ENDPROC
16505 -END(paranoid_exit)
16506 +ENDPROC(paranoid_exit)
16507
16508 /*
16509 * Exception entry point. This expects an error code/orig_rax on the stack.
16510 @@ -1469,12 +1857,13 @@ ENTRY(error_entry)
16511 movq_cfi r14, R14+8
16512 movq_cfi r15, R15+8
16513 xorl %ebx,%ebx
16514 - testl $3,CS+8(%rsp)
16515 + testb $3,CS+8(%rsp)
16516 je error_kernelspace
16517 error_swapgs:
16518 SWAPGS
16519 error_sti:
16520 TRACE_IRQS_OFF
16521 + pax_force_retaddr_bts
16522 ret
16523
16524 /*
16525 @@ -1501,7 +1890,7 @@ bstep_iret:
16526 movq %rcx,RIP+8(%rsp)
16527 jmp error_swapgs
16528 CFI_ENDPROC
16529 -END(error_entry)
16530 +ENDPROC(error_entry)
16531
16532
16533 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16534 @@ -1521,7 +1910,7 @@ ENTRY(error_exit)
16535 jnz retint_careful
16536 jmp retint_swapgs
16537 CFI_ENDPROC
16538 -END(error_exit)
16539 +ENDPROC(error_exit)
16540
16541 /*
16542 * Test if a given stack is an NMI stack or not.
16543 @@ -1579,9 +1968,11 @@ ENTRY(nmi)
16544 * If %cs was not the kernel segment, then the NMI triggered in user
16545 * space, which means it is definitely not nested.
16546 */
16547 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16548 + je 1f
16549 cmpl $__KERNEL_CS, 16(%rsp)
16550 jne first_nmi
16551 -
16552 +1:
16553 /*
16554 * Check the special variable on the stack to see if NMIs are
16555 * executing.
16556 @@ -1728,6 +2119,16 @@ end_repeat_nmi:
16557 */
16558 call save_paranoid
16559 DEFAULT_FRAME 0
16560 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16561 + testb $3, CS(%rsp)
16562 + jnz 1f
16563 + pax_enter_kernel
16564 + jmp 2f
16565 +1: pax_enter_kernel_user
16566 +2:
16567 +#else
16568 + pax_enter_kernel
16569 +#endif
16570 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16571 movq %rsp,%rdi
16572 movq $-1,%rsi
16573 @@ -1735,21 +2136,32 @@ end_repeat_nmi:
16574 testl %ebx,%ebx /* swapgs needed? */
16575 jnz nmi_restore
16576 nmi_swapgs:
16577 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16578 + pax_exit_kernel_user
16579 +#else
16580 + pax_exit_kernel
16581 +#endif
16582 SWAPGS_UNSAFE_STACK
16583 + RESTORE_ALL 8
16584 + /* Clear the NMI executing stack variable */
16585 + movq $0, 10*8(%rsp)
16586 + jmp irq_return
16587 nmi_restore:
16588 + pax_exit_kernel
16589 RESTORE_ALL 8
16590 + pax_force_retaddr_bts
16591 /* Clear the NMI executing stack variable */
16592 movq $0, 10*8(%rsp)
16593 jmp irq_return
16594 CFI_ENDPROC
16595 -END(nmi)
16596 +ENDPROC(nmi)
16597
16598 ENTRY(ignore_sysret)
16599 CFI_STARTPROC
16600 mov $-ENOSYS,%eax
16601 sysret
16602 CFI_ENDPROC
16603 -END(ignore_sysret)
16604 +ENDPROC(ignore_sysret)
16605
16606 /*
16607 * End of kprobes section
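
Among the entry_64.S additions above, pax_enter_kernel_user/pax_exit_kernel_user carry the UDEREF logic: on entry from userland the low byte of every userland PGD slot is zeroed (clearing the Present bit, so user mappings vanish while the kernel runs), and on exit it is set back to 0x67 (Present|RW|User|Accessed|Dirty). A deliberately literal C rendering of the two non-paravirt loops (illustrative only; 'pgd' stands for the kernel-mapped address of the current PGD that the assembly derives from CR3, and user_pgd_ptrs corresponds to the USER_PGD_PTRS constant defined elsewhere in the patch):

    static void uderef_hide_user_pgds(unsigned char *pgd, unsigned int user_pgd_ptrs)
    {
    	unsigned int i;

    	for (i = 0; i < user_pgd_ptrs; i++)
    		pgd[i * 8] = 0;		/* movb $0, i*8(%rbx) */
    }

    static void uderef_restore_user_pgds(unsigned char *pgd, unsigned int user_pgd_ptrs)
    {
    	unsigned int i;

    	for (i = 0; i < user_pgd_ptrs; i++)
    		pgd[i * 8] = 0x67;	/* movb $0x67, i*8(%rdi) */
    }
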
16608 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16609 index c9a281f..ce2f317 100644
16610 --- a/arch/x86/kernel/ftrace.c
16611 +++ b/arch/x86/kernel/ftrace.c
16612 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16613 static const void *mod_code_newcode; /* holds the text to write to the IP */
16614
16615 static unsigned nmi_wait_count;
16616 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16617 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16618
16619 int ftrace_arch_read_dyn_info(char *buf, int size)
16620 {
16621 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16622
16623 r = snprintf(buf, size, "%u %u",
16624 nmi_wait_count,
16625 - atomic_read(&nmi_update_count));
16626 + atomic_read_unchecked(&nmi_update_count));
16627 return r;
16628 }
16629
16630 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16631
16632 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16633 smp_rmb();
16634 + pax_open_kernel();
16635 ftrace_mod_code();
16636 - atomic_inc(&nmi_update_count);
16637 + pax_close_kernel();
16638 + atomic_inc_unchecked(&nmi_update_count);
16639 }
16640 /* Must have previous changes seen before executions */
16641 smp_mb();
16642 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16643 {
16644 unsigned char replaced[MCOUNT_INSN_SIZE];
16645
16646 + ip = ktla_ktva(ip);
16647 +
16648 /*
16649 * Note: Due to modules and __init, code can
16650 * disappear and change, we need to protect against faulting
16651 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16652 unsigned char old[MCOUNT_INSN_SIZE], *new;
16653 int ret;
16654
16655 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16656 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16657 new = ftrace_call_replace(ip, (unsigned long)func);
16658 ret = ftrace_modify_code(ip, old, new);
16659
16660 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16661 {
16662 unsigned char code[MCOUNT_INSN_SIZE];
16663
16664 + ip = ktla_ktva(ip);
16665 +
16666 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16667 return -EFAULT;
16668
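The ftrace.c hunk above shows the idiom this patch uses whenever it has to modify kernel text or other data that KERNEXEC keeps read-only at runtime: bracket the write with pax_open_kernel()/pax_close_kernel(), which temporarily lift and then restore the write protection (on x86 this is the CR0.WP toggle also visible in the entry_32.S ESPFIX hunk). A minimal sketch of that pattern; the two helpers are stand-ins for the real ones added by the patch, and patch_site/opcode are hypothetical names:

    /* Stand-ins for the helpers this patch adds (their real definitions
     * live in the patched asm/pgtable.h); here they only mark where the
     * protection is dropped and restored. */
    static void pax_open_kernel(void)  { /* e.g. clear CR0.WP */ }
    static void pax_close_kernel(void) { /* e.g. set CR0.WP again */ }

    static void patch_kernel_byte(unsigned char *patch_site, unsigned char opcode)
    {
    	pax_open_kernel();	/* make the target temporarily writable */
    	*patch_site = opcode;	/* the actual in-place modification */
    	pax_close_kernel();	/* re-enable the write protection */
    }
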
16669 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16670 index 51ff186..9e77418 100644
16671 --- a/arch/x86/kernel/head32.c
16672 +++ b/arch/x86/kernel/head32.c
16673 @@ -19,6 +19,7 @@
16674 #include <asm/io_apic.h>
16675 #include <asm/bios_ebda.h>
16676 #include <asm/tlbflush.h>
16677 +#include <asm/boot.h>
16678
16679 static void __init i386_default_early_setup(void)
16680 {
16681 @@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16682
16683 void __init i386_start_kernel(void)
16684 {
16685 - memblock_reserve(__pa_symbol(&_text),
16686 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16687 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16688
16689 #ifdef CONFIG_BLK_DEV_INITRD
16690 /* Reserve INITRD */
16691 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16692 index ce0be7c..c41476e 100644
16693 --- a/arch/x86/kernel/head_32.S
16694 +++ b/arch/x86/kernel/head_32.S
16695 @@ -25,6 +25,12 @@
16696 /* Physical address */
16697 #define pa(X) ((X) - __PAGE_OFFSET)
16698
16699 +#ifdef CONFIG_PAX_KERNEXEC
16700 +#define ta(X) (X)
16701 +#else
16702 +#define ta(X) ((X) - __PAGE_OFFSET)
16703 +#endif
16704 +
16705 /*
16706 * References to members of the new_cpu_data structure.
16707 */
16708 @@ -54,11 +60,7 @@
16709 * and small than max_low_pfn, otherwise will waste some page table entries
16710 */
16711
16712 -#if PTRS_PER_PMD > 1
16713 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16714 -#else
16715 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16716 -#endif
16717 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16718
16719 /* Number of possible pages in the lowmem region */
16720 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16721 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16722 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16723
16724 /*
16725 + * Real beginning of normal "text" segment
16726 + */
16727 +ENTRY(stext)
16728 +ENTRY(_stext)
16729 +
16730 +/*
16731 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16732 * %esi points to the real-mode code as a 32-bit pointer.
16733 * CS and DS must be 4 GB flat segments, but we don't depend on
16734 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16735 * can.
16736 */
16737 __HEAD
16738 +
16739 +#ifdef CONFIG_PAX_KERNEXEC
16740 + jmp startup_32
16741 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16742 +.fill PAGE_SIZE-5,1,0xcc
16743 +#endif
16744 +
16745 ENTRY(startup_32)
16746 movl pa(stack_start),%ecx
16747
16748 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16749 2:
16750 leal -__PAGE_OFFSET(%ecx),%esp
16751
16752 +#ifdef CONFIG_SMP
16753 + movl $pa(cpu_gdt_table),%edi
16754 + movl $__per_cpu_load,%eax
16755 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16756 + rorl $16,%eax
16757 + movb %al,__KERNEL_PERCPU + 4(%edi)
16758 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16759 + movl $__per_cpu_end - 1,%eax
16760 + subl $__per_cpu_start,%eax
16761 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16762 +#endif
16763 +
16764 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16765 + movl $NR_CPUS,%ecx
16766 + movl $pa(cpu_gdt_table),%edi
16767 +1:
16768 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16769 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16770 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16771 + addl $PAGE_SIZE_asm,%edi
16772 + loop 1b
16773 +#endif
16774 +
16775 +#ifdef CONFIG_PAX_KERNEXEC
16776 + movl $pa(boot_gdt),%edi
16777 + movl $__LOAD_PHYSICAL_ADDR,%eax
16778 + movw %ax,__BOOT_CS + 2(%edi)
16779 + rorl $16,%eax
16780 + movb %al,__BOOT_CS + 4(%edi)
16781 + movb %ah,__BOOT_CS + 7(%edi)
16782 + rorl $16,%eax
16783 +
16784 + ljmp $(__BOOT_CS),$1f
16785 +1:
16786 +
16787 + movl $NR_CPUS,%ecx
16788 + movl $pa(cpu_gdt_table),%edi
16789 + addl $__PAGE_OFFSET,%eax
16790 +1:
16791 + movw %ax,__KERNEL_CS + 2(%edi)
16792 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16793 + rorl $16,%eax
16794 + movb %al,__KERNEL_CS + 4(%edi)
16795 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16796 + movb %ah,__KERNEL_CS + 7(%edi)
16797 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16798 + rorl $16,%eax
16799 + addl $PAGE_SIZE_asm,%edi
16800 + loop 1b
16801 +#endif
16802 +
16803 /*
16804 * Clear BSS first so that there are no surprises...
16805 */
16806 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16807 movl %eax, pa(max_pfn_mapped)
16808
16809 /* Do early initialization of the fixmap area */
16810 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16811 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16812 +#ifdef CONFIG_COMPAT_VDSO
16813 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16814 +#else
16815 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16816 +#endif
16817 #else /* Not PAE */
16818
16819 page_pde_offset = (__PAGE_OFFSET >> 20);
16820 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16821 movl %eax, pa(max_pfn_mapped)
16822
16823 /* Do early initialization of the fixmap area */
16824 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16825 - movl %eax,pa(initial_page_table+0xffc)
16826 +#ifdef CONFIG_COMPAT_VDSO
16827 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16828 +#else
16829 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16830 +#endif
16831 #endif
16832
16833 #ifdef CONFIG_PARAVIRT
16834 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16835 cmpl $num_subarch_entries, %eax
16836 jae bad_subarch
16837
16838 - movl pa(subarch_entries)(,%eax,4), %eax
16839 - subl $__PAGE_OFFSET, %eax
16840 - jmp *%eax
16841 + jmp *pa(subarch_entries)(,%eax,4)
16842
16843 bad_subarch:
16844 WEAK(lguest_entry)
16845 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16846 __INITDATA
16847
16848 subarch_entries:
16849 - .long default_entry /* normal x86/PC */
16850 - .long lguest_entry /* lguest hypervisor */
16851 - .long xen_entry /* Xen hypervisor */
16852 - .long default_entry /* Moorestown MID */
16853 + .long ta(default_entry) /* normal x86/PC */
16854 + .long ta(lguest_entry) /* lguest hypervisor */
16855 + .long ta(xen_entry) /* Xen hypervisor */
16856 + .long ta(default_entry) /* Moorestown MID */
16857 num_subarch_entries = (. - subarch_entries) / 4
16858 .previous
16859 #else
16860 @@ -312,6 +382,7 @@ default_entry:
16861 orl %edx,%eax
16862 movl %eax,%cr4
16863
16864 +#ifdef CONFIG_X86_PAE
16865 testb $X86_CR4_PAE, %al # check if PAE is enabled
16866 jz 6f
16867
16868 @@ -340,6 +411,9 @@ default_entry:
16869 /* Make changes effective */
16870 wrmsr
16871
16872 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16873 +#endif
16874 +
16875 6:
16876
16877 /*
16878 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16879 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16880 movl %eax,%ss # after changing gdt.
16881
16882 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16883 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16884 movl %eax,%ds
16885 movl %eax,%es
16886
16887 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16888 */
16889 cmpb $0,ready
16890 jne 1f
16891 - movl $gdt_page,%eax
16892 + movl $cpu_gdt_table,%eax
16893 movl $stack_canary,%ecx
16894 +#ifdef CONFIG_SMP
16895 + addl $__per_cpu_load,%ecx
16896 +#endif
16897 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16898 shrl $16, %ecx
16899 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16900 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16901 1:
16902 -#endif
16903 movl $(__KERNEL_STACK_CANARY),%eax
16904 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16905 + movl $(__USER_DS),%eax
16906 +#else
16907 + xorl %eax,%eax
16908 +#endif
16909 movl %eax,%gs
16910
16911 xorl %eax,%eax # Clear LDT
16912 @@ -558,22 +639,22 @@ early_page_fault:
16913 jmp early_fault
16914
16915 early_fault:
16916 - cld
16917 #ifdef CONFIG_PRINTK
16918 + cmpl $1,%ss:early_recursion_flag
16919 + je hlt_loop
16920 + incl %ss:early_recursion_flag
16921 + cld
16922 pusha
16923 movl $(__KERNEL_DS),%eax
16924 movl %eax,%ds
16925 movl %eax,%es
16926 - cmpl $2,early_recursion_flag
16927 - je hlt_loop
16928 - incl early_recursion_flag
16929 movl %cr2,%eax
16930 pushl %eax
16931 pushl %edx /* trapno */
16932 pushl $fault_msg
16933 call printk
16934 +; call dump_stack
16935 #endif
16936 - call dump_stack
16937 hlt_loop:
16938 hlt
16939 jmp hlt_loop
16940 @@ -581,8 +662,11 @@ hlt_loop:
16941 /* This is the default interrupt "handler" :-) */
16942 ALIGN
16943 ignore_int:
16944 - cld
16945 #ifdef CONFIG_PRINTK
16946 + cmpl $2,%ss:early_recursion_flag
16947 + je hlt_loop
16948 + incl %ss:early_recursion_flag
16949 + cld
16950 pushl %eax
16951 pushl %ecx
16952 pushl %edx
16953 @@ -591,9 +675,6 @@ ignore_int:
16954 movl $(__KERNEL_DS),%eax
16955 movl %eax,%ds
16956 movl %eax,%es
16957 - cmpl $2,early_recursion_flag
16958 - je hlt_loop
16959 - incl early_recursion_flag
16960 pushl 16(%esp)
16961 pushl 24(%esp)
16962 pushl 32(%esp)
16963 @@ -622,29 +703,43 @@ ENTRY(initial_code)
16964 /*
16965 * BSS section
16966 */
16967 -__PAGE_ALIGNED_BSS
16968 - .align PAGE_SIZE
16969 #ifdef CONFIG_X86_PAE
16970 +.section .initial_pg_pmd,"a",@progbits
16971 initial_pg_pmd:
16972 .fill 1024*KPMDS,4,0
16973 #else
16974 +.section .initial_page_table,"a",@progbits
16975 ENTRY(initial_page_table)
16976 .fill 1024,4,0
16977 #endif
16978 +.section .initial_pg_fixmap,"a",@progbits
16979 initial_pg_fixmap:
16980 .fill 1024,4,0
16981 +.section .empty_zero_page,"a",@progbits
16982 ENTRY(empty_zero_page)
16983 .fill 4096,1,0
16984 +.section .swapper_pg_dir,"a",@progbits
16985 ENTRY(swapper_pg_dir)
16986 +#ifdef CONFIG_X86_PAE
16987 + .fill 4,8,0
16988 +#else
16989 .fill 1024,4,0
16990 +#endif
16991 +
16992 +/*
16993 + * The IDT has to be page-aligned to simplify the Pentium
16994 + * F0 0F bug workaround.. We have a special link segment
16995 + * for this.
16996 + */
16997 +.section .idt,"a",@progbits
16998 +ENTRY(idt_table)
16999 + .fill 256,8,0
17000
17001 /*
17002 * This starts the data section.
17003 */
17004 #ifdef CONFIG_X86_PAE
17005 -__PAGE_ALIGNED_DATA
17006 - /* Page-aligned for the benefit of paravirt? */
17007 - .align PAGE_SIZE
17008 +.section .initial_page_table,"a",@progbits
17009 ENTRY(initial_page_table)
17010 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17011 # if KPMDS == 3
17012 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
17013 # error "Kernel PMDs should be 1, 2 or 3"
17014 # endif
17015 .align PAGE_SIZE /* needs to be page-sized too */
17016 +
17017 +#ifdef CONFIG_PAX_PER_CPU_PGD
17018 +ENTRY(cpu_pgd)
17019 + .rept NR_CPUS
17020 + .fill 4,8,0
17021 + .endr
17022 +#endif
17023 +
17024 #endif
17025
17026 .data
17027 .balign 4
17028 ENTRY(stack_start)
17029 - .long init_thread_union+THREAD_SIZE
17030 + .long init_thread_union+THREAD_SIZE-8
17031
17032 +ready: .byte 0
17033 +
17034 +.section .rodata,"a",@progbits
17035 early_recursion_flag:
17036 .long 0
17037
17038 -ready: .byte 0
17039 -
17040 int_msg:
17041 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17042
17043 @@ -707,7 +811,7 @@ fault_msg:
17044 .word 0 # 32 bit align gdt_desc.address
17045 boot_gdt_descr:
17046 .word __BOOT_DS+7
17047 - .long boot_gdt - __PAGE_OFFSET
17048 + .long pa(boot_gdt)
17049
17050 .word 0 # 32-bit align idt_desc.address
17051 idt_descr:
17052 @@ -718,7 +822,7 @@ idt_descr:
17053 .word 0 # 32 bit align gdt_desc.address
17054 ENTRY(early_gdt_descr)
17055 .word GDT_ENTRIES*8-1
17056 - .long gdt_page /* Overwritten for secondary CPUs */
17057 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17058
17059 /*
17060 * The boot_gdt must mirror the equivalent in setup.S and is
17061 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
17062 .align L1_CACHE_BYTES
17063 ENTRY(boot_gdt)
17064 .fill GDT_ENTRY_BOOT_CS,8,0
17065 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17066 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17067 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17068 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17069 +
17070 + .align PAGE_SIZE_asm
17071 +ENTRY(cpu_gdt_table)
17072 + .rept NR_CPUS
17073 + .quad 0x0000000000000000 /* NULL descriptor */
17074 + .quad 0x0000000000000000 /* 0x0b reserved */
17075 + .quad 0x0000000000000000 /* 0x13 reserved */
17076 + .quad 0x0000000000000000 /* 0x1b reserved */
17077 +
17078 +#ifdef CONFIG_PAX_KERNEXEC
17079 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17080 +#else
17081 + .quad 0x0000000000000000 /* 0x20 unused */
17082 +#endif
17083 +
17084 + .quad 0x0000000000000000 /* 0x28 unused */
17085 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17086 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17087 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17088 + .quad 0x0000000000000000 /* 0x4b reserved */
17089 + .quad 0x0000000000000000 /* 0x53 reserved */
17090 + .quad 0x0000000000000000 /* 0x5b reserved */
17091 +
17092 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17093 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17094 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17095 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17096 +
17097 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17098 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17099 +
17100 + /*
17101 + * Segments used for calling PnP BIOS have byte granularity.
17102 + * The code segments and data segments have fixed 64k limits,
17103 + * the transfer segment sizes are set at run time.
17104 + */
17105 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17106 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17107 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17108 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17109 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17110 +
17111 + /*
17112 + * The APM segments have byte granularity and their bases
17113 + * are set at run time. All have 64k limits.
17114 + */
17115 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17116 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17117 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17118 +
17119 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17120 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17121 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17122 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17123 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17124 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17125 +
17126 + /* Be sure this is zeroed to avoid false validations in Xen */
17127 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17128 + .endr
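The cpu_gdt_table added above is written as raw 8-byte descriptors (e.g. 0x00cf9b000000ffff for the kernel 4 GB code segment). The following small, self-contained C program decodes such a quad into base, limit and access bits so the table entries can be checked by eye; it is a reader's aid, not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Decode one 8-byte x86 segment descriptor of the kind listed above. */
static void decode(uint64_t d)
{
    uint32_t base   = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
    uint32_t limit  = (d & 0xffff) | (((d >> 48) & 0xf) << 16);
    unsigned access = (d >> 40) & 0xff;   /* P, DPL, S, type */
    unsigned flags  = (d >> 52) & 0xf;    /* G, D/B, L, AVL */

    if (flags & 0x8)                      /* G=1: limit counted in 4 KiB pages */
        limit = (limit << 12) | 0xfff;

    printf("base=%#010x limit=%#010x access=%#04x dpl=%u %s\n",
           base, limit, access, (access >> 5) & 3,
           (access & 0x08) ? "code" : "data");
}

int main(void)
{
    decode(0x00cf9b000000ffffULL);  /* kernel 4GB code, DPL 0 */
    decode(0x00cff3000000ffffULL);  /* user 4GB data, DPL 3 */
    return 0;
}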
17129 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17130 index 40f4eb3..6d24d9d 100644
17131 --- a/arch/x86/kernel/head_64.S
17132 +++ b/arch/x86/kernel/head_64.S
17133 @@ -19,6 +19,8 @@
17134 #include <asm/cache.h>
17135 #include <asm/processor-flags.h>
17136 #include <asm/percpu.h>
17137 +#include <asm/cpufeature.h>
17138 +#include <asm/alternative-asm.h>
17139
17140 #ifdef CONFIG_PARAVIRT
17141 #include <asm/asm-offsets.h>
17142 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17143 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17144 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17145 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17146 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17147 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17148 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17149 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17150 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17151 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17152
17153 .text
17154 __HEAD
17155 @@ -85,35 +93,23 @@ startup_64:
17156 */
17157 addq %rbp, init_level4_pgt + 0(%rip)
17158 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17159 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17160 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17161 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17162 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17163
17164 addq %rbp, level3_ident_pgt + 0(%rip)
17165 +#ifndef CONFIG_XEN
17166 + addq %rbp, level3_ident_pgt + 8(%rip)
17167 +#endif
17168
17169 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17170 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17171 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17172 +
17173 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17174 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17175
17176 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17177 -
17178 - /* Add an Identity mapping if I am above 1G */
17179 - leaq _text(%rip), %rdi
17180 - andq $PMD_PAGE_MASK, %rdi
17181 -
17182 - movq %rdi, %rax
17183 - shrq $PUD_SHIFT, %rax
17184 - andq $(PTRS_PER_PUD - 1), %rax
17185 - jz ident_complete
17186 -
17187 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17188 - leaq level3_ident_pgt(%rip), %rbx
17189 - movq %rdx, 0(%rbx, %rax, 8)
17190 -
17191 - movq %rdi, %rax
17192 - shrq $PMD_SHIFT, %rax
17193 - andq $(PTRS_PER_PMD - 1), %rax
17194 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17195 - leaq level2_spare_pgt(%rip), %rbx
17196 - movq %rdx, 0(%rbx, %rax, 8)
17197 -ident_complete:
17198 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17199
17200 /*
17201 * Fixup the kernel text+data virtual addresses. Note that
17202 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
17203 * after the boot processor executes this code.
17204 */
17205
17206 - /* Enable PAE mode and PGE */
17207 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17208 + /* Enable PAE mode and PSE/PGE */
17209 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17210 movq %rax, %cr4
17211
17212 /* Setup early boot stage 4 level pagetables. */
17213 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17214 movl $MSR_EFER, %ecx
17215 rdmsr
17216 btsl $_EFER_SCE, %eax /* Enable System Call */
17217 - btl $20,%edi /* No Execute supported? */
17218 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17219 jnc 1f
17220 btsl $_EFER_NX, %eax
17221 + leaq init_level4_pgt(%rip), %rdi
17222 +#ifndef CONFIG_EFI
17223 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17224 +#endif
17225 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17226 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17227 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17228 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17229 1: wrmsr /* Make changes effective */
17230
17231 /* Setup cr0 */
17232 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17233 * jump. In addition we need to ensure %cs is set so we make this
17234 * a far return.
17235 */
17236 + pax_set_fptr_mask
17237 movq initial_code(%rip),%rax
17238 pushq $0 # fake return address to stop unwinder
17239 pushq $__KERNEL_CS # set correct cs
17240 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17241 bad_address:
17242 jmp bad_address
17243
17244 - .section ".init.text","ax"
17245 + __INIT
17246 #ifdef CONFIG_EARLY_PRINTK
17247 .globl early_idt_handlers
17248 early_idt_handlers:
17249 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17250 #endif /* EARLY_PRINTK */
17251 1: hlt
17252 jmp 1b
17253 + .previous
17254
17255 #ifdef CONFIG_EARLY_PRINTK
17256 + __INITDATA
17257 early_recursion_flag:
17258 .long 0
17259 + .previous
17260
17261 + .section .rodata,"a",@progbits
17262 early_idt_msg:
17263 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17264 early_idt_ripmsg:
17265 .asciz "RIP %s\n"
17266 + .previous
17267 #endif /* CONFIG_EARLY_PRINTK */
17268 - .previous
17269
17270 + .section .rodata,"a",@progbits
17271 #define NEXT_PAGE(name) \
17272 .balign PAGE_SIZE; \
17273 ENTRY(name)
17274 @@ -338,7 +348,6 @@ ENTRY(name)
17275 i = i + 1 ; \
17276 .endr
17277
17278 - .data
17279 /*
17280 * This default setting generates an ident mapping at address 0x100000
17281 * and a mapping for the kernel that precisely maps virtual address
17282 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17283 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17284 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17285 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17286 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17287 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17288 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17289 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17290 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17291 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17292 .org init_level4_pgt + L4_START_KERNEL*8, 0
17293 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17294 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17295
17296 +#ifdef CONFIG_PAX_PER_CPU_PGD
17297 +NEXT_PAGE(cpu_pgd)
17298 + .rept NR_CPUS
17299 + .fill 512,8,0
17300 + .endr
17301 +#endif
17302 +
17303 NEXT_PAGE(level3_ident_pgt)
17304 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17305 +#ifdef CONFIG_XEN
17306 .fill 511,8,0
17307 +#else
17308 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17309 + .fill 510,8,0
17310 +#endif
17311 +
17312 +NEXT_PAGE(level3_vmalloc_start_pgt)
17313 + .fill 512,8,0
17314 +
17315 +NEXT_PAGE(level3_vmalloc_end_pgt)
17316 + .fill 512,8,0
17317 +
17318 +NEXT_PAGE(level3_vmemmap_pgt)
17319 + .fill L3_VMEMMAP_START,8,0
17320 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17321
17322 NEXT_PAGE(level3_kernel_pgt)
17323 .fill L3_START_KERNEL,8,0
17324 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17325 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17326 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17327
17328 +NEXT_PAGE(level2_vmemmap_pgt)
17329 + .fill 512,8,0
17330 +
17331 NEXT_PAGE(level2_fixmap_pgt)
17332 - .fill 506,8,0
17333 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17334 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17335 - .fill 5,8,0
17336 + .fill 507,8,0
17337 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17338 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17339 + .fill 4,8,0
17340
17341 -NEXT_PAGE(level1_fixmap_pgt)
17342 +NEXT_PAGE(level1_vsyscall_pgt)
17343 .fill 512,8,0
17344
17345 -NEXT_PAGE(level2_ident_pgt)
17346 - /* Since I easily can, map the first 1G.
17347 + /* Since I easily can, map the first 2G.
17348 * Don't set NX because code runs from these pages.
17349 */
17350 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17351 +NEXT_PAGE(level2_ident_pgt)
17352 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17353
17354 NEXT_PAGE(level2_kernel_pgt)
17355 /*
17356 @@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17357 * If you want to increase this then increase MODULES_VADDR
17358 * too.)
17359 */
17360 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17361 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17362 -
17363 -NEXT_PAGE(level2_spare_pgt)
17364 - .fill 512, 8, 0
17365 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17366
17367 #undef PMDS
17368 #undef NEXT_PAGE
17369
17370 - .data
17371 + .align PAGE_SIZE
17372 +ENTRY(cpu_gdt_table)
17373 + .rept NR_CPUS
17374 + .quad 0x0000000000000000 /* NULL descriptor */
17375 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17376 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17377 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17378 + .quad 0x00cffb000000ffff /* __USER32_CS */
17379 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17380 + .quad 0x00affb000000ffff /* __USER_CS */
17381 +
17382 +#ifdef CONFIG_PAX_KERNEXEC
17383 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17384 +#else
17385 + .quad 0x0 /* unused */
17386 +#endif
17387 +
17388 + .quad 0,0 /* TSS */
17389 + .quad 0,0 /* LDT */
17390 + .quad 0,0,0 /* three TLS descriptors */
17391 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17392 + /* asm/segment.h:GDT_ENTRIES must match this */
17393 +
17394 + /* zero the remaining page */
17395 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17396 + .endr
17397 +
17398 .align 16
17399 .globl early_gdt_descr
17400 early_gdt_descr:
17401 .word GDT_ENTRIES*8-1
17402 early_gdt_descr_base:
17403 - .quad INIT_PER_CPU_VAR(gdt_page)
17404 + .quad cpu_gdt_table
17405
17406 ENTRY(phys_base)
17407 /* This must match the first entry in level2_kernel_pgt */
17408 .quad 0x0000000000000000
17409
17410 #include "../../x86/xen/xen-head.S"
17411 -
17412 - .section .bss, "aw", @nobits
17413 +
17414 + .section .rodata,"a",@progbits
17415 .align L1_CACHE_BYTES
17416 ENTRY(idt_table)
17417 - .skip IDT_ENTRIES * 16
17418 + .fill 512,8,0
17419
17420 .align L1_CACHE_BYTES
17421 ENTRY(nmi_idt_table)
17422 - .skip IDT_ENTRIES * 16
17423 + .fill 512,8,0
17424
17425 __PAGE_ALIGNED_BSS
17426 .align PAGE_SIZE
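The 64-bit head changes above set the NX bit directly in the top-level page-table entries for the vmalloc and vmemmap regions (btsq $_PAGE_BIT_NX, ...) once CPUID reports NX support. A tiny C model of that single-bit operation; the example entry value is made up.

#include <stdio.h>
#include <stdint.h>

#define _PAGE_BIT_NX 63   /* bit 63 of a 64-bit page-table entry */

int main(void)
{
    uint64_t pgd_entry = 0x0000000000001067ULL;   /* made-up entry value */

    pgd_entry |= 1ULL << _PAGE_BIT_NX;            /* btsq $_PAGE_BIT_NX, ... */
    printf("entry with NX set: %#018llx\n", (unsigned long long)pgd_entry);
    return 0;
}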
17427 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17428 index 9c3bd4a..e1d9b35 100644
17429 --- a/arch/x86/kernel/i386_ksyms_32.c
17430 +++ b/arch/x86/kernel/i386_ksyms_32.c
17431 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17432 EXPORT_SYMBOL(cmpxchg8b_emu);
17433 #endif
17434
17435 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17436 +
17437 /* Networking helper routines. */
17438 EXPORT_SYMBOL(csum_partial_copy_generic);
17439 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17440 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17441
17442 EXPORT_SYMBOL(__get_user_1);
17443 EXPORT_SYMBOL(__get_user_2);
17444 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17445
17446 EXPORT_SYMBOL(csum_partial);
17447 EXPORT_SYMBOL(empty_zero_page);
17448 +
17449 +#ifdef CONFIG_PAX_KERNEXEC
17450 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17451 +#endif
17452 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17453 index 2d6e649..df6e1af 100644
17454 --- a/arch/x86/kernel/i387.c
17455 +++ b/arch/x86/kernel/i387.c
17456 @@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17457 static inline bool interrupted_user_mode(void)
17458 {
17459 struct pt_regs *regs = get_irq_regs();
17460 - return regs && user_mode_vm(regs);
17461 + return regs && user_mode(regs);
17462 }
17463
17464 /*
17465 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17466 index 36d1853..bf25736 100644
17467 --- a/arch/x86/kernel/i8259.c
17468 +++ b/arch/x86/kernel/i8259.c
17469 @@ -209,7 +209,7 @@ spurious_8259A_irq:
17470 "spurious 8259A interrupt: IRQ%d.\n", irq);
17471 spurious_irq_mask |= irqmask;
17472 }
17473 - atomic_inc(&irq_err_count);
17474 + atomic_inc_unchecked(&irq_err_count);
17475 /*
17476 * Theoretically we do not have to handle this IRQ,
17477 * but in Linux this does not cause problems and is
17478 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17479 index 43e9ccf..44ccf6f 100644
17480 --- a/arch/x86/kernel/init_task.c
17481 +++ b/arch/x86/kernel/init_task.c
17482 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17483 * way process stacks are handled. This is done by having a special
17484 * "init_task" linker map entry..
17485 */
17486 -union thread_union init_thread_union __init_task_data =
17487 - { INIT_THREAD_INFO(init_task) };
17488 +union thread_union init_thread_union __init_task_data;
17489
17490 /*
17491 * Initial task structure.
17492 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17493 * section. Since TSS's are completely CPU-local, we want them
17494 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17495 */
17496 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17497 -
17498 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17499 +EXPORT_SYMBOL(init_tss);
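init_tss is converted above from a DEFINE_PER_CPU variable into a plain NR_CPUS-sized array, which is why later hunks replace &per_cpu(init_tss, cpu) with init_tss + cpu. A compilable stand-in for that lookup; the struct contents and NR_CPUS value are placeholders.

#include <stdio.h>

#define NR_CPUS 4                                /* placeholder */

struct tss_struct { unsigned long sp0; };        /* placeholder fields */

static struct tss_struct init_tss[NR_CPUS];      /* was DEFINE_PER_CPU_SHARED_ALIGNED */

int main(void)
{
    int cpu = 1;                                 /* stands in for get_cpu() */
    struct tss_struct *tss = init_tss + cpu;     /* was &per_cpu(init_tss, cpu) */

    tss->sp0 = 0x1000;
    printf("cpu%d tss at %p, sp0=%#lx\n", cpu, (void *)tss, tss->sp0);
    return 0;
}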
17500 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17501 index 8c96897..be66bfa 100644
17502 --- a/arch/x86/kernel/ioport.c
17503 +++ b/arch/x86/kernel/ioport.c
17504 @@ -6,6 +6,7 @@
17505 #include <linux/sched.h>
17506 #include <linux/kernel.h>
17507 #include <linux/capability.h>
17508 +#include <linux/security.h>
17509 #include <linux/errno.h>
17510 #include <linux/types.h>
17511 #include <linux/ioport.h>
17512 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17513
17514 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17515 return -EINVAL;
17516 +#ifdef CONFIG_GRKERNSEC_IO
17517 + if (turn_on && grsec_disable_privio) {
17518 + gr_handle_ioperm();
17519 + return -EPERM;
17520 + }
17521 +#endif
17522 if (turn_on && !capable(CAP_SYS_RAWIO))
17523 return -EPERM;
17524
17525 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17526 * because the ->io_bitmap_max value must match the bitmap
17527 * contents:
17528 */
17529 - tss = &per_cpu(init_tss, get_cpu());
17530 + tss = init_tss + get_cpu();
17531
17532 if (turn_on)
17533 bitmap_clear(t->io_bitmap_ptr, from, num);
17534 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17535 return -EINVAL;
17536 /* Trying to gain more privileges? */
17537 if (level > old) {
17538 +#ifdef CONFIG_GRKERNSEC_IO
17539 + if (grsec_disable_privio) {
17540 + gr_handle_iopl();
17541 + return -EPERM;
17542 + }
17543 +#endif
17544 if (!capable(CAP_SYS_RAWIO))
17545 return -EPERM;
17546 }
17547 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17548 index 3dafc60..aa8e9c4 100644
17549 --- a/arch/x86/kernel/irq.c
17550 +++ b/arch/x86/kernel/irq.c
17551 @@ -18,7 +18,7 @@
17552 #include <asm/mce.h>
17553 #include <asm/hw_irq.h>
17554
17555 -atomic_t irq_err_count;
17556 +atomic_unchecked_t irq_err_count;
17557
17558 /* Function pointer for generic interrupt vector handling */
17559 void (*x86_platform_ipi_callback)(void) = NULL;
17560 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17561 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17562 seq_printf(p, " Machine check polls\n");
17563 #endif
17564 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17565 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17566 #if defined(CONFIG_X86_IO_APIC)
17567 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17568 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17569 #endif
17570 return 0;
17571 }
17572 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17573
17574 u64 arch_irq_stat(void)
17575 {
17576 - u64 sum = atomic_read(&irq_err_count);
17577 + u64 sum = atomic_read_unchecked(&irq_err_count);
17578
17579 #ifdef CONFIG_X86_IO_APIC
17580 - sum += atomic_read(&irq_mis_count);
17581 + sum += atomic_read_unchecked(&irq_mis_count);
17582 #endif
17583 return sum;
17584 }
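irq_err_count and irq_mis_count above are switched to atomic_unchecked_t with the matching *_unchecked accessors, the pattern used throughout this patch for counters that are pure statistics rather than reference counts. A userspace model of the idea follows, assuming the checked flavour saturates on overflow while the unchecked flavour simply wraps; the real kernel implementation differs.

#include <stdio.h>
#include <limits.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
    /* checked flavour: crude stand-in for the REFCOUNT overflow protection */
    if (v->counter == INT_MAX) {
        fprintf(stderr, "refcount overflow detected\n");
        return;
    }
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    /* unchecked flavour: plain wrap-around, acceptable for statistics */
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
    atomic_t refs = { 0 };
    atomic_unchecked_t errs = { 0 };

    atomic_inc(&refs);
    atomic_inc_unchecked(&errs);
    printf("refs=%d irq_err_count model=%d\n", refs.counter, errs.counter);
    return 0;
}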
17585 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17586 index 58b7f27..e112d08 100644
17587 --- a/arch/x86/kernel/irq_32.c
17588 +++ b/arch/x86/kernel/irq_32.c
17589 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17590 __asm__ __volatile__("andl %%esp,%0" :
17591 "=r" (sp) : "0" (THREAD_SIZE - 1));
17592
17593 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17594 + return sp < STACK_WARN;
17595 }
17596
17597 static void print_stack_overflow(void)
17598 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17599 * per-CPU IRQ handling contexts (thread information and stack)
17600 */
17601 union irq_ctx {
17602 - struct thread_info tinfo;
17603 - u32 stack[THREAD_SIZE/sizeof(u32)];
17604 + unsigned long previous_esp;
17605 + u32 stack[THREAD_SIZE/sizeof(u32)];
17606 } __attribute__((aligned(THREAD_SIZE)));
17607
17608 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17609 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17610 static inline int
17611 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17612 {
17613 - union irq_ctx *curctx, *irqctx;
17614 + union irq_ctx *irqctx;
17615 u32 *isp, arg1, arg2;
17616
17617 - curctx = (union irq_ctx *) current_thread_info();
17618 irqctx = __this_cpu_read(hardirq_ctx);
17619
17620 /*
17621 @@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17622 * handler) we can't do that and just have to keep using the
17623 * current stack (which is the irq stack already after all)
17624 */
17625 - if (unlikely(curctx == irqctx))
17626 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17627 return 0;
17628
17629 /* build the stack frame on the IRQ stack */
17630 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17631 - irqctx->tinfo.task = curctx->tinfo.task;
17632 - irqctx->tinfo.previous_esp = current_stack_pointer;
17633 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17634 + irqctx->previous_esp = current_stack_pointer;
17635
17636 - /* Copy the preempt_count so that the [soft]irq checks work. */
17637 - irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
17638 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17639 + __set_fs(MAKE_MM_SEG(0));
17640 +#endif
17641
17642 if (unlikely(overflow))
17643 call_on_stack(print_stack_overflow, isp);
17644 @@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17645 : "0" (irq), "1" (desc), "2" (isp),
17646 "D" (desc->handle_irq)
17647 : "memory", "cc", "ecx");
17648 +
17649 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17650 + __set_fs(current_thread_info()->addr_limit);
17651 +#endif
17652 +
17653 return 1;
17654 }
17655
17656 @@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17657 */
17658 void __cpuinit irq_ctx_init(int cpu)
17659 {
17660 - union irq_ctx *irqctx;
17661 -
17662 if (per_cpu(hardirq_ctx, cpu))
17663 return;
17664
17665 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17666 - THREAD_FLAGS,
17667 - THREAD_ORDER));
17668 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17669 - irqctx->tinfo.cpu = cpu;
17670 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17671 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17672 -
17673 - per_cpu(hardirq_ctx, cpu) = irqctx;
17674 -
17675 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17676 - THREAD_FLAGS,
17677 - THREAD_ORDER));
17678 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17679 - irqctx->tinfo.cpu = cpu;
17680 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17681 -
17682 - per_cpu(softirq_ctx, cpu) = irqctx;
17683 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17684 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17685
17686 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17687 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17688 @@ -152,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17689 asmlinkage void do_softirq(void)
17690 {
17691 unsigned long flags;
17692 - struct thread_info *curctx;
17693 union irq_ctx *irqctx;
17694 u32 *isp;
17695
17696 @@ -162,15 +147,22 @@ asmlinkage void do_softirq(void)
17697 local_irq_save(flags);
17698
17699 if (local_softirq_pending()) {
17700 - curctx = current_thread_info();
17701 irqctx = __this_cpu_read(softirq_ctx);
17702 - irqctx->tinfo.task = curctx->task;
17703 - irqctx->tinfo.previous_esp = current_stack_pointer;
17704 + irqctx->previous_esp = current_stack_pointer;
17705
17706 /* build the stack frame on the softirq stack */
17707 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17708 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17709 +
17710 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17711 + __set_fs(MAKE_MM_SEG(0));
17712 +#endif
17713
17714 call_on_stack(__do_softirq, isp);
17715 +
17716 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17717 + __set_fs(current_thread_info()->addr_limit);
17718 +#endif
17719 +
17720 /*
17721 * Shouldn't happen, we returned above if in_interrupt():
17722 */
17723 @@ -191,7 +183,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17724 if (unlikely(!desc))
17725 return false;
17726
17727 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17728 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17729 if (unlikely(overflow))
17730 print_stack_overflow();
17731 desc->handle_irq(irq, desc);
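The irq_32.c rewrite above drops the thread_info copy from the IRQ context and instead decides whether a stack switch is needed by testing whether the current stack pointer already lies within the IRQ stack. That test is plain unsigned pointer arithmetic, sketched here with stand-in sizes and addresses.

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192   /* stand-in for the real stack size */

static int already_on_irq_stack(uintptr_t sp, uintptr_t irqctx)
{
    /* unsigned compare, as in the hunk: within THREAD_SIZE above the base */
    return sp - irqctx < THREAD_SIZE;
}

int main(void)
{
    uintptr_t irqctx = 0x10000;

    printf("inside:  %d\n", already_on_irq_stack(0x11f00, irqctx));
    printf("outside: %d\n", already_on_irq_stack(0x30000, irqctx));
    return 0;
}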
17732 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17733 index d04d3ec..ea4b374 100644
17734 --- a/arch/x86/kernel/irq_64.c
17735 +++ b/arch/x86/kernel/irq_64.c
17736 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17737 u64 estack_top, estack_bottom;
17738 u64 curbase = (u64)task_stack_page(current);
17739
17740 - if (user_mode_vm(regs))
17741 + if (user_mode(regs))
17742 return;
17743
17744 if (regs->sp >= curbase + sizeof(struct thread_info) +
17745 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17746 index 1d5d31e..ab846ed 100644
17747 --- a/arch/x86/kernel/kdebugfs.c
17748 +++ b/arch/x86/kernel/kdebugfs.c
17749 @@ -28,6 +28,8 @@ struct setup_data_node {
17750 };
17751
17752 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17753 + size_t count, loff_t *ppos) __size_overflow(3);
17754 +static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17755 size_t count, loff_t *ppos)
17756 {
17757 struct setup_data_node *node = file->private_data;
17758 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17759 index 8bfb614..2b3b35f 100644
17760 --- a/arch/x86/kernel/kgdb.c
17761 +++ b/arch/x86/kernel/kgdb.c
17762 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17763 #ifdef CONFIG_X86_32
17764 switch (regno) {
17765 case GDB_SS:
17766 - if (!user_mode_vm(regs))
17767 + if (!user_mode(regs))
17768 *(unsigned long *)mem = __KERNEL_DS;
17769 break;
17770 case GDB_SP:
17771 - if (!user_mode_vm(regs))
17772 + if (!user_mode(regs))
17773 *(unsigned long *)mem = kernel_stack_pointer(regs);
17774 break;
17775 case GDB_GS:
17776 @@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17777 case 'k':
17778 /* clear the trace bit */
17779 linux_regs->flags &= ~X86_EFLAGS_TF;
17780 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17781 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17782
17783 /* set the trace bit if we're stepping */
17784 if (remcomInBuffer[0] == 's') {
17785 linux_regs->flags |= X86_EFLAGS_TF;
17786 - atomic_set(&kgdb_cpu_doing_single_step,
17787 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17788 raw_smp_processor_id());
17789 }
17790
17791 @@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17792
17793 switch (cmd) {
17794 case DIE_DEBUG:
17795 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17796 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17797 if (user_mode(regs))
17798 return single_step_cont(regs, args);
17799 break;
17800 diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17801 index c5e410e..da6aaf9 100644
17802 --- a/arch/x86/kernel/kprobes-opt.c
17803 +++ b/arch/x86/kernel/kprobes-opt.c
17804 @@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17805 * Verify if the address gap is in 2GB range, because this uses
17806 * a relative jump.
17807 */
17808 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17809 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17810 if (abs(rel) > 0x7fffffff)
17811 return -ERANGE;
17812
17813 @@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17814 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17815
17816 /* Set probe function call */
17817 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17818 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17819
17820 /* Set returning jmp instruction at the tail of out-of-line buffer */
17821 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17822 - (u8 *)op->kp.addr + op->optinsn.size);
17823 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17824
17825 flush_icache_range((unsigned long) buf,
17826 (unsigned long) buf + TMPL_END_IDX +
17827 @@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17828 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17829
17830 /* Backup instructions which will be replaced by jump address */
17831 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17832 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17833 RELATIVE_ADDR_SIZE);
17834
17835 insn_buf[0] = RELATIVEJUMP_OPCODE;
17836 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17837 index e213fc8..d783ba4 100644
17838 --- a/arch/x86/kernel/kprobes.c
17839 +++ b/arch/x86/kernel/kprobes.c
17840 @@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17841 } __attribute__((packed)) *insn;
17842
17843 insn = (struct __arch_relative_insn *)from;
17844 +
17845 + pax_open_kernel();
17846 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17847 insn->op = op;
17848 + pax_close_kernel();
17849 }
17850
17851 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17852 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17853 kprobe_opcode_t opcode;
17854 kprobe_opcode_t *orig_opcodes = opcodes;
17855
17856 - if (search_exception_tables((unsigned long)opcodes))
17857 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17858 return 0; /* Page fault may occur on this address. */
17859
17860 retry:
17861 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17862 /* Another subsystem puts a breakpoint, failed to recover */
17863 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17864 return 0;
17865 + pax_open_kernel();
17866 memcpy(dest, insn.kaddr, insn.length);
17867 + pax_close_kernel();
17868
17869 #ifdef CONFIG_X86_64
17870 if (insn_rip_relative(&insn)) {
17871 @@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17872 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17873 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17874 disp = (u8 *) dest + insn_offset_displacement(&insn);
17875 + pax_open_kernel();
17876 *(s32 *) disp = (s32) newdisp;
17877 + pax_close_kernel();
17878 }
17879 #endif
17880 return insn.length;
17881 @@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17882 * nor set current_kprobe, because it doesn't use single
17883 * stepping.
17884 */
17885 - regs->ip = (unsigned long)p->ainsn.insn;
17886 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17887 preempt_enable_no_resched();
17888 return;
17889 }
17890 @@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17891 if (p->opcode == BREAKPOINT_INSTRUCTION)
17892 regs->ip = (unsigned long)p->addr;
17893 else
17894 - regs->ip = (unsigned long)p->ainsn.insn;
17895 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17896 }
17897
17898 /*
17899 @@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17900 setup_singlestep(p, regs, kcb, 0);
17901 return 1;
17902 }
17903 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
17904 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17905 /*
17906 * The breakpoint instruction was removed right
17907 * after we hit it. Another cpu has removed
17908 @@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17909 " movq %rax, 152(%rsp)\n"
17910 RESTORE_REGS_STRING
17911 " popfq\n"
17912 +#ifdef KERNEXEC_PLUGIN
17913 + " btsq $63,(%rsp)\n"
17914 +#endif
17915 #else
17916 " pushf\n"
17917 SAVE_REGS_STRING
17918 @@ -765,7 +775,7 @@ static void __kprobes
17919 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17920 {
17921 unsigned long *tos = stack_addr(regs);
17922 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17923 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17924 unsigned long orig_ip = (unsigned long)p->addr;
17925 kprobe_opcode_t *insn = p->ainsn.insn;
17926
17927 @@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
17928 struct die_args *args = data;
17929 int ret = NOTIFY_DONE;
17930
17931 - if (args->regs && user_mode_vm(args->regs))
17932 + if (args->regs && user_mode(args->regs))
17933 return ret;
17934
17935 switch (val) {
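The kprobes hunks above wrap every write to instruction memory in pax_open_kernel()/pax_close_kernel(). As a rough userspace analogy only: the pattern is "make the target briefly writable, patch it, re-protect it". The real helpers work through CR0.WP or an alternate mapping rather than mprotect(); the snippet below just shows the shape.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    unsigned char *text = mmap(NULL, pagesz, PROT_READ,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (text == MAP_FAILED)
        return 1;

    mprotect(text, pagesz, PROT_READ | PROT_WRITE);  /* "pax_open_kernel()"  */
    memcpy(text, "\xcc", 1);                         /* write the int3 byte  */
    mprotect(text, pagesz, PROT_READ);               /* "pax_close_kernel()" */

    printf("patched byte: %#x\n", (unsigned)text[0]);
    munmap(text, pagesz);
    return 0;
}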
17936 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17937 index ebc9873..1b9724b 100644
17938 --- a/arch/x86/kernel/ldt.c
17939 +++ b/arch/x86/kernel/ldt.c
17940 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17941 if (reload) {
17942 #ifdef CONFIG_SMP
17943 preempt_disable();
17944 - load_LDT(pc);
17945 + load_LDT_nolock(pc);
17946 if (!cpumask_equal(mm_cpumask(current->mm),
17947 cpumask_of(smp_processor_id())))
17948 smp_call_function(flush_ldt, current->mm, 1);
17949 preempt_enable();
17950 #else
17951 - load_LDT(pc);
17952 + load_LDT_nolock(pc);
17953 #endif
17954 }
17955 if (oldsize) {
17956 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17957 return err;
17958
17959 for (i = 0; i < old->size; i++)
17960 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17961 + write_ldt_entry(new->ldt, i, old->ldt + i);
17962 return 0;
17963 }
17964
17965 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17966 retval = copy_ldt(&mm->context, &old_mm->context);
17967 mutex_unlock(&old_mm->context.lock);
17968 }
17969 +
17970 + if (tsk == current) {
17971 + mm->context.vdso = 0;
17972 +
17973 +#ifdef CONFIG_X86_32
17974 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17975 + mm->context.user_cs_base = 0UL;
17976 + mm->context.user_cs_limit = ~0UL;
17977 +
17978 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17979 + cpus_clear(mm->context.cpu_user_cs_mask);
17980 +#endif
17981 +
17982 +#endif
17983 +#endif
17984 +
17985 + }
17986 +
17987 return retval;
17988 }
17989
17990 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17991 }
17992 }
17993
17994 +#ifdef CONFIG_PAX_SEGMEXEC
17995 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17996 + error = -EINVAL;
17997 + goto out_unlock;
17998 + }
17999 +#endif
18000 +
18001 fill_ldt(&ldt, &ldt_info);
18002 if (oldmode)
18003 ldt.avl = 0;
18004 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18005 index 5b19e4d..6476a76 100644
18006 --- a/arch/x86/kernel/machine_kexec_32.c
18007 +++ b/arch/x86/kernel/machine_kexec_32.c
18008 @@ -26,7 +26,7 @@
18009 #include <asm/cacheflush.h>
18010 #include <asm/debugreg.h>
18011
18012 -static void set_idt(void *newidt, __u16 limit)
18013 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18014 {
18015 struct desc_ptr curidt;
18016
18017 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18018 }
18019
18020
18021 -static void set_gdt(void *newgdt, __u16 limit)
18022 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18023 {
18024 struct desc_ptr curgdt;
18025
18026 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18027 }
18028
18029 control_page = page_address(image->control_code_page);
18030 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18031 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18032
18033 relocate_kernel_ptr = control_page;
18034 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18035 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18036 index 0327e2b..e43737b 100644
18037 --- a/arch/x86/kernel/microcode_intel.c
18038 +++ b/arch/x86/kernel/microcode_intel.c
18039 @@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18040
18041 static int get_ucode_user(void *to, const void *from, size_t n)
18042 {
18043 - return copy_from_user(to, from, n);
18044 + return copy_from_user(to, (const void __force_user *)from, n);
18045 }
18046
18047 static enum ucode_state
18048 request_microcode_user(int cpu, const void __user *buf, size_t size)
18049 {
18050 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18051 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18052 }
18053
18054 static void microcode_fini_cpu(int cpu)
18055 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18056 index f21fd94..61565cd 100644
18057 --- a/arch/x86/kernel/module.c
18058 +++ b/arch/x86/kernel/module.c
18059 @@ -35,15 +35,60 @@
18060 #define DEBUGP(fmt...)
18061 #endif
18062
18063 -void *module_alloc(unsigned long size)
18064 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18065 {
18066 - if (PAGE_ALIGN(size) > MODULES_LEN)
18067 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18068 return NULL;
18069 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18070 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18071 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18072 -1, __builtin_return_address(0));
18073 }
18074
18075 +void *module_alloc(unsigned long size)
18076 +{
18077 +
18078 +#ifdef CONFIG_PAX_KERNEXEC
18079 + return __module_alloc(size, PAGE_KERNEL);
18080 +#else
18081 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18082 +#endif
18083 +
18084 +}
18085 +
18086 +#ifdef CONFIG_PAX_KERNEXEC
18087 +#ifdef CONFIG_X86_32
18088 +void *module_alloc_exec(unsigned long size)
18089 +{
18090 + struct vm_struct *area;
18091 +
18092 + if (size == 0)
18093 + return NULL;
18094 +
18095 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18096 + return area ? area->addr : NULL;
18097 +}
18098 +EXPORT_SYMBOL(module_alloc_exec);
18099 +
18100 +void module_free_exec(struct module *mod, void *module_region)
18101 +{
18102 + vunmap(module_region);
18103 +}
18104 +EXPORT_SYMBOL(module_free_exec);
18105 +#else
18106 +void module_free_exec(struct module *mod, void *module_region)
18107 +{
18108 + module_free(mod, module_region);
18109 +}
18110 +EXPORT_SYMBOL(module_free_exec);
18111 +
18112 +void *module_alloc_exec(unsigned long size)
18113 +{
18114 + return __module_alloc(size, PAGE_KERNEL_RX);
18115 +}
18116 +EXPORT_SYMBOL(module_alloc_exec);
18117 +#endif
18118 +#endif
18119 +
18120 #ifdef CONFIG_X86_32
18121 int apply_relocate(Elf32_Shdr *sechdrs,
18122 const char *strtab,
18123 @@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18124 unsigned int i;
18125 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18126 Elf32_Sym *sym;
18127 - uint32_t *location;
18128 + uint32_t *plocation, location;
18129
18130 DEBUGP("Applying relocate section %u to %u\n", relsec,
18131 sechdrs[relsec].sh_info);
18132 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18133 /* This is where to make the change */
18134 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18135 - + rel[i].r_offset;
18136 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18137 + location = (uint32_t)plocation;
18138 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18139 + plocation = ktla_ktva((void *)plocation);
18140 /* This is the symbol it is referring to. Note that all
18141 undefined symbols have been resolved. */
18142 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18143 @@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18144 switch (ELF32_R_TYPE(rel[i].r_info)) {
18145 case R_386_32:
18146 /* We add the value into the location given */
18147 - *location += sym->st_value;
18148 + pax_open_kernel();
18149 + *plocation += sym->st_value;
18150 + pax_close_kernel();
18151 break;
18152 case R_386_PC32:
18153 /* Add the value, subtract its postition */
18154 - *location += sym->st_value - (uint32_t)location;
18155 + pax_open_kernel();
18156 + *plocation += sym->st_value - location;
18157 + pax_close_kernel();
18158 break;
18159 default:
18160 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18161 @@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18162 case R_X86_64_NONE:
18163 break;
18164 case R_X86_64_64:
18165 + pax_open_kernel();
18166 *(u64 *)loc = val;
18167 + pax_close_kernel();
18168 break;
18169 case R_X86_64_32:
18170 + pax_open_kernel();
18171 *(u32 *)loc = val;
18172 + pax_close_kernel();
18173 if (val != *(u32 *)loc)
18174 goto overflow;
18175 break;
18176 case R_X86_64_32S:
18177 + pax_open_kernel();
18178 *(s32 *)loc = val;
18179 + pax_close_kernel();
18180 if ((s64)val != *(s32 *)loc)
18181 goto overflow;
18182 break;
18183 case R_X86_64_PC32:
18184 val -= (u64)loc;
18185 + pax_open_kernel();
18186 *(u32 *)loc = val;
18187 + pax_close_kernel();
18188 +
18189 #if 0
18190 if ((s64)val != *(s32 *)loc)
18191 goto overflow;
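module.c above splits module memory into a non-executable default allocation and, under CONFIG_PAX_KERNEXEC, a separate module_alloc_exec() path for code. A minimal mmap-based analogy of that W^X split; the flags and sizes are illustrative, not the kernel's vmalloc-based implementation.

#include <stdio.h>
#include <sys/mman.h>

static void *module_alloc(size_t size)       /* module data: RW, never executable */
{
    return mmap(NULL, size, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

static void *module_alloc_exec(size_t size)  /* module code: RX, never writable */
{
    return mmap(NULL, size, PROT_READ | PROT_EXEC,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

int main(void)
{
    void *data = module_alloc(4096);
    void *code = module_alloc_exec(4096);

    printf("data mapping: %p (rw-)\ncode mapping: %p (r-x)\n", data, code);
    return 0;
}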
18192 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18193 index 47acaf3..ec48ab6 100644
18194 --- a/arch/x86/kernel/nmi.c
18195 +++ b/arch/x86/kernel/nmi.c
18196 @@ -505,6 +505,17 @@ static inline void nmi_nesting_postprocess(void)
18197 dotraplinkage notrace __kprobes void
18198 do_nmi(struct pt_regs *regs, long error_code)
18199 {
18200 +
18201 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18202 + if (!user_mode(regs)) {
18203 + unsigned long cs = regs->cs & 0xFFFF;
18204 + unsigned long ip = ktva_ktla(regs->ip);
18205 +
18206 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18207 + regs->ip = ip;
18208 + }
18209 +#endif
18210 +
18211 nmi_nesting_preprocess(regs);
18212
18213 nmi_enter();
18214 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18215 index 676b8c7..870ba04 100644
18216 --- a/arch/x86/kernel/paravirt-spinlocks.c
18217 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18218 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18219 arch_spin_lock(lock);
18220 }
18221
18222 -struct pv_lock_ops pv_lock_ops = {
18223 +struct pv_lock_ops pv_lock_ops __read_only = {
18224 #ifdef CONFIG_SMP
18225 .spin_is_locked = __ticket_spin_is_locked,
18226 .spin_is_contended = __ticket_spin_is_contended,
18227 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18228 index ab13760..01218e0 100644
18229 --- a/arch/x86/kernel/paravirt.c
18230 +++ b/arch/x86/kernel/paravirt.c
18231 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
18232 {
18233 return x;
18234 }
18235 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18236 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18237 +#endif
18238
18239 void __init default_banner(void)
18240 {
18241 @@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18242 if (opfunc == NULL)
18243 /* If there's no function, patch it with a ud2a (BUG) */
18244 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18245 - else if (opfunc == _paravirt_nop)
18246 + else if (opfunc == (void *)_paravirt_nop)
18247 /* If the operation is a nop, then nop the callsite */
18248 ret = paravirt_patch_nop();
18249
18250 /* identity functions just return their single argument */
18251 - else if (opfunc == _paravirt_ident_32)
18252 + else if (opfunc == (void *)_paravirt_ident_32)
18253 ret = paravirt_patch_ident_32(insnbuf, len);
18254 - else if (opfunc == _paravirt_ident_64)
18255 + else if (opfunc == (void *)_paravirt_ident_64)
18256 ret = paravirt_patch_ident_64(insnbuf, len);
18257 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18258 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18259 + ret = paravirt_patch_ident_64(insnbuf, len);
18260 +#endif
18261
18262 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18263 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18264 @@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18265 if (insn_len > len || start == NULL)
18266 insn_len = len;
18267 else
18268 - memcpy(insnbuf, start, insn_len);
18269 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18270
18271 return insn_len;
18272 }
18273 @@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18274 preempt_enable();
18275 }
18276
18277 -struct pv_info pv_info = {
18278 +struct pv_info pv_info __read_only = {
18279 .name = "bare hardware",
18280 .paravirt_enabled = 0,
18281 .kernel_rpl = 0,
18282 @@ -315,16 +322,16 @@ struct pv_info pv_info = {
18283 #endif
18284 };
18285
18286 -struct pv_init_ops pv_init_ops = {
18287 +struct pv_init_ops pv_init_ops __read_only = {
18288 .patch = native_patch,
18289 };
18290
18291 -struct pv_time_ops pv_time_ops = {
18292 +struct pv_time_ops pv_time_ops __read_only = {
18293 .sched_clock = native_sched_clock,
18294 .steal_clock = native_steal_clock,
18295 };
18296
18297 -struct pv_irq_ops pv_irq_ops = {
18298 +struct pv_irq_ops pv_irq_ops __read_only = {
18299 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18300 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18301 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18302 @@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18303 #endif
18304 };
18305
18306 -struct pv_cpu_ops pv_cpu_ops = {
18307 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18308 .cpuid = native_cpuid,
18309 .get_debugreg = native_get_debugreg,
18310 .set_debugreg = native_set_debugreg,
18311 @@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18312 .end_context_switch = paravirt_nop,
18313 };
18314
18315 -struct pv_apic_ops pv_apic_ops = {
18316 +struct pv_apic_ops pv_apic_ops __read_only = {
18317 #ifdef CONFIG_X86_LOCAL_APIC
18318 .startup_ipi_hook = paravirt_nop,
18319 #endif
18320 };
18321
18322 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18323 +#ifdef CONFIG_X86_32
18324 +#ifdef CONFIG_X86_PAE
18325 +/* 64-bit pagetable entries */
18326 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18327 +#else
18328 /* 32-bit pagetable entries */
18329 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18330 +#endif
18331 #else
18332 /* 64-bit pagetable entries */
18333 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18334 #endif
18335
18336 -struct pv_mmu_ops pv_mmu_ops = {
18337 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18338
18339 .read_cr2 = native_read_cr2,
18340 .write_cr2 = native_write_cr2,
18341 @@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18342 .make_pud = PTE_IDENT,
18343
18344 .set_pgd = native_set_pgd,
18345 + .set_pgd_batched = native_set_pgd_batched,
18346 #endif
18347 #endif /* PAGETABLE_LEVELS >= 3 */
18348
18349 @@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18350 },
18351
18352 .set_fixmap = native_set_fixmap,
18353 +
18354 +#ifdef CONFIG_PAX_KERNEXEC
18355 + .pax_open_kernel = native_pax_open_kernel,
18356 + .pax_close_kernel = native_pax_close_kernel,
18357 +#endif
18358 +
18359 };
18360
18361 EXPORT_SYMBOL_GPL(pv_time_ops);
18362 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18363 index 35ccf75..7a15747 100644
18364 --- a/arch/x86/kernel/pci-iommu_table.c
18365 +++ b/arch/x86/kernel/pci-iommu_table.c
18366 @@ -2,7 +2,7 @@
18367 #include <asm/iommu_table.h>
18368 #include <linux/string.h>
18369 #include <linux/kallsyms.h>
18370 -
18371 +#include <linux/sched.h>
18372
18373 #define DEBUG 1
18374
18375 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18376 index 1d92a5a..7bc8c29 100644
18377 --- a/arch/x86/kernel/process.c
18378 +++ b/arch/x86/kernel/process.c
18379 @@ -69,16 +69,33 @@ void free_thread_xstate(struct task_struct *tsk)
18380
18381 void free_thread_info(struct thread_info *ti)
18382 {
18383 - free_thread_xstate(ti->task);
18384 free_pages((unsigned long)ti, THREAD_ORDER);
18385 }
18386
18387 +static struct kmem_cache *task_struct_cachep;
18388 +
18389 void arch_task_cache_init(void)
18390 {
18391 - task_xstate_cachep =
18392 - kmem_cache_create("task_xstate", xstate_size,
18393 + /* create a slab on which task_structs can be allocated */
18394 + task_struct_cachep =
18395 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18396 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18397 +
18398 + task_xstate_cachep =
18399 + kmem_cache_create("task_xstate", xstate_size,
18400 __alignof__(union thread_xstate),
18401 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18402 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18403 +}
18404 +
18405 +struct task_struct *alloc_task_struct_node(int node)
18406 +{
18407 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18408 +}
18409 +
18410 +void free_task_struct(struct task_struct *task)
18411 +{
18412 + free_thread_xstate(task);
18413 + kmem_cache_free(task_struct_cachep, task);
18414 }
18415
18416 /*
18417 @@ -91,7 +108,7 @@ void exit_thread(void)
18418 unsigned long *bp = t->io_bitmap_ptr;
18419
18420 if (bp) {
18421 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18422 + struct tss_struct *tss = init_tss + get_cpu();
18423
18424 t->io_bitmap_ptr = NULL;
18425 clear_thread_flag(TIF_IO_BITMAP);
18426 @@ -127,7 +144,7 @@ void show_regs_common(void)
18427
18428 printk(KERN_CONT "\n");
18429 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18430 - current->pid, current->comm, print_tainted(),
18431 + task_pid_nr(current), current->comm, print_tainted(),
18432 init_utsname()->release,
18433 (int)strcspn(init_utsname()->version, " "),
18434 init_utsname()->version);
18435 @@ -141,6 +158,9 @@ void flush_thread(void)
18436 {
18437 struct task_struct *tsk = current;
18438
18439 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18440 + loadsegment(gs, 0);
18441 +#endif
18442 flush_ptrace_hw_breakpoint(tsk);
18443 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18444 /*
18445 @@ -303,10 +323,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18446 regs.di = (unsigned long) arg;
18447
18448 #ifdef CONFIG_X86_32
18449 - regs.ds = __USER_DS;
18450 - regs.es = __USER_DS;
18451 + regs.ds = __KERNEL_DS;
18452 + regs.es = __KERNEL_DS;
18453 regs.fs = __KERNEL_PERCPU;
18454 - regs.gs = __KERNEL_STACK_CANARY;
18455 + savesegment(gs, regs.gs);
18456 #else
18457 regs.ss = __KERNEL_DS;
18458 #endif
18459 @@ -392,7 +412,7 @@ static void __exit_idle(void)
18460 void exit_idle(void)
18461 {
18462 /* idle loop has pid 0 */
18463 - if (current->pid)
18464 + if (task_pid_nr(current))
18465 return;
18466 __exit_idle();
18467 }
18468 @@ -501,7 +521,7 @@ bool set_pm_idle_to_default(void)
18469
18470 return ret;
18471 }
18472 -void stop_this_cpu(void *dummy)
18473 +__noreturn void stop_this_cpu(void *dummy)
18474 {
18475 local_irq_disable();
18476 /*
18477 @@ -743,16 +763,37 @@ static int __init idle_setup(char *str)
18478 }
18479 early_param("idle", idle_setup);
18480
18481 -unsigned long arch_align_stack(unsigned long sp)
18482 +#ifdef CONFIG_PAX_RANDKSTACK
18483 +void pax_randomize_kstack(struct pt_regs *regs)
18484 {
18485 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18486 - sp -= get_random_int() % 8192;
18487 - return sp & ~0xf;
18488 -}
18489 + struct thread_struct *thread = &current->thread;
18490 + unsigned long time;
18491
18492 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18493 -{
18494 - unsigned long range_end = mm->brk + 0x02000000;
18495 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18496 -}
18497 + if (!randomize_va_space)
18498 + return;
18499 +
18500 + if (v8086_mode(regs))
18501 + return;
18502
18503 + rdtscl(time);
18504 +
18505 + /* P4 seems to return a 0 LSB, ignore it */
18506 +#ifdef CONFIG_MPENTIUM4
18507 + time &= 0x3EUL;
18508 + time <<= 2;
18509 +#elif defined(CONFIG_X86_64)
18510 + time &= 0xFUL;
18511 + time <<= 4;
18512 +#else
18513 + time &= 0x1FUL;
18514 + time <<= 3;
18515 +#endif
18516 +
18517 + thread->sp0 ^= time;
18518 + load_sp0(init_tss + smp_processor_id(), thread);
18519 +
18520 +#ifdef CONFIG_X86_64
18521 + percpu_write(kernel_stack, thread->sp0);
18522 +#endif
18523 +}
18524 +#endif
18525 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18526 index ae68473..7b0bb71 100644
18527 --- a/arch/x86/kernel/process_32.c
18528 +++ b/arch/x86/kernel/process_32.c
18529 @@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18530 unsigned long thread_saved_pc(struct task_struct *tsk)
18531 {
18532 return ((unsigned long *)tsk->thread.sp)[3];
18533 +//XXX return tsk->thread.eip;
18534 }
18535
18536 void __show_regs(struct pt_regs *regs, int all)
18537 @@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18538 unsigned long sp;
18539 unsigned short ss, gs;
18540
18541 - if (user_mode_vm(regs)) {
18542 + if (user_mode(regs)) {
18543 sp = regs->sp;
18544 ss = regs->ss & 0xffff;
18545 - gs = get_user_gs(regs);
18546 } else {
18547 sp = kernel_stack_pointer(regs);
18548 savesegment(ss, ss);
18549 - savesegment(gs, gs);
18550 }
18551 + gs = get_user_gs(regs);
18552
18553 show_regs_common();
18554
18555 @@ -143,13 +143,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18556 struct task_struct *tsk;
18557 int err;
18558
18559 - childregs = task_pt_regs(p);
18560 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18561 *childregs = *regs;
18562 childregs->ax = 0;
18563 childregs->sp = sp;
18564
18565 p->thread.sp = (unsigned long) childregs;
18566 p->thread.sp0 = (unsigned long) (childregs+1);
18567 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18568
18569 p->thread.ip = (unsigned long) ret_from_fork;
18570
18571 @@ -240,7 +241,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18572 struct thread_struct *prev = &prev_p->thread,
18573 *next = &next_p->thread;
18574 int cpu = smp_processor_id();
18575 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18576 + struct tss_struct *tss = init_tss + cpu;
18577 fpu_switch_t fpu;
18578
18579 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18580 @@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18581 */
18582 lazy_save_gs(prev->gs);
18583
18584 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18585 + __set_fs(task_thread_info(next_p)->addr_limit);
18586 +#endif
18587 +
18588 /*
18589 * Load the per-thread Thread-Local Storage descriptor.
18590 */
18591 @@ -294,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18592 */
18593 arch_end_context_switch(next_p);
18594
18595 + percpu_write(current_task, next_p);
18596 + percpu_write(current_tinfo, &next_p->tinfo);
18597 +
18598 /*
18599 * Restore %gs if needed (which is common)
18600 */
18601 @@ -302,8 +310,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18602
18603 switch_fpu_finish(next_p, fpu);
18604
18605 - percpu_write(current_task, next_p);
18606 -
18607 return prev_p;
18608 }
18609
18610 @@ -333,4 +339,3 @@ unsigned long get_wchan(struct task_struct *p)
18611 } while (count++ < 16);
18612 return 0;
18613 }
18614 -
18615 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18616 index 43d8b48..c45d566 100644
18617 --- a/arch/x86/kernel/process_64.c
18618 +++ b/arch/x86/kernel/process_64.c
18619 @@ -162,8 +162,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18620 struct pt_regs *childregs;
18621 struct task_struct *me = current;
18622
18623 - childregs = ((struct pt_regs *)
18624 - (THREAD_SIZE + task_stack_page(p))) - 1;
18625 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18626 *childregs = *regs;
18627
18628 childregs->ax = 0;
18629 @@ -175,6 +174,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18630 p->thread.sp = (unsigned long) childregs;
18631 p->thread.sp0 = (unsigned long) (childregs+1);
18632 p->thread.usersp = me->thread.usersp;
18633 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18634
18635 set_tsk_thread_flag(p, TIF_FORK);
18636
18637 @@ -280,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18638 struct thread_struct *prev = &prev_p->thread;
18639 struct thread_struct *next = &next_p->thread;
18640 int cpu = smp_processor_id();
18641 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18642 + struct tss_struct *tss = init_tss + cpu;
18643 unsigned fsindex, gsindex;
18644 fpu_switch_t fpu;
18645
18646 @@ -362,10 +362,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18647 prev->usersp = percpu_read(old_rsp);
18648 percpu_write(old_rsp, next->usersp);
18649 percpu_write(current_task, next_p);
18650 + percpu_write(current_tinfo, &next_p->tinfo);
18651
18652 - percpu_write(kernel_stack,
18653 - (unsigned long)task_stack_page(next_p) +
18654 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18655 + percpu_write(kernel_stack, next->sp0);
18656
18657 /*
18658 * Now maybe reload the debug registers and handle I/O bitmaps
18659 @@ -434,12 +433,11 @@ unsigned long get_wchan(struct task_struct *p)
18660 if (!p || p == current || p->state == TASK_RUNNING)
18661 return 0;
18662 stack = (unsigned long)task_stack_page(p);
18663 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18664 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18665 return 0;
18666 fp = *(u64 *)(p->thread.sp);
18667 do {
18668 - if (fp < (unsigned long)stack ||
18669 - fp >= (unsigned long)stack+THREAD_SIZE)
18670 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18671 return 0;
18672 ip = *(u64 *)(fp+8);
18673 if (!in_sched_functions(ip))
18674 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18675 index 685845c..c8ac2fd 100644
18676 --- a/arch/x86/kernel/ptrace.c
18677 +++ b/arch/x86/kernel/ptrace.c
18678 @@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
18679 unsigned long addr, unsigned long data)
18680 {
18681 int ret;
18682 - unsigned long __user *datap = (unsigned long __user *)data;
18683 + unsigned long __user *datap = (__force unsigned long __user *)data;
18684
18685 switch (request) {
18686 /* read the word at location addr in the USER area. */
18687 @@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
18688 if ((int) addr < 0)
18689 return -EIO;
18690 ret = do_get_thread_area(child, addr,
18691 - (struct user_desc __user *)data);
18692 + (__force struct user_desc __user *) data);
18693 break;
18694
18695 case PTRACE_SET_THREAD_AREA:
18696 if ((int) addr < 0)
18697 return -EIO;
18698 ret = do_set_thread_area(child, addr,
18699 - (struct user_desc __user *)data, 0);
18700 + (__force struct user_desc __user *) data, 0);
18701 break;
18702 #endif
18703
18704 @@ -1432,7 +1432,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18705 memset(info, 0, sizeof(*info));
18706 info->si_signo = SIGTRAP;
18707 info->si_code = si_code;
18708 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18709 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18710 }
18711
18712 void user_single_step_siginfo(struct task_struct *tsk,
18713 @@ -1461,6 +1461,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18714 # define IS_IA32 0
18715 #endif
18716
18717 +#ifdef CONFIG_GRKERNSEC_SETXID
18718 +extern void gr_delayed_cred_worker(void);
18719 +#endif
18720 +
18721 /*
18722 * We must return the syscall number to actually look up in the table.
18723 * This can be -1L to skip running any syscall at all.
18724 @@ -1469,6 +1473,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18725 {
18726 long ret = 0;
18727
18728 +#ifdef CONFIG_GRKERNSEC_SETXID
18729 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18730 + gr_delayed_cred_worker();
18731 +#endif
18732 +
18733 /*
18734 * If we stepped into a sysenter/syscall insn, it trapped in
18735 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18736 @@ -1512,6 +1521,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18737 {
18738 bool step;
18739
18740 +#ifdef CONFIG_GRKERNSEC_SETXID
18741 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18742 + gr_delayed_cred_worker();
18743 +#endif
18744 +
18745 audit_syscall_exit(regs);
18746
18747 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18748 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18749 index 42eb330..139955c 100644
18750 --- a/arch/x86/kernel/pvclock.c
18751 +++ b/arch/x86/kernel/pvclock.c
18752 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18753 return pv_tsc_khz;
18754 }
18755
18756 -static atomic64_t last_value = ATOMIC64_INIT(0);
18757 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18758
18759 void pvclock_resume(void)
18760 {
18761 - atomic64_set(&last_value, 0);
18762 + atomic64_set_unchecked(&last_value, 0);
18763 }
18764
18765 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18766 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18767 * updating at the same time, and one of them could be slightly behind,
18768 * making the assumption that last_value always go forward fail to hold.
18769 */
18770 - last = atomic64_read(&last_value);
18771 + last = atomic64_read_unchecked(&last_value);
18772 do {
18773 if (ret < last)
18774 return last;
18775 - last = atomic64_cmpxchg(&last_value, last, ret);
18776 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18777 } while (unlikely(last != ret));
18778
18779 return ret;
18780 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18781 index d840e69..98e9581 100644
18782 --- a/arch/x86/kernel/reboot.c
18783 +++ b/arch/x86/kernel/reboot.c
18784 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18785 EXPORT_SYMBOL(pm_power_off);
18786
18787 static const struct desc_ptr no_idt = {};
18788 -static int reboot_mode;
18789 +static unsigned short reboot_mode;
18790 enum reboot_type reboot_type = BOOT_ACPI;
18791 int reboot_force;
18792
18793 @@ -335,13 +335,17 @@ core_initcall(reboot_init);
18794 extern const unsigned char machine_real_restart_asm[];
18795 extern const u64 machine_real_restart_gdt[3];
18796
18797 -void machine_real_restart(unsigned int type)
18798 +__noreturn void machine_real_restart(unsigned int type)
18799 {
18800 void *restart_va;
18801 unsigned long restart_pa;
18802 - void (*restart_lowmem)(unsigned int);
18803 + void (* __noreturn restart_lowmem)(unsigned int);
18804 u64 *lowmem_gdt;
18805
18806 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18807 + struct desc_struct *gdt;
18808 +#endif
18809 +
18810 local_irq_disable();
18811
18812 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18813 @@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18814 boot)". This seems like a fairly standard thing that gets set by
18815 REBOOT.COM programs, and the previous reset routine did this
18816 too. */
18817 - *((unsigned short *)0x472) = reboot_mode;
18818 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18819
18820 /* Patch the GDT in the low memory trampoline */
18821 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18822
18823 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18824 restart_pa = virt_to_phys(restart_va);
18825 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18826 + restart_lowmem = (void *)restart_pa;
18827
18828 /* GDT[0]: GDT self-pointer */
18829 lowmem_gdt[0] =
18830 @@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18831 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18832
18833 /* Jump to the identity-mapped low memory code */
18834 +
18835 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18836 + gdt = get_cpu_gdt_table(smp_processor_id());
18837 + pax_open_kernel();
18838 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18839 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18840 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18841 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18842 +#endif
18843 +#ifdef CONFIG_PAX_KERNEXEC
18844 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18845 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18846 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18847 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18848 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18849 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18850 +#endif
18851 + pax_close_kernel();
18852 +#endif
18853 +
18854 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18855 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18856 + unreachable();
18857 +#else
18858 restart_lowmem(type);
18859 +#endif
18860 +
18861 }
18862 #ifdef CONFIG_APM_MODULE
18863 EXPORT_SYMBOL(machine_real_restart);
18864 @@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18865 * try to force a triple fault and then cycle between hitting the keyboard
18866 * controller and doing that
18867 */
18868 -static void native_machine_emergency_restart(void)
18869 +__noreturn static void native_machine_emergency_restart(void)
18870 {
18871 int i;
18872 int attempt = 0;
18873 @@ -680,13 +710,13 @@ void native_machine_shutdown(void)
18874 #endif
18875 }
18876
18877 -static void __machine_emergency_restart(int emergency)
18878 +static __noreturn void __machine_emergency_restart(int emergency)
18879 {
18880 reboot_emergency = emergency;
18881 machine_ops.emergency_restart();
18882 }
18883
18884 -static void native_machine_restart(char *__unused)
18885 +static __noreturn void native_machine_restart(char *__unused)
18886 {
18887 printk("machine restart\n");
18888
18889 @@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
18890 __machine_emergency_restart(0);
18891 }
18892
18893 -static void native_machine_halt(void)
18894 +static __noreturn void native_machine_halt(void)
18895 {
18896 /* stop other cpus and apics */
18897 machine_shutdown();
18898 @@ -706,7 +736,7 @@ static void native_machine_halt(void)
18899 stop_this_cpu(NULL);
18900 }
18901
18902 -static void native_machine_power_off(void)
18903 +__noreturn static void native_machine_power_off(void)
18904 {
18905 if (pm_power_off) {
18906 if (!reboot_force)
18907 @@ -715,6 +745,7 @@ static void native_machine_power_off(void)
18908 }
18909 /* a fallback in case there is no PM info available */
18910 tboot_shutdown(TB_SHUTDOWN_HALT);
18911 + unreachable();
18912 }
18913
18914 struct machine_ops machine_ops = {
18915 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18916 index 7a6f3b3..bed145d7 100644
18917 --- a/arch/x86/kernel/relocate_kernel_64.S
18918 +++ b/arch/x86/kernel/relocate_kernel_64.S
18919 @@ -11,6 +11,7 @@
18920 #include <asm/kexec.h>
18921 #include <asm/processor-flags.h>
18922 #include <asm/pgtable_types.h>
18923 +#include <asm/alternative-asm.h>
18924
18925 /*
18926 * Must be relocatable PIC code callable as a C function
18927 @@ -160,13 +161,14 @@ identity_mapped:
18928 xorq %rbp, %rbp
18929 xorq %r8, %r8
18930 xorq %r9, %r9
18931 - xorq %r10, %r9
18932 + xorq %r10, %r10
18933 xorq %r11, %r11
18934 xorq %r12, %r12
18935 xorq %r13, %r13
18936 xorq %r14, %r14
18937 xorq %r15, %r15
18938
18939 + pax_force_retaddr 0, 1
18940 ret
18941
18942 1:
18943 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18944 index 1a29015..712f324 100644
18945 --- a/arch/x86/kernel/setup.c
18946 +++ b/arch/x86/kernel/setup.c
18947 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
18948
18949 switch (data->type) {
18950 case SETUP_E820_EXT:
18951 - parse_e820_ext(data);
18952 + parse_e820_ext((struct setup_data __force_kernel *)data);
18953 break;
18954 case SETUP_DTB:
18955 add_dtb(pa_data);
18956 @@ -639,7 +639,7 @@ static void __init trim_bios_range(void)
18957 * area (640->1Mb) as ram even though it is not.
18958 * take them out.
18959 */
18960 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
18961 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
18962 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
18963 }
18964
18965 @@ -763,14 +763,14 @@ void __init setup_arch(char **cmdline_p)
18966
18967 if (!boot_params.hdr.root_flags)
18968 root_mountflags &= ~MS_RDONLY;
18969 - init_mm.start_code = (unsigned long) _text;
18970 - init_mm.end_code = (unsigned long) _etext;
18971 + init_mm.start_code = ktla_ktva((unsigned long) _text);
18972 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
18973 init_mm.end_data = (unsigned long) _edata;
18974 init_mm.brk = _brk_end;
18975
18976 - code_resource.start = virt_to_phys(_text);
18977 - code_resource.end = virt_to_phys(_etext)-1;
18978 - data_resource.start = virt_to_phys(_etext);
18979 + code_resource.start = virt_to_phys(ktla_ktva(_text));
18980 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
18981 + data_resource.start = virt_to_phys(_sdata);
18982 data_resource.end = virt_to_phys(_edata)-1;
18983 bss_resource.start = virt_to_phys(&__bss_start);
18984 bss_resource.end = virt_to_phys(&__bss_stop)-1;
18985 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
18986 index 5a98aa2..2f9288d 100644
18987 --- a/arch/x86/kernel/setup_percpu.c
18988 +++ b/arch/x86/kernel/setup_percpu.c
18989 @@ -21,19 +21,17 @@
18990 #include <asm/cpu.h>
18991 #include <asm/stackprotector.h>
18992
18993 -DEFINE_PER_CPU(int, cpu_number);
18994 +#ifdef CONFIG_SMP
18995 +DEFINE_PER_CPU(unsigned int, cpu_number);
18996 EXPORT_PER_CPU_SYMBOL(cpu_number);
18997 +#endif
18998
18999 -#ifdef CONFIG_X86_64
19000 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19001 -#else
19002 -#define BOOT_PERCPU_OFFSET 0
19003 -#endif
19004
19005 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19006 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19007
19008 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19009 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19010 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19011 };
19012 EXPORT_SYMBOL(__per_cpu_offset);
19013 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19014 {
19015 #ifdef CONFIG_X86_32
19016 struct desc_struct gdt;
19017 + unsigned long base = per_cpu_offset(cpu);
19018
19019 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19020 - 0x2 | DESCTYPE_S, 0x8);
19021 - gdt.s = 1;
19022 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19023 + 0x83 | DESCTYPE_S, 0xC);
19024 write_gdt_entry(get_cpu_gdt_table(cpu),
19025 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19026 #endif
19027 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19028 /* alrighty, percpu areas up and running */
19029 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19030 for_each_possible_cpu(cpu) {
19031 +#ifdef CONFIG_CC_STACKPROTECTOR
19032 +#ifdef CONFIG_X86_32
19033 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19034 +#endif
19035 +#endif
19036 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19037 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19038 per_cpu(cpu_number, cpu) = cpu;
19039 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19040 */
19041 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19042 #endif
19043 +#ifdef CONFIG_CC_STACKPROTECTOR
19044 +#ifdef CONFIG_X86_32
19045 + if (!cpu)
19046 + per_cpu(stack_canary.canary, cpu) = canary;
19047 +#endif
19048 +#endif
19049 /*
19050 * Up to this point, the boot CPU has been using .init.data
19051 * area. Reload any changed state for the boot CPU.
19052 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19053 index 115eac4..c0591d5 100644
19054 --- a/arch/x86/kernel/signal.c
19055 +++ b/arch/x86/kernel/signal.c
19056 @@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
19057 * Align the stack pointer according to the i386 ABI,
19058 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19059 */
19060 - sp = ((sp + 4) & -16ul) - 4;
19061 + sp = ((sp - 12) & -16ul) - 4;
19062 #else /* !CONFIG_X86_32 */
19063 sp = round_down(sp, 16) - 8;
19064 #endif
19065 @@ -241,11 +241,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19066 * Return an always-bogus address instead so we will die with SIGSEGV.
19067 */
19068 if (onsigstack && !likely(on_sig_stack(sp)))
19069 - return (void __user *)-1L;
19070 + return (__force void __user *)-1L;
19071
19072 /* save i387 state */
19073 if (used_math() && save_i387_xstate(*fpstate) < 0)
19074 - return (void __user *)-1L;
19075 + return (__force void __user *)-1L;
19076
19077 return (void __user *)sp;
19078 }
19079 @@ -300,9 +300,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19080 }
19081
19082 if (current->mm->context.vdso)
19083 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19084 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19085 else
19086 - restorer = &frame->retcode;
19087 + restorer = (void __user *)&frame->retcode;
19088 if (ka->sa.sa_flags & SA_RESTORER)
19089 restorer = ka->sa.sa_restorer;
19090
19091 @@ -316,7 +316,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19092 * reasons and because gdb uses it as a signature to notice
19093 * signal handler stack frames.
19094 */
19095 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19096 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19097
19098 if (err)
19099 return -EFAULT;
19100 @@ -370,7 +370,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19101 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19102
19103 /* Set up to return from userspace. */
19104 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19105 + if (current->mm->context.vdso)
19106 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19107 + else
19108 + restorer = (void __user *)&frame->retcode;
19109 if (ka->sa.sa_flags & SA_RESTORER)
19110 restorer = ka->sa.sa_restorer;
19111 put_user_ex(restorer, &frame->pretcode);
19112 @@ -382,7 +385,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19113 * reasons and because gdb uses it as a signature to notice
19114 * signal handler stack frames.
19115 */
19116 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19117 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19118 } put_user_catch(err);
19119
19120 if (err)
19121 @@ -773,7 +776,7 @@ static void do_signal(struct pt_regs *regs)
19122 * X86_32: vm86 regs switched out by assembly code before reaching
19123 * here, so testing against kernel CS suffices.
19124 */
19125 - if (!user_mode(regs))
19126 + if (!user_mode_novm(regs))
19127 return;
19128
19129 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
19130 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19131 index 6e1e406..edfb7cb 100644
19132 --- a/arch/x86/kernel/smpboot.c
19133 +++ b/arch/x86/kernel/smpboot.c
19134 @@ -699,17 +699,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19135 set_idle_for_cpu(cpu, c_idle.idle);
19136 do_rest:
19137 per_cpu(current_task, cpu) = c_idle.idle;
19138 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19139 #ifdef CONFIG_X86_32
19140 /* Stack for startup_32 can be just as for start_secondary onwards */
19141 irq_ctx_init(cpu);
19142 #else
19143 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19144 initial_gs = per_cpu_offset(cpu);
19145 - per_cpu(kernel_stack, cpu) =
19146 - (unsigned long)task_stack_page(c_idle.idle) -
19147 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19148 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19149 #endif
19150 +
19151 + pax_open_kernel();
19152 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19153 + pax_close_kernel();
19154 +
19155 initial_code = (unsigned long)start_secondary;
19156 stack_start = c_idle.idle->thread.sp;
19157
19158 @@ -851,6 +854,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19159
19160 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19161
19162 +#ifdef CONFIG_PAX_PER_CPU_PGD
19163 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19164 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19165 + KERNEL_PGD_PTRS);
19166 +#endif
19167 +
19168 err = do_boot_cpu(apicid, cpu);
19169 if (err) {
19170 pr_debug("do_boot_cpu failed %d\n", err);
19171 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19172 index c346d11..d43b163 100644
19173 --- a/arch/x86/kernel/step.c
19174 +++ b/arch/x86/kernel/step.c
19175 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19176 struct desc_struct *desc;
19177 unsigned long base;
19178
19179 - seg &= ~7UL;
19180 + seg >>= 3;
19181
19182 mutex_lock(&child->mm->context.lock);
19183 - if (unlikely((seg >> 3) >= child->mm->context.size))
19184 + if (unlikely(seg >= child->mm->context.size))
19185 addr = -1L; /* bogus selector, access would fault */
19186 else {
19187 desc = child->mm->context.ldt + seg;
19188 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19189 addr += base;
19190 }
19191 mutex_unlock(&child->mm->context.lock);
19192 - }
19193 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19194 + addr = ktla_ktva(addr);
19195
19196 return addr;
19197 }
19198 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19199 unsigned char opcode[15];
19200 unsigned long addr = convert_ip_to_linear(child, regs);
19201
19202 + if (addr == -EINVAL)
19203 + return 0;
19204 +
19205 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19206 for (i = 0; i < copied; i++) {
19207 switch (opcode[i]) {
19208 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19209 index 0b0cb5f..db6b9ed 100644
19210 --- a/arch/x86/kernel/sys_i386_32.c
19211 +++ b/arch/x86/kernel/sys_i386_32.c
19212 @@ -24,17 +24,224 @@
19213
19214 #include <asm/syscalls.h>
19215
19216 -/*
19217 - * Do a system call from kernel instead of calling sys_execve so we
19218 - * end up with proper pt_regs.
19219 - */
19220 -int kernel_execve(const char *filename,
19221 - const char *const argv[],
19222 - const char *const envp[])
19223 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19224 {
19225 - long __res;
19226 - asm volatile ("int $0x80"
19227 - : "=a" (__res)
19228 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19229 - return __res;
19230 + unsigned long pax_task_size = TASK_SIZE;
19231 +
19232 +#ifdef CONFIG_PAX_SEGMEXEC
19233 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19234 + pax_task_size = SEGMEXEC_TASK_SIZE;
19235 +#endif
19236 +
19237 + if (len > pax_task_size || addr > pax_task_size - len)
19238 + return -EINVAL;
19239 +
19240 + return 0;
19241 +}
19242 +
19243 +unsigned long
19244 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19245 + unsigned long len, unsigned long pgoff, unsigned long flags)
19246 +{
19247 + struct mm_struct *mm = current->mm;
19248 + struct vm_area_struct *vma;
19249 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19250 +
19251 +#ifdef CONFIG_PAX_SEGMEXEC
19252 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19253 + pax_task_size = SEGMEXEC_TASK_SIZE;
19254 +#endif
19255 +
19256 + pax_task_size -= PAGE_SIZE;
19257 +
19258 + if (len > pax_task_size)
19259 + return -ENOMEM;
19260 +
19261 + if (flags & MAP_FIXED)
19262 + return addr;
19263 +
19264 +#ifdef CONFIG_PAX_RANDMMAP
19265 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19266 +#endif
19267 +
19268 + if (addr) {
19269 + addr = PAGE_ALIGN(addr);
19270 + if (pax_task_size - len >= addr) {
19271 + vma = find_vma(mm, addr);
19272 + if (check_heap_stack_gap(vma, addr, len))
19273 + return addr;
19274 + }
19275 + }
19276 + if (len > mm->cached_hole_size) {
19277 + start_addr = addr = mm->free_area_cache;
19278 + } else {
19279 + start_addr = addr = mm->mmap_base;
19280 + mm->cached_hole_size = 0;
19281 + }
19282 +
19283 +#ifdef CONFIG_PAX_PAGEEXEC
19284 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19285 + start_addr = 0x00110000UL;
19286 +
19287 +#ifdef CONFIG_PAX_RANDMMAP
19288 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19289 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19290 +#endif
19291 +
19292 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19293 + start_addr = addr = mm->mmap_base;
19294 + else
19295 + addr = start_addr;
19296 + }
19297 +#endif
19298 +
19299 +full_search:
19300 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19301 + /* At this point: (!vma || addr < vma->vm_end). */
19302 + if (pax_task_size - len < addr) {
19303 + /*
19304 + * Start a new search - just in case we missed
19305 + * some holes.
19306 + */
19307 + if (start_addr != mm->mmap_base) {
19308 + start_addr = addr = mm->mmap_base;
19309 + mm->cached_hole_size = 0;
19310 + goto full_search;
19311 + }
19312 + return -ENOMEM;
19313 + }
19314 + if (check_heap_stack_gap(vma, addr, len))
19315 + break;
19316 + if (addr + mm->cached_hole_size < vma->vm_start)
19317 + mm->cached_hole_size = vma->vm_start - addr;
19318 + addr = vma->vm_end;
19319 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19320 + start_addr = addr = mm->mmap_base;
19321 + mm->cached_hole_size = 0;
19322 + goto full_search;
19323 + }
19324 + }
19325 +
19326 + /*
19327 + * Remember the place where we stopped the search:
19328 + */
19329 + mm->free_area_cache = addr + len;
19330 + return addr;
19331 +}
19332 +
19333 +unsigned long
19334 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19335 + const unsigned long len, const unsigned long pgoff,
19336 + const unsigned long flags)
19337 +{
19338 + struct vm_area_struct *vma;
19339 + struct mm_struct *mm = current->mm;
19340 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19341 +
19342 +#ifdef CONFIG_PAX_SEGMEXEC
19343 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19344 + pax_task_size = SEGMEXEC_TASK_SIZE;
19345 +#endif
19346 +
19347 + pax_task_size -= PAGE_SIZE;
19348 +
19349 + /* requested length too big for entire address space */
19350 + if (len > pax_task_size)
19351 + return -ENOMEM;
19352 +
19353 + if (flags & MAP_FIXED)
19354 + return addr;
19355 +
19356 +#ifdef CONFIG_PAX_PAGEEXEC
19357 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19358 + goto bottomup;
19359 +#endif
19360 +
19361 +#ifdef CONFIG_PAX_RANDMMAP
19362 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19363 +#endif
19364 +
19365 + /* requesting a specific address */
19366 + if (addr) {
19367 + addr = PAGE_ALIGN(addr);
19368 + if (pax_task_size - len >= addr) {
19369 + vma = find_vma(mm, addr);
19370 + if (check_heap_stack_gap(vma, addr, len))
19371 + return addr;
19372 + }
19373 + }
19374 +
19375 + /* check if free_area_cache is useful for us */
19376 + if (len <= mm->cached_hole_size) {
19377 + mm->cached_hole_size = 0;
19378 + mm->free_area_cache = mm->mmap_base;
19379 + }
19380 +
19381 + /* either no address requested or can't fit in requested address hole */
19382 + addr = mm->free_area_cache;
19383 +
19384 + /* make sure it can fit in the remaining address space */
19385 + if (addr > len) {
19386 + vma = find_vma(mm, addr-len);
19387 + if (check_heap_stack_gap(vma, addr - len, len))
19388 + /* remember the address as a hint for next time */
19389 + return (mm->free_area_cache = addr-len);
19390 + }
19391 +
19392 + if (mm->mmap_base < len)
19393 + goto bottomup;
19394 +
19395 + addr = mm->mmap_base-len;
19396 +
19397 + do {
19398 + /*
19399 + * Lookup failure means no vma is above this address,
19400 + * else if new region fits below vma->vm_start,
19401 + * return with success:
19402 + */
19403 + vma = find_vma(mm, addr);
19404 + if (check_heap_stack_gap(vma, addr, len))
19405 + /* remember the address as a hint for next time */
19406 + return (mm->free_area_cache = addr);
19407 +
19408 + /* remember the largest hole we saw so far */
19409 + if (addr + mm->cached_hole_size < vma->vm_start)
19410 + mm->cached_hole_size = vma->vm_start - addr;
19411 +
19412 + /* try just below the current vma->vm_start */
19413 + addr = skip_heap_stack_gap(vma, len);
19414 + } while (!IS_ERR_VALUE(addr));
19415 +
19416 +bottomup:
19417 + /*
19418 + * A failed mmap() very likely causes application failure,
19419 + * so fall back to the bottom-up function here. This scenario
19420 + * can happen with large stack limits and large mmap()
19421 + * allocations.
19422 + */
19423 +
19424 +#ifdef CONFIG_PAX_SEGMEXEC
19425 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19426 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19427 + else
19428 +#endif
19429 +
19430 + mm->mmap_base = TASK_UNMAPPED_BASE;
19431 +
19432 +#ifdef CONFIG_PAX_RANDMMAP
19433 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19434 + mm->mmap_base += mm->delta_mmap;
19435 +#endif
19436 +
19437 + mm->free_area_cache = mm->mmap_base;
19438 + mm->cached_hole_size = ~0UL;
19439 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19440 + /*
19441 + * Restore the topdown base:
19442 + */
19443 + mm->mmap_base = base;
19444 + mm->free_area_cache = base;
19445 + mm->cached_hole_size = ~0UL;
19446 +
19447 + return addr;
19448 }
19449 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19450 index b4d3c39..82bb73b 100644
19451 --- a/arch/x86/kernel/sys_x86_64.c
19452 +++ b/arch/x86/kernel/sys_x86_64.c
19453 @@ -95,8 +95,8 @@ out:
19454 return error;
19455 }
19456
19457 -static void find_start_end(unsigned long flags, unsigned long *begin,
19458 - unsigned long *end)
19459 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19460 + unsigned long *begin, unsigned long *end)
19461 {
19462 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
19463 unsigned long new_begin;
19464 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19465 *begin = new_begin;
19466 }
19467 } else {
19468 - *begin = TASK_UNMAPPED_BASE;
19469 + *begin = mm->mmap_base;
19470 *end = TASK_SIZE;
19471 }
19472 }
19473 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19474 if (flags & MAP_FIXED)
19475 return addr;
19476
19477 - find_start_end(flags, &begin, &end);
19478 + find_start_end(mm, flags, &begin, &end);
19479
19480 if (len > end)
19481 return -ENOMEM;
19482
19483 +#ifdef CONFIG_PAX_RANDMMAP
19484 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19485 +#endif
19486 +
19487 if (addr) {
19488 addr = PAGE_ALIGN(addr);
19489 vma = find_vma(mm, addr);
19490 - if (end - len >= addr &&
19491 - (!vma || addr + len <= vma->vm_start))
19492 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19493 return addr;
19494 }
19495 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
19496 @@ -172,7 +175,7 @@ full_search:
19497 }
19498 return -ENOMEM;
19499 }
19500 - if (!vma || addr + len <= vma->vm_start) {
19501 + if (check_heap_stack_gap(vma, addr, len)) {
19502 /*
19503 * Remember the place where we stopped the search:
19504 */
19505 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19506 {
19507 struct vm_area_struct *vma;
19508 struct mm_struct *mm = current->mm;
19509 - unsigned long addr = addr0, start_addr;
19510 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
19511
19512 /* requested length too big for entire address space */
19513 if (len > TASK_SIZE)
19514 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19515 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
19516 goto bottomup;
19517
19518 +#ifdef CONFIG_PAX_RANDMMAP
19519 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19520 +#endif
19521 +
19522 /* requesting a specific address */
19523 if (addr) {
19524 addr = PAGE_ALIGN(addr);
19525 - vma = find_vma(mm, addr);
19526 - if (TASK_SIZE - len >= addr &&
19527 - (!vma || addr + len <= vma->vm_start))
19528 - return addr;
19529 + if (TASK_SIZE - len >= addr) {
19530 + vma = find_vma(mm, addr);
19531 + if (check_heap_stack_gap(vma, addr, len))
19532 + return addr;
19533 + }
19534 }
19535
19536 /* check if free_area_cache is useful for us */
19537 @@ -240,7 +248,7 @@ try_again:
19538 * return with success:
19539 */
19540 vma = find_vma(mm, addr);
19541 - if (!vma || addr+len <= vma->vm_start)
19542 + if (check_heap_stack_gap(vma, addr, len))
19543 /* remember the address as a hint for next time */
19544 return mm->free_area_cache = addr;
19545
19546 @@ -249,8 +257,8 @@ try_again:
19547 mm->cached_hole_size = vma->vm_start - addr;
19548
19549 /* try just below the current vma->vm_start */
19550 - addr = vma->vm_start-len;
19551 - } while (len < vma->vm_start);
19552 + addr = skip_heap_stack_gap(vma, len);
19553 + } while (!IS_ERR_VALUE(addr));
19554
19555 fail:
19556 /*
19557 @@ -270,13 +278,21 @@ bottomup:
19558 * can happen with large stack limits and large mmap()
19559 * allocations.
19560 */
19561 + mm->mmap_base = TASK_UNMAPPED_BASE;
19562 +
19563 +#ifdef CONFIG_PAX_RANDMMAP
19564 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19565 + mm->mmap_base += mm->delta_mmap;
19566 +#endif
19567 +
19568 + mm->free_area_cache = mm->mmap_base;
19569 mm->cached_hole_size = ~0UL;
19570 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19571 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19572 /*
19573 * Restore the topdown base:
19574 */
19575 - mm->free_area_cache = mm->mmap_base;
19576 + mm->mmap_base = base;
19577 + mm->free_area_cache = base;
19578 mm->cached_hole_size = ~0UL;
19579
19580 return addr;
19581 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19582 index 6410744..79758f0 100644
19583 --- a/arch/x86/kernel/tboot.c
19584 +++ b/arch/x86/kernel/tboot.c
19585 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19586
19587 void tboot_shutdown(u32 shutdown_type)
19588 {
19589 - void (*shutdown)(void);
19590 + void (* __noreturn shutdown)(void);
19591
19592 if (!tboot_enabled())
19593 return;
19594 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19595
19596 switch_to_tboot_pt();
19597
19598 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19599 + shutdown = (void *)tboot->shutdown_entry;
19600 shutdown();
19601
19602 /* should not reach here */
19603 @@ -299,7 +299,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19604 return 0;
19605 }
19606
19607 -static atomic_t ap_wfs_count;
19608 +static atomic_unchecked_t ap_wfs_count;
19609
19610 static int tboot_wait_for_aps(int num_aps)
19611 {
19612 @@ -323,9 +323,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19613 {
19614 switch (action) {
19615 case CPU_DYING:
19616 - atomic_inc(&ap_wfs_count);
19617 + atomic_inc_unchecked(&ap_wfs_count);
19618 if (num_online_cpus() == 1)
19619 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19620 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19621 return NOTIFY_BAD;
19622 break;
19623 }
19624 @@ -344,7 +344,7 @@ static __init int tboot_late_init(void)
19625
19626 tboot_create_trampoline();
19627
19628 - atomic_set(&ap_wfs_count, 0);
19629 + atomic_set_unchecked(&ap_wfs_count, 0);
19630 register_hotcpu_notifier(&tboot_cpu_notifier);
19631
19632 acpi_os_set_prepare_sleep(&tboot_sleep);
19633 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19634 index c6eba2b..3303326 100644
19635 --- a/arch/x86/kernel/time.c
19636 +++ b/arch/x86/kernel/time.c
19637 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19638 {
19639 unsigned long pc = instruction_pointer(regs);
19640
19641 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19642 + if (!user_mode(regs) && in_lock_functions(pc)) {
19643 #ifdef CONFIG_FRAME_POINTER
19644 - return *(unsigned long *)(regs->bp + sizeof(long));
19645 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19646 #else
19647 unsigned long *sp =
19648 (unsigned long *)kernel_stack_pointer(regs);
19649 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19650 * or above a saved flags. Eflags has bits 22-31 zero,
19651 * kernel addresses don't.
19652 */
19653 +
19654 +#ifdef CONFIG_PAX_KERNEXEC
19655 + return ktla_ktva(sp[0]);
19656 +#else
19657 if (sp[0] >> 22)
19658 return sp[0];
19659 if (sp[1] >> 22)
19660 return sp[1];
19661 #endif
19662 +
19663 +#endif
19664 }
19665 return pc;
19666 }
19667 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19668 index 9d9d2f9..ed344e4 100644
19669 --- a/arch/x86/kernel/tls.c
19670 +++ b/arch/x86/kernel/tls.c
19671 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19672 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19673 return -EINVAL;
19674
19675 +#ifdef CONFIG_PAX_SEGMEXEC
19676 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19677 + return -EINVAL;
19678 +#endif
19679 +
19680 set_tls_desc(p, idx, &info, 1);
19681
19682 return 0;
19683 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19684 index 451c0a7..e57f551 100644
19685 --- a/arch/x86/kernel/trampoline_32.S
19686 +++ b/arch/x86/kernel/trampoline_32.S
19687 @@ -32,6 +32,12 @@
19688 #include <asm/segment.h>
19689 #include <asm/page_types.h>
19690
19691 +#ifdef CONFIG_PAX_KERNEXEC
19692 +#define ta(X) (X)
19693 +#else
19694 +#define ta(X) ((X) - __PAGE_OFFSET)
19695 +#endif
19696 +
19697 #ifdef CONFIG_SMP
19698
19699 .section ".x86_trampoline","a"
19700 @@ -62,7 +68,7 @@ r_base = .
19701 inc %ax # protected mode (PE) bit
19702 lmsw %ax # into protected mode
19703 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19704 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19705 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19706
19707 # These need to be in the same 64K segment as the above;
19708 # hence we don't use the boot_gdt_descr defined in head.S
19709 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19710 index 09ff517..df19fbff 100644
19711 --- a/arch/x86/kernel/trampoline_64.S
19712 +++ b/arch/x86/kernel/trampoline_64.S
19713 @@ -90,7 +90,7 @@ startup_32:
19714 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19715 movl %eax, %ds
19716
19717 - movl $X86_CR4_PAE, %eax
19718 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19719 movl %eax, %cr4 # Enable PAE mode
19720
19721 # Setup trampoline 4 level pagetables
19722 @@ -138,7 +138,7 @@ tidt:
19723 # so the kernel can live anywhere
19724 .balign 4
19725 tgdt:
19726 - .short tgdt_end - tgdt # gdt limit
19727 + .short tgdt_end - tgdt - 1 # gdt limit
19728 .long tgdt - r_base
19729 .short 0
19730 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19731 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19732 index ff9281f1..30cb4ac 100644
19733 --- a/arch/x86/kernel/traps.c
19734 +++ b/arch/x86/kernel/traps.c
19735 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19736
19737 /* Do we ignore FPU interrupts ? */
19738 char ignore_fpu_irq;
19739 -
19740 -/*
19741 - * The IDT has to be page-aligned to simplify the Pentium
19742 - * F0 0F bug workaround.
19743 - */
19744 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19745 #endif
19746
19747 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19748 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19749 }
19750
19751 static void __kprobes
19752 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19753 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19754 long error_code, siginfo_t *info)
19755 {
19756 struct task_struct *tsk = current;
19757
19758 #ifdef CONFIG_X86_32
19759 - if (regs->flags & X86_VM_MASK) {
19760 + if (v8086_mode(regs)) {
19761 /*
19762 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19763 * On nmi (interrupt 2), do_trap should not be called.
19764 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19765 }
19766 #endif
19767
19768 - if (!user_mode(regs))
19769 + if (!user_mode_novm(regs))
19770 goto kernel_trap;
19771
19772 #ifdef CONFIG_X86_32
19773 @@ -148,7 +142,7 @@ trap_signal:
19774 printk_ratelimit()) {
19775 printk(KERN_INFO
19776 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19777 - tsk->comm, tsk->pid, str,
19778 + tsk->comm, task_pid_nr(tsk), str,
19779 regs->ip, regs->sp, error_code);
19780 print_vma_addr(" in ", regs->ip);
19781 printk("\n");
19782 @@ -165,8 +159,20 @@ kernel_trap:
19783 if (!fixup_exception(regs)) {
19784 tsk->thread.error_code = error_code;
19785 tsk->thread.trap_nr = trapnr;
19786 +
19787 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19788 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19789 + str = "PAX: suspicious stack segment fault";
19790 +#endif
19791 +
19792 die(str, regs, error_code);
19793 }
19794 +
19795 +#ifdef CONFIG_PAX_REFCOUNT
19796 + if (trapnr == 4)
19797 + pax_report_refcount_overflow(regs);
19798 +#endif
19799 +
19800 return;
19801
19802 #ifdef CONFIG_X86_32
19803 @@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19804 conditional_sti(regs);
19805
19806 #ifdef CONFIG_X86_32
19807 - if (regs->flags & X86_VM_MASK)
19808 + if (v8086_mode(regs))
19809 goto gp_in_vm86;
19810 #endif
19811
19812 tsk = current;
19813 - if (!user_mode(regs))
19814 + if (!user_mode_novm(regs))
19815 goto gp_in_kernel;
19816
19817 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19818 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19819 + struct mm_struct *mm = tsk->mm;
19820 + unsigned long limit;
19821 +
19822 + down_write(&mm->mmap_sem);
19823 + limit = mm->context.user_cs_limit;
19824 + if (limit < TASK_SIZE) {
19825 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19826 + up_write(&mm->mmap_sem);
19827 + return;
19828 + }
19829 + up_write(&mm->mmap_sem);
19830 + }
19831 +#endif
19832 +
19833 tsk->thread.error_code = error_code;
19834 tsk->thread.trap_nr = X86_TRAP_GP;
19835
19836 @@ -299,6 +321,13 @@ gp_in_kernel:
19837 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19838 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
19839 return;
19840 +
19841 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19842 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19843 + die("PAX: suspicious general protection fault", regs, error_code);
19844 + else
19845 +#endif
19846 +
19847 die("general protection fault", regs, error_code);
19848 }
19849
19850 @@ -425,7 +454,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19851 /* It's safe to allow irq's after DR6 has been saved */
19852 preempt_conditional_sti(regs);
19853
19854 - if (regs->flags & X86_VM_MASK) {
19855 + if (v8086_mode(regs)) {
19856 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19857 X86_TRAP_DB);
19858 preempt_conditional_cli(regs);
19859 @@ -440,7 +469,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19860 * We already checked v86 mode above, so we can check for kernel mode
19861 * by just checking the CPL of CS.
19862 */
19863 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19864 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19865 tsk->thread.debugreg6 &= ~DR_STEP;
19866 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19867 regs->flags &= ~X86_EFLAGS_TF;
19868 @@ -471,7 +500,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19869 return;
19870 conditional_sti(regs);
19871
19872 - if (!user_mode_vm(regs))
19873 + if (!user_mode(regs))
19874 {
19875 if (!fixup_exception(regs)) {
19876 task->thread.error_code = error_code;
19877 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19878 index b9242ba..50c5edd 100644
19879 --- a/arch/x86/kernel/verify_cpu.S
19880 +++ b/arch/x86/kernel/verify_cpu.S
19881 @@ -20,6 +20,7 @@
19882 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19883 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19884 * arch/x86/kernel/head_32.S: processor startup
19885 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19886 *
19887 * verify_cpu, returns the status of longmode and SSE in register %eax.
19888 * 0: Success 1: Failure
19889 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19890 index 255f58a..5e91150 100644
19891 --- a/arch/x86/kernel/vm86_32.c
19892 +++ b/arch/x86/kernel/vm86_32.c
19893 @@ -41,6 +41,7 @@
19894 #include <linux/ptrace.h>
19895 #include <linux/audit.h>
19896 #include <linux/stddef.h>
19897 +#include <linux/grsecurity.h>
19898
19899 #include <asm/uaccess.h>
19900 #include <asm/io.h>
19901 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19902 do_exit(SIGSEGV);
19903 }
19904
19905 - tss = &per_cpu(init_tss, get_cpu());
19906 + tss = init_tss + get_cpu();
19907 current->thread.sp0 = current->thread.saved_sp0;
19908 current->thread.sysenter_cs = __KERNEL_CS;
19909 load_sp0(tss, &current->thread);
19910 @@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19911 struct task_struct *tsk;
19912 int tmp, ret = -EPERM;
19913
19914 +#ifdef CONFIG_GRKERNSEC_VM86
19915 + if (!capable(CAP_SYS_RAWIO)) {
19916 + gr_handle_vm86();
19917 + goto out;
19918 + }
19919 +#endif
19920 +
19921 tsk = current;
19922 if (tsk->thread.saved_sp0)
19923 goto out;
19924 @@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19925 int tmp, ret;
19926 struct vm86plus_struct __user *v86;
19927
19928 +#ifdef CONFIG_GRKERNSEC_VM86
19929 + if (!capable(CAP_SYS_RAWIO)) {
19930 + gr_handle_vm86();
19931 + ret = -EPERM;
19932 + goto out;
19933 + }
19934 +#endif
19935 +
19936 tsk = current;
19937 switch (cmd) {
19938 case VM86_REQUEST_IRQ:
19939 @@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19940 tsk->thread.saved_fs = info->regs32->fs;
19941 tsk->thread.saved_gs = get_user_gs(info->regs32);
19942
19943 - tss = &per_cpu(init_tss, get_cpu());
19944 + tss = init_tss + get_cpu();
19945 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19946 if (cpu_has_sep)
19947 tsk->thread.sysenter_cs = 0;
19948 @@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19949 goto cannot_handle;
19950 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19951 goto cannot_handle;
19952 - intr_ptr = (unsigned long __user *) (i << 2);
19953 + intr_ptr = (__force unsigned long __user *) (i << 2);
19954 if (get_user(segoffs, intr_ptr))
19955 goto cannot_handle;
19956 if ((segoffs >> 16) == BIOSSEG)
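Both vm86 entry points in this file now share the same gate: under CONFIG_GRKERNSEC_VM86 a caller without CAP_SYS_RAWIO is logged through gr_handle_vm86() and bounced with -EPERM before any vm86 state is touched. Reduced to its shape (the wrapper function below is illustrative, not part of the patch):

#include <linux/capability.h>
#include <linux/grsecurity.h>

/* illustrative: deny vm86 entry to unprivileged callers, with audit */
static int vm86_entry_allowed(void)
{
#ifdef CONFIG_GRKERNSEC_VM86
        if (!capable(CAP_SYS_RAWIO)) {
                gr_handle_vm86();       /* grsecurity logging hook */
                return -EPERM;
        }
#endif
        return 0;
}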
19957 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19958 index 0f703f1..9e15f64 100644
19959 --- a/arch/x86/kernel/vmlinux.lds.S
19960 +++ b/arch/x86/kernel/vmlinux.lds.S
19961 @@ -26,6 +26,13 @@
19962 #include <asm/page_types.h>
19963 #include <asm/cache.h>
19964 #include <asm/boot.h>
19965 +#include <asm/segment.h>
19966 +
19967 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19968 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19969 +#else
19970 +#define __KERNEL_TEXT_OFFSET 0
19971 +#endif
19972
19973 #undef i386 /* in case the preprocessor is a 32bit one */
19974
19975 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19976
19977 PHDRS {
19978 text PT_LOAD FLAGS(5); /* R_E */
19979 +#ifdef CONFIG_X86_32
19980 + module PT_LOAD FLAGS(5); /* R_E */
19981 +#endif
19982 +#ifdef CONFIG_XEN
19983 + rodata PT_LOAD FLAGS(5); /* R_E */
19984 +#else
19985 + rodata PT_LOAD FLAGS(4); /* R__ */
19986 +#endif
19987 data PT_LOAD FLAGS(6); /* RW_ */
19988 -#ifdef CONFIG_X86_64
19989 + init.begin PT_LOAD FLAGS(6); /* RW_ */
19990 #ifdef CONFIG_SMP
19991 percpu PT_LOAD FLAGS(6); /* RW_ */
19992 #endif
19993 + text.init PT_LOAD FLAGS(5); /* R_E */
19994 + text.exit PT_LOAD FLAGS(5); /* R_E */
19995 init PT_LOAD FLAGS(7); /* RWE */
19996 -#endif
19997 note PT_NOTE FLAGS(0); /* ___ */
19998 }
19999
20000 SECTIONS
20001 {
20002 #ifdef CONFIG_X86_32
20003 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20004 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20005 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20006 #else
20007 - . = __START_KERNEL;
20008 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20009 + . = __START_KERNEL;
20010 #endif
20011
20012 /* Text and read-only data */
20013 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20014 - _text = .;
20015 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20016 /* bootstrapping code */
20017 +#ifdef CONFIG_X86_32
20018 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20019 +#else
20020 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20021 +#endif
20022 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20023 + _text = .;
20024 HEAD_TEXT
20025 #ifdef CONFIG_X86_32
20026 . = ALIGN(PAGE_SIZE);
20027 @@ -108,13 +128,47 @@ SECTIONS
20028 IRQENTRY_TEXT
20029 *(.fixup)
20030 *(.gnu.warning)
20031 - /* End of text section */
20032 - _etext = .;
20033 } :text = 0x9090
20034
20035 - NOTES :text :note
20036 + . += __KERNEL_TEXT_OFFSET;
20037
20038 - EXCEPTION_TABLE(16) :text = 0x9090
20039 +#ifdef CONFIG_X86_32
20040 + . = ALIGN(PAGE_SIZE);
20041 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20042 +
20043 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20044 + MODULES_EXEC_VADDR = .;
20045 + BYTE(0)
20046 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20047 + . = ALIGN(HPAGE_SIZE);
20048 + MODULES_EXEC_END = . - 1;
20049 +#endif
20050 +
20051 + } :module
20052 +#endif
20053 +
20054 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20055 + /* End of text section */
20056 + _etext = . - __KERNEL_TEXT_OFFSET;
20057 + }
20058 +
20059 +#ifdef CONFIG_X86_32
20060 + . = ALIGN(PAGE_SIZE);
20061 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20062 + *(.idt)
20063 + . = ALIGN(PAGE_SIZE);
20064 + *(.empty_zero_page)
20065 + *(.initial_pg_fixmap)
20066 + *(.initial_pg_pmd)
20067 + *(.initial_page_table)
20068 + *(.swapper_pg_dir)
20069 + } :rodata
20070 +#endif
20071 +
20072 + . = ALIGN(PAGE_SIZE);
20073 + NOTES :rodata :note
20074 +
20075 + EXCEPTION_TABLE(16) :rodata
20076
20077 #if defined(CONFIG_DEBUG_RODATA)
20078 /* .text should occupy whole number of pages */
20079 @@ -126,16 +180,20 @@ SECTIONS
20080
20081 /* Data */
20082 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20083 +
20084 +#ifdef CONFIG_PAX_KERNEXEC
20085 + . = ALIGN(HPAGE_SIZE);
20086 +#else
20087 + . = ALIGN(PAGE_SIZE);
20088 +#endif
20089 +
20090 /* Start of data section */
20091 _sdata = .;
20092
20093 /* init_task */
20094 INIT_TASK_DATA(THREAD_SIZE)
20095
20096 -#ifdef CONFIG_X86_32
20097 - /* 32 bit has nosave before _edata */
20098 NOSAVE_DATA
20099 -#endif
20100
20101 PAGE_ALIGNED_DATA(PAGE_SIZE)
20102
20103 @@ -176,12 +234,19 @@ SECTIONS
20104 #endif /* CONFIG_X86_64 */
20105
20106 /* Init code and data - will be freed after init */
20107 - . = ALIGN(PAGE_SIZE);
20108 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20109 + BYTE(0)
20110 +
20111 +#ifdef CONFIG_PAX_KERNEXEC
20112 + . = ALIGN(HPAGE_SIZE);
20113 +#else
20114 + . = ALIGN(PAGE_SIZE);
20115 +#endif
20116 +
20117 __init_begin = .; /* paired with __init_end */
20118 - }
20119 + } :init.begin
20120
20121 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20122 +#ifdef CONFIG_SMP
20123 /*
20124 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20125 * output PHDR, so the next output section - .init.text - should
20126 @@ -190,12 +255,27 @@ SECTIONS
20127 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20128 #endif
20129
20130 - INIT_TEXT_SECTION(PAGE_SIZE)
20131 -#ifdef CONFIG_X86_64
20132 - :init
20133 -#endif
20134 + . = ALIGN(PAGE_SIZE);
20135 + init_begin = .;
20136 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20137 + VMLINUX_SYMBOL(_sinittext) = .;
20138 + INIT_TEXT
20139 + VMLINUX_SYMBOL(_einittext) = .;
20140 + . = ALIGN(PAGE_SIZE);
20141 + } :text.init
20142
20143 - INIT_DATA_SECTION(16)
20144 + /*
20145 + * .exit.text is discarded at runtime, not link time, to deal with
20146 + * references from .altinstructions and .eh_frame
20147 + */
20148 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20149 + EXIT_TEXT
20150 + . = ALIGN(16);
20151 + } :text.exit
20152 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20153 +
20154 + . = ALIGN(PAGE_SIZE);
20155 + INIT_DATA_SECTION(16) :init
20156
20157 /*
20158 * Code and data for a variety of lowlevel trampolines, to be
20159 @@ -269,19 +349,12 @@ SECTIONS
20160 }
20161
20162 . = ALIGN(8);
20163 - /*
20164 - * .exit.text is discard at runtime, not link time, to deal with
20165 - * references from .altinstructions and .eh_frame
20166 - */
20167 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20168 - EXIT_TEXT
20169 - }
20170
20171 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20172 EXIT_DATA
20173 }
20174
20175 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20176 +#ifndef CONFIG_SMP
20177 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20178 #endif
20179
20180 @@ -300,16 +373,10 @@ SECTIONS
20181 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20182 __smp_locks = .;
20183 *(.smp_locks)
20184 - . = ALIGN(PAGE_SIZE);
20185 __smp_locks_end = .;
20186 + . = ALIGN(PAGE_SIZE);
20187 }
20188
20189 -#ifdef CONFIG_X86_64
20190 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20191 - NOSAVE_DATA
20192 - }
20193 -#endif
20194 -
20195 /* BSS */
20196 . = ALIGN(PAGE_SIZE);
20197 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20198 @@ -325,6 +392,7 @@ SECTIONS
20199 __brk_base = .;
20200 . += 64 * 1024; /* 64k alignment slop space */
20201 *(.brk_reservation) /* areas brk users have reserved */
20202 + . = ALIGN(HPAGE_SIZE);
20203 __brk_limit = .;
20204 }
20205
20206 @@ -351,13 +419,12 @@ SECTIONS
20207 * for the boot processor.
20208 */
20209 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20210 -INIT_PER_CPU(gdt_page);
20211 INIT_PER_CPU(irq_stack_union);
20212
20213 /*
20214 * Build-time check on the image size:
20215 */
20216 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20217 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20218 "kernel image bigger than KERNEL_IMAGE_SIZE");
20219
20220 #ifdef CONFIG_SMP
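The reworked PHDRS block earlier in this linker-script diff encodes segment permissions as raw ELF p_flags values, which is how the patch expresses the KERNEXEC policy of making rodata non-executable (except under Xen) and splitting init text away from the RWE init data. The numbers decode as PF_X=1, PF_W=2, PF_R=4; a small user-space check of the combinations used in the script:

#include <assert.h>
#include <elf.h>

int main(void)
{
        assert((PF_R | PF_X) == 5);             /* FLAGS(5): text, module, text.init, text.exit */
        assert(PF_R == 4);                      /* FLAGS(4): rodata when !CONFIG_XEN            */
        assert((PF_R | PF_W) == 6);             /* FLAGS(6): data, init.begin, percpu           */
        assert((PF_R | PF_W | PF_X) == 7);      /* FLAGS(7): init (still RWE)                   */
        return 0;
}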
20221 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20222 index 7515cf0..331a1a0 100644
20223 --- a/arch/x86/kernel/vsyscall_64.c
20224 +++ b/arch/x86/kernel/vsyscall_64.c
20225 @@ -54,15 +54,13 @@
20226 DEFINE_VVAR(int, vgetcpu_mode);
20227 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
20228
20229 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20230 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20231
20232 static int __init vsyscall_setup(char *str)
20233 {
20234 if (str) {
20235 if (!strcmp("emulate", str))
20236 vsyscall_mode = EMULATE;
20237 - else if (!strcmp("native", str))
20238 - vsyscall_mode = NATIVE;
20239 else if (!strcmp("none", str))
20240 vsyscall_mode = NONE;
20241 else
20242 @@ -206,7 +204,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20243
20244 tsk = current;
20245 if (seccomp_mode(&tsk->seccomp))
20246 - do_exit(SIGKILL);
20247 + do_group_exit(SIGKILL);
20248
20249 /*
20250 * With a real vsyscall, page faults cause SIGSEGV. We want to
20251 @@ -278,8 +276,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20252 return true;
20253
20254 sigsegv:
20255 - force_sig(SIGSEGV, current);
20256 - return true;
20257 + do_group_exit(SIGKILL);
20258 }
20259
20260 /*
20261 @@ -332,10 +329,7 @@ void __init map_vsyscall(void)
20262 extern char __vvar_page;
20263 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20264
20265 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20266 - vsyscall_mode == NATIVE
20267 - ? PAGE_KERNEL_VSYSCALL
20268 - : PAGE_KERNEL_VVAR);
20269 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20270 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20271 (unsigned long)VSYSCALL_START);
20272
20273 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20274 index 9796c2f..f686fbf 100644
20275 --- a/arch/x86/kernel/x8664_ksyms_64.c
20276 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20277 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20278 EXPORT_SYMBOL(copy_user_generic_string);
20279 EXPORT_SYMBOL(copy_user_generic_unrolled);
20280 EXPORT_SYMBOL(__copy_user_nocache);
20281 -EXPORT_SYMBOL(_copy_from_user);
20282 -EXPORT_SYMBOL(_copy_to_user);
20283
20284 EXPORT_SYMBOL(copy_page);
20285 EXPORT_SYMBOL(clear_page);
20286 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20287 index e62728e..5fc3a07 100644
20288 --- a/arch/x86/kernel/xsave.c
20289 +++ b/arch/x86/kernel/xsave.c
20290 @@ -131,7 +131,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20291 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20292 return -EINVAL;
20293
20294 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20295 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20296 fx_sw_user->extended_size -
20297 FP_XSTATE_MAGIC2_SIZE));
20298 if (err)
20299 @@ -267,7 +267,7 @@ fx_only:
20300 * the other extended state.
20301 */
20302 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20303 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20304 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20305 }
20306
20307 /*
20308 @@ -296,7 +296,7 @@ int restore_i387_xstate(void __user *buf)
20309 if (use_xsave())
20310 err = restore_user_xstate(buf);
20311 else
20312 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
20313 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20314 buf);
20315 if (unlikely(err)) {
20316 /*
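The xsave.c changes continue a pattern that runs through the whole patch: casts that cross the user/kernel pointer boundary are spelled out for sparse instead of being silently forced. Under __CHECKER__, __user places a pointer in a separate address space, __force marks an intentional cross-space cast, and __force_kernel is the patch's own annotation for "deliberately treat this as a kernel pointer". A minimal stand-alone illustration of the sparse idiom (not the kernel's exact compiler.h definitions):

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* sparse warns on a plain cast out of the __user address space;
 * __force documents that the conversion is intentional */
static inline const void *deliberate_user_to_kernel(const void __user *p)
{
        return (__force const void *)p;
}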
20317 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20318 index 9fed5be..18fd595 100644
20319 --- a/arch/x86/kvm/cpuid.c
20320 +++ b/arch/x86/kvm/cpuid.c
20321 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20322 struct kvm_cpuid2 *cpuid,
20323 struct kvm_cpuid_entry2 __user *entries)
20324 {
20325 - int r;
20326 + int r, i;
20327
20328 r = -E2BIG;
20329 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20330 goto out;
20331 r = -EFAULT;
20332 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20333 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20334 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20335 goto out;
20336 + for (i = 0; i < cpuid->nent; ++i) {
20337 + struct kvm_cpuid_entry2 cpuid_entry;
20338 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20339 + goto out;
20340 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
20341 + }
20342 vcpu->arch.cpuid_nent = cpuid->nent;
20343 kvm_apic_set_version(vcpu);
20344 kvm_x86_ops->cpuid_update(vcpu);
20345 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20346 struct kvm_cpuid2 *cpuid,
20347 struct kvm_cpuid_entry2 __user *entries)
20348 {
20349 - int r;
20350 + int r, i;
20351
20352 r = -E2BIG;
20353 if (cpuid->nent < vcpu->arch.cpuid_nent)
20354 goto out;
20355 r = -EFAULT;
20356 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20357 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20358 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20359 goto out;
20360 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20361 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20362 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20363 + goto out;
20364 + }
20365 return 0;
20366
20367 out:
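kvm_vcpu_ioctl_set_cpuid2() and kvm_vcpu_ioctl_get_cpuid2() above trade one variable-length copy_{from,to}_user() against the fixed cpuid_entries[] array for an access_ok() check plus a per-entry __copy loop, so each individual transfer has a constant size and the loop bound is the nent value already capped against KVM_MAX_CPUID_ENTRIES. The same shape in isolation (wrapper and names are illustrative):

#include <linux/uaccess.h>
#include <linux/kvm.h>

/* sketch: element-by-element copy of a user array into a fixed table */
static int copy_cpuid_entries_in(struct kvm_cpuid_entry2 *table,
                                 const struct kvm_cpuid_entry2 __user *uents,
                                 unsigned int nent)
{
        unsigned int i;

        if (!access_ok(VERIFY_READ, uents, nent * sizeof(*uents)))
                return -EFAULT;
        for (i = 0; i < nent; i++) {
                struct kvm_cpuid_entry2 e;

                if (__copy_from_user(&e, uents + i, sizeof(e)))
                        return -EFAULT;
                table[i] = e;           /* constant-size copy per entry */
        }
        return 0;
}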
20368 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20369 index 8375622..b7bca1a 100644
20370 --- a/arch/x86/kvm/emulate.c
20371 +++ b/arch/x86/kvm/emulate.c
20372 @@ -252,6 +252,7 @@ struct gprefix {
20373
20374 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20375 do { \
20376 + unsigned long _tmp; \
20377 __asm__ __volatile__ ( \
20378 _PRE_EFLAGS("0", "4", "2") \
20379 _op _suffix " %"_x"3,%1; " \
20380 @@ -266,8 +267,6 @@ struct gprefix {
20381 /* Raw emulation: instruction has two explicit operands. */
20382 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20383 do { \
20384 - unsigned long _tmp; \
20385 - \
20386 switch ((ctxt)->dst.bytes) { \
20387 case 2: \
20388 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20389 @@ -283,7 +282,6 @@ struct gprefix {
20390
20391 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20392 do { \
20393 - unsigned long _tmp; \
20394 switch ((ctxt)->dst.bytes) { \
20395 case 1: \
20396 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20397 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20398 index 8584322..17d5955 100644
20399 --- a/arch/x86/kvm/lapic.c
20400 +++ b/arch/x86/kvm/lapic.c
20401 @@ -54,7 +54,7 @@
20402 #define APIC_BUS_CYCLE_NS 1
20403
20404 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20405 -#define apic_debug(fmt, arg...)
20406 +#define apic_debug(fmt, arg...) do {} while (0)
20407
20408 #define APIC_LVT_NUM 6
20409 /* 14 is the version for Xeon and Pentium 8.4.8*/
20410 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20411 index df5a703..63748a7 100644
20412 --- a/arch/x86/kvm/paging_tmpl.h
20413 +++ b/arch/x86/kvm/paging_tmpl.h
20414 @@ -197,7 +197,7 @@ retry_walk:
20415 if (unlikely(kvm_is_error_hva(host_addr)))
20416 goto error;
20417
20418 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20419 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20420 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20421 goto error;
20422
20423 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20424 index e334389..6839087 100644
20425 --- a/arch/x86/kvm/svm.c
20426 +++ b/arch/x86/kvm/svm.c
20427 @@ -3509,7 +3509,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20428 int cpu = raw_smp_processor_id();
20429
20430 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20431 +
20432 + pax_open_kernel();
20433 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20434 + pax_close_kernel();
20435 +
20436 load_TR_desc();
20437 }
20438
20439 @@ -3887,6 +3891,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20440 #endif
20441 #endif
20442
20443 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20444 + __set_fs(current_thread_info()->addr_limit);
20445 +#endif
20446 +
20447 reload_tss(vcpu);
20448
20449 local_irq_disable();
20450 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20451 index 4ff0ab9..2ff68d3 100644
20452 --- a/arch/x86/kvm/vmx.c
20453 +++ b/arch/x86/kvm/vmx.c
20454 @@ -1303,7 +1303,11 @@ static void reload_tss(void)
20455 struct desc_struct *descs;
20456
20457 descs = (void *)gdt->address;
20458 +
20459 + pax_open_kernel();
20460 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20461 + pax_close_kernel();
20462 +
20463 load_TR_desc();
20464 }
20465
20466 @@ -2625,8 +2629,11 @@ static __init int hardware_setup(void)
20467 if (!cpu_has_vmx_flexpriority())
20468 flexpriority_enabled = 0;
20469
20470 - if (!cpu_has_vmx_tpr_shadow())
20471 - kvm_x86_ops->update_cr8_intercept = NULL;
20472 + if (!cpu_has_vmx_tpr_shadow()) {
20473 + pax_open_kernel();
20474 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20475 + pax_close_kernel();
20476 + }
20477
20478 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20479 kvm_disable_largepages();
20480 @@ -3642,7 +3649,7 @@ static void vmx_set_constant_host_state(void)
20481 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20482
20483 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20484 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20485 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20486
20487 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20488 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20489 @@ -6180,6 +6187,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20490 "jmp .Lkvm_vmx_return \n\t"
20491 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20492 ".Lkvm_vmx_return: "
20493 +
20494 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20495 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20496 + ".Lkvm_vmx_return2: "
20497 +#endif
20498 +
20499 /* Save guest registers, load host registers, keep flags */
20500 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20501 "pop %0 \n\t"
20502 @@ -6228,6 +6241,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20503 #endif
20504 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20505 [wordsize]"i"(sizeof(ulong))
20506 +
20507 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20508 + ,[cs]"i"(__KERNEL_CS)
20509 +#endif
20510 +
20511 : "cc", "memory"
20512 , R"ax", R"bx", R"di", R"si"
20513 #ifdef CONFIG_X86_64
20514 @@ -6256,7 +6274,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20515 }
20516 }
20517
20518 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20519 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20520 +
20521 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20522 + loadsegment(fs, __KERNEL_PERCPU);
20523 +#endif
20524 +
20525 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20526 + __set_fs(current_thread_info()->addr_limit);
20527 +#endif
20528 +
20529 vmx->loaded_vmcs->launched = 1;
20530
20531 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
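The reload_tss() hunks in both svm.c and vmx.c bracket a one-off descriptor write with pax_open_kernel()/pax_close_kernel(), the PaX helpers for briefly lifting write protection from data the patch otherwise keeps read-only; hardware_setup() gets the same treatment for NULLing a function pointer inside kvm_x86_ops. The pattern as used here, with the cast through void ** presumably sidestepping a read-only qualification the patch adds to the ops structure elsewhere:

pax_open_kernel();                                      /* allow the write     */
*(void **)&kvm_x86_ops->update_cr8_intercept = NULL;    /* patch the ops table */
pax_close_kernel();                                     /* back to read-only   */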
20532 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20533 index 185a2b8..866d2a6 100644
20534 --- a/arch/x86/kvm/x86.c
20535 +++ b/arch/x86/kvm/x86.c
20536 @@ -1357,8 +1357,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20537 {
20538 struct kvm *kvm = vcpu->kvm;
20539 int lm = is_long_mode(vcpu);
20540 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20541 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20542 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20543 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20544 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20545 : kvm->arch.xen_hvm_config.blob_size_32;
20546 u32 page_num = data & ~PAGE_MASK;
20547 @@ -2213,6 +2213,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20548 if (n < msr_list.nmsrs)
20549 goto out;
20550 r = -EFAULT;
20551 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20552 + goto out;
20553 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20554 num_msrs_to_save * sizeof(u32)))
20555 goto out;
20556 @@ -2338,7 +2340,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20557 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20558 struct kvm_interrupt *irq)
20559 {
20560 - if (irq->irq < 0 || irq->irq >= 256)
20561 + if (irq->irq >= 256)
20562 return -EINVAL;
20563 if (irqchip_in_kernel(vcpu->kvm))
20564 return -ENXIO;
20565 @@ -4860,7 +4862,7 @@ static void kvm_set_mmio_spte_mask(void)
20566 kvm_mmu_set_mmio_spte_mask(mask);
20567 }
20568
20569 -int kvm_arch_init(void *opaque)
20570 +int kvm_arch_init(const void *opaque)
20571 {
20572 int r;
20573 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
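Two of the x86.c changes are plain hardening rather than PaX plumbing: kvm_arch_dev_ioctl() now refuses to copy out more MSR indices than msrs_to_save[] actually holds, and kvm_vcpu_ioctl_interrupt() drops the irq->irq < 0 arm because the field is unsigned in the KVM ABI, so the single upper-bound compare already rejects any wrapped "negative" value. A tiny user-space illustration of that last point:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t irq = (uint32_t)-1;    /* what a negative value becomes */

        /* (irq < 0) can never be true for an unsigned type; the upper
         * bound alone rejects the wrapped value */
        printf("irq %u: %s\n", irq, irq >= 256 ? "rejected" : "accepted");
        return 0;
}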
20574 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20575 index 642d880..44e0f3f 100644
20576 --- a/arch/x86/lguest/boot.c
20577 +++ b/arch/x86/lguest/boot.c
20578 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20579 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20580 * Launcher to reboot us.
20581 */
20582 -static void lguest_restart(char *reason)
20583 +static __noreturn void lguest_restart(char *reason)
20584 {
20585 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20586 + BUG();
20587 }
20588
20589 /*G:050
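Marking lguest_restart() __noreturn lets callers and the compiler assume control never comes back, so the patch backs the annotation with BUG() after the shutdown hypercall; if the hypercall ever did return, the kernel would oops instead of running off the end of a function the optimizer believes cannot return. The general shape (hypothetical helper name, shown only to illustrate the pairing):

/* sketch: a __noreturn function must guarantee it never falls through */
static __noreturn void machine_restart_via_host(const char *reason)
{
        fire_shutdown_hypercall(reason);        /* expected not to return */
        BUG();                                  /* enforce the promise    */
}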
20590 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20591 index 00933d5..3a64af9 100644
20592 --- a/arch/x86/lib/atomic64_386_32.S
20593 +++ b/arch/x86/lib/atomic64_386_32.S
20594 @@ -48,6 +48,10 @@ BEGIN(read)
20595 movl (v), %eax
20596 movl 4(v), %edx
20597 RET_ENDP
20598 +BEGIN(read_unchecked)
20599 + movl (v), %eax
20600 + movl 4(v), %edx
20601 +RET_ENDP
20602 #undef v
20603
20604 #define v %esi
20605 @@ -55,6 +59,10 @@ BEGIN(set)
20606 movl %ebx, (v)
20607 movl %ecx, 4(v)
20608 RET_ENDP
20609 +BEGIN(set_unchecked)
20610 + movl %ebx, (v)
20611 + movl %ecx, 4(v)
20612 +RET_ENDP
20613 #undef v
20614
20615 #define v %esi
20616 @@ -70,6 +78,20 @@ RET_ENDP
20617 BEGIN(add)
20618 addl %eax, (v)
20619 adcl %edx, 4(v)
20620 +
20621 +#ifdef CONFIG_PAX_REFCOUNT
20622 + jno 0f
20623 + subl %eax, (v)
20624 + sbbl %edx, 4(v)
20625 + int $4
20626 +0:
20627 + _ASM_EXTABLE(0b, 0b)
20628 +#endif
20629 +
20630 +RET_ENDP
20631 +BEGIN(add_unchecked)
20632 + addl %eax, (v)
20633 + adcl %edx, 4(v)
20634 RET_ENDP
20635 #undef v
20636
20637 @@ -77,6 +99,24 @@ RET_ENDP
20638 BEGIN(add_return)
20639 addl (v), %eax
20640 adcl 4(v), %edx
20641 +
20642 +#ifdef CONFIG_PAX_REFCOUNT
20643 + into
20644 +1234:
20645 + _ASM_EXTABLE(1234b, 2f)
20646 +#endif
20647 +
20648 + movl %eax, (v)
20649 + movl %edx, 4(v)
20650 +
20651 +#ifdef CONFIG_PAX_REFCOUNT
20652 +2:
20653 +#endif
20654 +
20655 +RET_ENDP
20656 +BEGIN(add_return_unchecked)
20657 + addl (v), %eax
20658 + adcl 4(v), %edx
20659 movl %eax, (v)
20660 movl %edx, 4(v)
20661 RET_ENDP
20662 @@ -86,6 +126,20 @@ RET_ENDP
20663 BEGIN(sub)
20664 subl %eax, (v)
20665 sbbl %edx, 4(v)
20666 +
20667 +#ifdef CONFIG_PAX_REFCOUNT
20668 + jno 0f
20669 + addl %eax, (v)
20670 + adcl %edx, 4(v)
20671 + int $4
20672 +0:
20673 + _ASM_EXTABLE(0b, 0b)
20674 +#endif
20675 +
20676 +RET_ENDP
20677 +BEGIN(sub_unchecked)
20678 + subl %eax, (v)
20679 + sbbl %edx, 4(v)
20680 RET_ENDP
20681 #undef v
20682
20683 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20684 sbbl $0, %edx
20685 addl (v), %eax
20686 adcl 4(v), %edx
20687 +
20688 +#ifdef CONFIG_PAX_REFCOUNT
20689 + into
20690 +1234:
20691 + _ASM_EXTABLE(1234b, 2f)
20692 +#endif
20693 +
20694 + movl %eax, (v)
20695 + movl %edx, 4(v)
20696 +
20697 +#ifdef CONFIG_PAX_REFCOUNT
20698 +2:
20699 +#endif
20700 +
20701 +RET_ENDP
20702 +BEGIN(sub_return_unchecked)
20703 + negl %edx
20704 + negl %eax
20705 + sbbl $0, %edx
20706 + addl (v), %eax
20707 + adcl 4(v), %edx
20708 movl %eax, (v)
20709 movl %edx, 4(v)
20710 RET_ENDP
20711 @@ -105,6 +180,20 @@ RET_ENDP
20712 BEGIN(inc)
20713 addl $1, (v)
20714 adcl $0, 4(v)
20715 +
20716 +#ifdef CONFIG_PAX_REFCOUNT
20717 + jno 0f
20718 + subl $1, (v)
20719 + sbbl $0, 4(v)
20720 + int $4
20721 +0:
20722 + _ASM_EXTABLE(0b, 0b)
20723 +#endif
20724 +
20725 +RET_ENDP
20726 +BEGIN(inc_unchecked)
20727 + addl $1, (v)
20728 + adcl $0, 4(v)
20729 RET_ENDP
20730 #undef v
20731
20732 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20733 movl 4(v), %edx
20734 addl $1, %eax
20735 adcl $0, %edx
20736 +
20737 +#ifdef CONFIG_PAX_REFCOUNT
20738 + into
20739 +1234:
20740 + _ASM_EXTABLE(1234b, 2f)
20741 +#endif
20742 +
20743 + movl %eax, (v)
20744 + movl %edx, 4(v)
20745 +
20746 +#ifdef CONFIG_PAX_REFCOUNT
20747 +2:
20748 +#endif
20749 +
20750 +RET_ENDP
20751 +BEGIN(inc_return_unchecked)
20752 + movl (v), %eax
20753 + movl 4(v), %edx
20754 + addl $1, %eax
20755 + adcl $0, %edx
20756 movl %eax, (v)
20757 movl %edx, 4(v)
20758 RET_ENDP
20759 @@ -123,6 +232,20 @@ RET_ENDP
20760 BEGIN(dec)
20761 subl $1, (v)
20762 sbbl $0, 4(v)
20763 +
20764 +#ifdef CONFIG_PAX_REFCOUNT
20765 + jno 0f
20766 + addl $1, (v)
20767 + adcl $0, 4(v)
20768 + int $4
20769 +0:
20770 + _ASM_EXTABLE(0b, 0b)
20771 +#endif
20772 +
20773 +RET_ENDP
20774 +BEGIN(dec_unchecked)
20775 + subl $1, (v)
20776 + sbbl $0, 4(v)
20777 RET_ENDP
20778 #undef v
20779
20780 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20781 movl 4(v), %edx
20782 subl $1, %eax
20783 sbbl $0, %edx
20784 +
20785 +#ifdef CONFIG_PAX_REFCOUNT
20786 + into
20787 +1234:
20788 + _ASM_EXTABLE(1234b, 2f)
20789 +#endif
20790 +
20791 + movl %eax, (v)
20792 + movl %edx, 4(v)
20793 +
20794 +#ifdef CONFIG_PAX_REFCOUNT
20795 +2:
20796 +#endif
20797 +
20798 +RET_ENDP
20799 +BEGIN(dec_return_unchecked)
20800 + movl (v), %eax
20801 + movl 4(v), %edx
20802 + subl $1, %eax
20803 + sbbl $0, %edx
20804 movl %eax, (v)
20805 movl %edx, 4(v)
20806 RET_ENDP
20807 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20808 adcl %edx, %edi
20809 addl (v), %eax
20810 adcl 4(v), %edx
20811 +
20812 +#ifdef CONFIG_PAX_REFCOUNT
20813 + into
20814 +1234:
20815 + _ASM_EXTABLE(1234b, 2f)
20816 +#endif
20817 +
20818 cmpl %eax, %ecx
20819 je 3f
20820 1:
20821 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20822 1:
20823 addl $1, %eax
20824 adcl $0, %edx
20825 +
20826 +#ifdef CONFIG_PAX_REFCOUNT
20827 + into
20828 +1234:
20829 + _ASM_EXTABLE(1234b, 2f)
20830 +#endif
20831 +
20832 movl %eax, (v)
20833 movl %edx, 4(v)
20834 movl $1, %eax
20835 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20836 movl 4(v), %edx
20837 subl $1, %eax
20838 sbbl $0, %edx
20839 +
20840 +#ifdef CONFIG_PAX_REFCOUNT
20841 + into
20842 +1234:
20843 + _ASM_EXTABLE(1234b, 1f)
20844 +#endif
20845 +
20846 js 1f
20847 movl %eax, (v)
20848 movl %edx, 4(v)
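Every checked operation added to this file follows one PAX_REFCOUNT pattern: do the 64-bit add/sub/inc/dec, then jno over a recovery stub that undoes the arithmetic and raises int $4 (the x86 overflow exception); the *_return variants instead use into with an exception-table entry that skips the store when the result overflowed, and the new *_unchecked entry points keep the raw wrapping semantics for counters that are allowed to wrap. As a rough C-level analogy only (GCC 5+ builtin; the patch itself does this in assembly and reports the overflow through the #OF handler):

#include <stdbool.h>

/* analogy: refuse to store a 64-bit counter value that overflowed */
static bool checked_add64(long long *counter, long long delta)
{
        long long sum;

        if (__builtin_saddll_overflow(*counter, delta, &sum))
                return false;           /* overflow: leave *counter as-is */
        *counter = sum;
        return true;
}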
20849 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20850 index f5cc9eb..51fa319 100644
20851 --- a/arch/x86/lib/atomic64_cx8_32.S
20852 +++ b/arch/x86/lib/atomic64_cx8_32.S
20853 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20854 CFI_STARTPROC
20855
20856 read64 %ecx
20857 + pax_force_retaddr
20858 ret
20859 CFI_ENDPROC
20860 ENDPROC(atomic64_read_cx8)
20861
20862 +ENTRY(atomic64_read_unchecked_cx8)
20863 + CFI_STARTPROC
20864 +
20865 + read64 %ecx
20866 + pax_force_retaddr
20867 + ret
20868 + CFI_ENDPROC
20869 +ENDPROC(atomic64_read_unchecked_cx8)
20870 +
20871 ENTRY(atomic64_set_cx8)
20872 CFI_STARTPROC
20873
20874 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20875 cmpxchg8b (%esi)
20876 jne 1b
20877
20878 + pax_force_retaddr
20879 ret
20880 CFI_ENDPROC
20881 ENDPROC(atomic64_set_cx8)
20882
20883 +ENTRY(atomic64_set_unchecked_cx8)
20884 + CFI_STARTPROC
20885 +
20886 +1:
20887 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20888 + * are atomic on 586 and newer */
20889 + cmpxchg8b (%esi)
20890 + jne 1b
20891 +
20892 + pax_force_retaddr
20893 + ret
20894 + CFI_ENDPROC
20895 +ENDPROC(atomic64_set_unchecked_cx8)
20896 +
20897 ENTRY(atomic64_xchg_cx8)
20898 CFI_STARTPROC
20899
20900 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
20901 cmpxchg8b (%esi)
20902 jne 1b
20903
20904 + pax_force_retaddr
20905 ret
20906 CFI_ENDPROC
20907 ENDPROC(atomic64_xchg_cx8)
20908
20909 -.macro addsub_return func ins insc
20910 -ENTRY(atomic64_\func\()_return_cx8)
20911 +.macro addsub_return func ins insc unchecked=""
20912 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20913 CFI_STARTPROC
20914 SAVE ebp
20915 SAVE ebx
20916 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20917 movl %edx, %ecx
20918 \ins\()l %esi, %ebx
20919 \insc\()l %edi, %ecx
20920 +
20921 +.ifb \unchecked
20922 +#ifdef CONFIG_PAX_REFCOUNT
20923 + into
20924 +2:
20925 + _ASM_EXTABLE(2b, 3f)
20926 +#endif
20927 +.endif
20928 +
20929 LOCK_PREFIX
20930 cmpxchg8b (%ebp)
20931 jne 1b
20932 -
20933 -10:
20934 movl %ebx, %eax
20935 movl %ecx, %edx
20936 +
20937 +.ifb \unchecked
20938 +#ifdef CONFIG_PAX_REFCOUNT
20939 +3:
20940 +#endif
20941 +.endif
20942 +
20943 RESTORE edi
20944 RESTORE esi
20945 RESTORE ebx
20946 RESTORE ebp
20947 + pax_force_retaddr
20948 ret
20949 CFI_ENDPROC
20950 -ENDPROC(atomic64_\func\()_return_cx8)
20951 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20952 .endm
20953
20954 addsub_return add add adc
20955 addsub_return sub sub sbb
20956 +addsub_return add add adc _unchecked
20957 +addsub_return sub sub sbb _unchecked
20958
20959 -.macro incdec_return func ins insc
20960 -ENTRY(atomic64_\func\()_return_cx8)
20961 +.macro incdec_return func ins insc unchecked=""
20962 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20963 CFI_STARTPROC
20964 SAVE ebx
20965
20966 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
20967 movl %edx, %ecx
20968 \ins\()l $1, %ebx
20969 \insc\()l $0, %ecx
20970 +
20971 +.ifb \unchecked
20972 +#ifdef CONFIG_PAX_REFCOUNT
20973 + into
20974 +2:
20975 + _ASM_EXTABLE(2b, 3f)
20976 +#endif
20977 +.endif
20978 +
20979 LOCK_PREFIX
20980 cmpxchg8b (%esi)
20981 jne 1b
20982
20983 -10:
20984 movl %ebx, %eax
20985 movl %ecx, %edx
20986 +
20987 +.ifb \unchecked
20988 +#ifdef CONFIG_PAX_REFCOUNT
20989 +3:
20990 +#endif
20991 +.endif
20992 +
20993 RESTORE ebx
20994 + pax_force_retaddr
20995 ret
20996 CFI_ENDPROC
20997 -ENDPROC(atomic64_\func\()_return_cx8)
20998 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20999 .endm
21000
21001 incdec_return inc add adc
21002 incdec_return dec sub sbb
21003 +incdec_return inc add adc _unchecked
21004 +incdec_return dec sub sbb _unchecked
21005
21006 ENTRY(atomic64_dec_if_positive_cx8)
21007 CFI_STARTPROC
21008 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21009 movl %edx, %ecx
21010 subl $1, %ebx
21011 sbb $0, %ecx
21012 +
21013 +#ifdef CONFIG_PAX_REFCOUNT
21014 + into
21015 +1234:
21016 + _ASM_EXTABLE(1234b, 2f)
21017 +#endif
21018 +
21019 js 2f
21020 LOCK_PREFIX
21021 cmpxchg8b (%esi)
21022 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21023 movl %ebx, %eax
21024 movl %ecx, %edx
21025 RESTORE ebx
21026 + pax_force_retaddr
21027 ret
21028 CFI_ENDPROC
21029 ENDPROC(atomic64_dec_if_positive_cx8)
21030 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
21031 movl %edx, %ecx
21032 addl %ebp, %ebx
21033 adcl %edi, %ecx
21034 +
21035 +#ifdef CONFIG_PAX_REFCOUNT
21036 + into
21037 +1234:
21038 + _ASM_EXTABLE(1234b, 3f)
21039 +#endif
21040 +
21041 LOCK_PREFIX
21042 cmpxchg8b (%esi)
21043 jne 1b
21044 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
21045 CFI_ADJUST_CFA_OFFSET -8
21046 RESTORE ebx
21047 RESTORE ebp
21048 + pax_force_retaddr
21049 ret
21050 4:
21051 cmpl %edx, 4(%esp)
21052 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21053 xorl %ecx, %ecx
21054 addl $1, %ebx
21055 adcl %edx, %ecx
21056 +
21057 +#ifdef CONFIG_PAX_REFCOUNT
21058 + into
21059 +1234:
21060 + _ASM_EXTABLE(1234b, 3f)
21061 +#endif
21062 +
21063 LOCK_PREFIX
21064 cmpxchg8b (%esi)
21065 jne 1b
21066 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21067 movl $1, %eax
21068 3:
21069 RESTORE ebx
21070 + pax_force_retaddr
21071 ret
21072 CFI_ENDPROC
21073 ENDPROC(atomic64_inc_not_zero_cx8)
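atomic64_set_unchecked_cx8 and the other *_unchecked entry points added above reuse the existing cmpxchg8b retry loop, which is how a 32-bit CPU gets an atomic 64-bit store at all; as the in-line comment notes, no LOCK prefix is needed there because aligned 64-bit writes are atomic on 586 and newer. The same retry loop written with compiler builtins, purely as an analogy for what the assembly does:

#include <stdint.h>

/* analogy: atomic 64-bit store built from a compare-exchange loop */
static void atomic64_store_cas(volatile int64_t *v, int64_t new_val)
{
        int64_t old = *v;

        /* retry until our (old -> new_val) exchange wins; 'old' is
         * refreshed with the current value on every failure */
        while (!__atomic_compare_exchange_n(v, &old, new_val, 0,
                                            __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST))
                ;
}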
21074 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21075 index 78d16a5..fbcf666 100644
21076 --- a/arch/x86/lib/checksum_32.S
21077 +++ b/arch/x86/lib/checksum_32.S
21078 @@ -28,7 +28,8 @@
21079 #include <linux/linkage.h>
21080 #include <asm/dwarf2.h>
21081 #include <asm/errno.h>
21082 -
21083 +#include <asm/segment.h>
21084 +
21085 /*
21086 * computes a partial checksum, e.g. for TCP/UDP fragments
21087 */
21088 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21089
21090 #define ARGBASE 16
21091 #define FP 12
21092 -
21093 -ENTRY(csum_partial_copy_generic)
21094 +
21095 +ENTRY(csum_partial_copy_generic_to_user)
21096 CFI_STARTPROC
21097 +
21098 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21099 + pushl_cfi %gs
21100 + popl_cfi %es
21101 + jmp csum_partial_copy_generic
21102 +#endif
21103 +
21104 +ENTRY(csum_partial_copy_generic_from_user)
21105 +
21106 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21107 + pushl_cfi %gs
21108 + popl_cfi %ds
21109 +#endif
21110 +
21111 +ENTRY(csum_partial_copy_generic)
21112 subl $4,%esp
21113 CFI_ADJUST_CFA_OFFSET 4
21114 pushl_cfi %edi
21115 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21116 jmp 4f
21117 SRC(1: movw (%esi), %bx )
21118 addl $2, %esi
21119 -DST( movw %bx, (%edi) )
21120 +DST( movw %bx, %es:(%edi) )
21121 addl $2, %edi
21122 addw %bx, %ax
21123 adcl $0, %eax
21124 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21125 SRC(1: movl (%esi), %ebx )
21126 SRC( movl 4(%esi), %edx )
21127 adcl %ebx, %eax
21128 -DST( movl %ebx, (%edi) )
21129 +DST( movl %ebx, %es:(%edi) )
21130 adcl %edx, %eax
21131 -DST( movl %edx, 4(%edi) )
21132 +DST( movl %edx, %es:4(%edi) )
21133
21134 SRC( movl 8(%esi), %ebx )
21135 SRC( movl 12(%esi), %edx )
21136 adcl %ebx, %eax
21137 -DST( movl %ebx, 8(%edi) )
21138 +DST( movl %ebx, %es:8(%edi) )
21139 adcl %edx, %eax
21140 -DST( movl %edx, 12(%edi) )
21141 +DST( movl %edx, %es:12(%edi) )
21142
21143 SRC( movl 16(%esi), %ebx )
21144 SRC( movl 20(%esi), %edx )
21145 adcl %ebx, %eax
21146 -DST( movl %ebx, 16(%edi) )
21147 +DST( movl %ebx, %es:16(%edi) )
21148 adcl %edx, %eax
21149 -DST( movl %edx, 20(%edi) )
21150 +DST( movl %edx, %es:20(%edi) )
21151
21152 SRC( movl 24(%esi), %ebx )
21153 SRC( movl 28(%esi), %edx )
21154 adcl %ebx, %eax
21155 -DST( movl %ebx, 24(%edi) )
21156 +DST( movl %ebx, %es:24(%edi) )
21157 adcl %edx, %eax
21158 -DST( movl %edx, 28(%edi) )
21159 +DST( movl %edx, %es:28(%edi) )
21160
21161 lea 32(%esi), %esi
21162 lea 32(%edi), %edi
21163 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21164 shrl $2, %edx # This clears CF
21165 SRC(3: movl (%esi), %ebx )
21166 adcl %ebx, %eax
21167 -DST( movl %ebx, (%edi) )
21168 +DST( movl %ebx, %es:(%edi) )
21169 lea 4(%esi), %esi
21170 lea 4(%edi), %edi
21171 dec %edx
21172 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21173 jb 5f
21174 SRC( movw (%esi), %cx )
21175 leal 2(%esi), %esi
21176 -DST( movw %cx, (%edi) )
21177 +DST( movw %cx, %es:(%edi) )
21178 leal 2(%edi), %edi
21179 je 6f
21180 shll $16,%ecx
21181 SRC(5: movb (%esi), %cl )
21182 -DST( movb %cl, (%edi) )
21183 +DST( movb %cl, %es:(%edi) )
21184 6: addl %ecx, %eax
21185 adcl $0, %eax
21186 7:
21187 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21188
21189 6001:
21190 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21191 - movl $-EFAULT, (%ebx)
21192 + movl $-EFAULT, %ss:(%ebx)
21193
21194 # zero the complete destination - computing the rest
21195 # is too much work
21196 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21197
21198 6002:
21199 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21200 - movl $-EFAULT,(%ebx)
21201 + movl $-EFAULT,%ss:(%ebx)
21202 jmp 5000b
21203
21204 .previous
21205
21206 + pushl_cfi %ss
21207 + popl_cfi %ds
21208 + pushl_cfi %ss
21209 + popl_cfi %es
21210 popl_cfi %ebx
21211 CFI_RESTORE ebx
21212 popl_cfi %esi
21213 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21214 popl_cfi %ecx # equivalent to addl $4,%esp
21215 ret
21216 CFI_ENDPROC
21217 -ENDPROC(csum_partial_copy_generic)
21218 +ENDPROC(csum_partial_copy_generic_to_user)
21219
21220 #else
21221
21222 /* Version for PentiumII/PPro */
21223
21224 #define ROUND1(x) \
21225 + nop; nop; nop; \
21226 SRC(movl x(%esi), %ebx ) ; \
21227 addl %ebx, %eax ; \
21228 - DST(movl %ebx, x(%edi) ) ;
21229 + DST(movl %ebx, %es:x(%edi)) ;
21230
21231 #define ROUND(x) \
21232 + nop; nop; nop; \
21233 SRC(movl x(%esi), %ebx ) ; \
21234 adcl %ebx, %eax ; \
21235 - DST(movl %ebx, x(%edi) ) ;
21236 + DST(movl %ebx, %es:x(%edi)) ;
21237
21238 #define ARGBASE 12
21239 -
21240 -ENTRY(csum_partial_copy_generic)
21241 +
21242 +ENTRY(csum_partial_copy_generic_to_user)
21243 CFI_STARTPROC
21244 +
21245 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21246 + pushl_cfi %gs
21247 + popl_cfi %es
21248 + jmp csum_partial_copy_generic
21249 +#endif
21250 +
21251 +ENTRY(csum_partial_copy_generic_from_user)
21252 +
21253 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21254 + pushl_cfi %gs
21255 + popl_cfi %ds
21256 +#endif
21257 +
21258 +ENTRY(csum_partial_copy_generic)
21259 pushl_cfi %ebx
21260 CFI_REL_OFFSET ebx, 0
21261 pushl_cfi %edi
21262 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21263 subl %ebx, %edi
21264 lea -1(%esi),%edx
21265 andl $-32,%edx
21266 - lea 3f(%ebx,%ebx), %ebx
21267 + lea 3f(%ebx,%ebx,2), %ebx
21268 testl %esi, %esi
21269 jmp *%ebx
21270 1: addl $64,%esi
21271 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21272 jb 5f
21273 SRC( movw (%esi), %dx )
21274 leal 2(%esi), %esi
21275 -DST( movw %dx, (%edi) )
21276 +DST( movw %dx, %es:(%edi) )
21277 leal 2(%edi), %edi
21278 je 6f
21279 shll $16,%edx
21280 5:
21281 SRC( movb (%esi), %dl )
21282 -DST( movb %dl, (%edi) )
21283 +DST( movb %dl, %es:(%edi) )
21284 6: addl %edx, %eax
21285 adcl $0, %eax
21286 7:
21287 .section .fixup, "ax"
21288 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21289 - movl $-EFAULT, (%ebx)
21290 + movl $-EFAULT, %ss:(%ebx)
21291 # zero the complete destination (computing the rest is too much work)
21292 movl ARGBASE+8(%esp),%edi # dst
21293 movl ARGBASE+12(%esp),%ecx # len
21294 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21295 rep; stosb
21296 jmp 7b
21297 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21298 - movl $-EFAULT, (%ebx)
21299 + movl $-EFAULT, %ss:(%ebx)
21300 jmp 7b
21301 .previous
21302
21303 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21304 + pushl_cfi %ss
21305 + popl_cfi %ds
21306 + pushl_cfi %ss
21307 + popl_cfi %es
21308 +#endif
21309 +
21310 popl_cfi %esi
21311 CFI_RESTORE esi
21312 popl_cfi %edi
21313 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21314 CFI_RESTORE ebx
21315 ret
21316 CFI_ENDPROC
21317 -ENDPROC(csum_partial_copy_generic)
21318 +ENDPROC(csum_partial_copy_generic_to_user)
21319
21320 #undef ROUND
21321 #undef ROUND1
21322 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21323 index f2145cf..cea889d 100644
21324 --- a/arch/x86/lib/clear_page_64.S
21325 +++ b/arch/x86/lib/clear_page_64.S
21326 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21327 movl $4096/8,%ecx
21328 xorl %eax,%eax
21329 rep stosq
21330 + pax_force_retaddr
21331 ret
21332 CFI_ENDPROC
21333 ENDPROC(clear_page_c)
21334 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21335 movl $4096,%ecx
21336 xorl %eax,%eax
21337 rep stosb
21338 + pax_force_retaddr
21339 ret
21340 CFI_ENDPROC
21341 ENDPROC(clear_page_c_e)
21342 @@ -43,6 +45,7 @@ ENTRY(clear_page)
21343 leaq 64(%rdi),%rdi
21344 jnz .Lloop
21345 nop
21346 + pax_force_retaddr
21347 ret
21348 CFI_ENDPROC
21349 .Lclear_page_end:
21350 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
21351
21352 #include <asm/cpufeature.h>
21353
21354 - .section .altinstr_replacement,"ax"
21355 + .section .altinstr_replacement,"a"
21356 1: .byte 0xeb /* jmp <disp8> */
21357 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21358 2: .byte 0xeb /* jmp <disp8> */
21359 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21360 index 1e572c5..2a162cd 100644
21361 --- a/arch/x86/lib/cmpxchg16b_emu.S
21362 +++ b/arch/x86/lib/cmpxchg16b_emu.S
21363 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21364
21365 popf
21366 mov $1, %al
21367 + pax_force_retaddr
21368 ret
21369
21370 not_same:
21371 popf
21372 xor %al,%al
21373 + pax_force_retaddr
21374 ret
21375
21376 CFI_ENDPROC
21377 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21378 index 6b34d04..dccb07f 100644
21379 --- a/arch/x86/lib/copy_page_64.S
21380 +++ b/arch/x86/lib/copy_page_64.S
21381 @@ -9,6 +9,7 @@ copy_page_c:
21382 CFI_STARTPROC
21383 movl $4096/8,%ecx
21384 rep movsq
21385 + pax_force_retaddr
21386 ret
21387 CFI_ENDPROC
21388 ENDPROC(copy_page_c)
21389 @@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21390
21391 ENTRY(copy_page)
21392 CFI_STARTPROC
21393 - subq $2*8,%rsp
21394 - CFI_ADJUST_CFA_OFFSET 2*8
21395 + subq $3*8,%rsp
21396 + CFI_ADJUST_CFA_OFFSET 3*8
21397 movq %rbx,(%rsp)
21398 CFI_REL_OFFSET rbx, 0
21399 movq %r12,1*8(%rsp)
21400 CFI_REL_OFFSET r12, 1*8
21401 + movq %r13,2*8(%rsp)
21402 + CFI_REL_OFFSET r13, 2*8
21403
21404 movl $(4096/64)-5,%ecx
21405 .p2align 4
21406 @@ -37,7 +40,7 @@ ENTRY(copy_page)
21407 movq 16 (%rsi), %rdx
21408 movq 24 (%rsi), %r8
21409 movq 32 (%rsi), %r9
21410 - movq 40 (%rsi), %r10
21411 + movq 40 (%rsi), %r13
21412 movq 48 (%rsi), %r11
21413 movq 56 (%rsi), %r12
21414
21415 @@ -48,7 +51,7 @@ ENTRY(copy_page)
21416 movq %rdx, 16 (%rdi)
21417 movq %r8, 24 (%rdi)
21418 movq %r9, 32 (%rdi)
21419 - movq %r10, 40 (%rdi)
21420 + movq %r13, 40 (%rdi)
21421 movq %r11, 48 (%rdi)
21422 movq %r12, 56 (%rdi)
21423
21424 @@ -67,7 +70,7 @@ ENTRY(copy_page)
21425 movq 16 (%rsi), %rdx
21426 movq 24 (%rsi), %r8
21427 movq 32 (%rsi), %r9
21428 - movq 40 (%rsi), %r10
21429 + movq 40 (%rsi), %r13
21430 movq 48 (%rsi), %r11
21431 movq 56 (%rsi), %r12
21432
21433 @@ -76,7 +79,7 @@ ENTRY(copy_page)
21434 movq %rdx, 16 (%rdi)
21435 movq %r8, 24 (%rdi)
21436 movq %r9, 32 (%rdi)
21437 - movq %r10, 40 (%rdi)
21438 + movq %r13, 40 (%rdi)
21439 movq %r11, 48 (%rdi)
21440 movq %r12, 56 (%rdi)
21441
21442 @@ -89,8 +92,11 @@ ENTRY(copy_page)
21443 CFI_RESTORE rbx
21444 movq 1*8(%rsp),%r12
21445 CFI_RESTORE r12
21446 - addq $2*8,%rsp
21447 - CFI_ADJUST_CFA_OFFSET -2*8
21448 + movq 2*8(%rsp),%r13
21449 + CFI_RESTORE r13
21450 + addq $3*8,%rsp
21451 + CFI_ADJUST_CFA_OFFSET -3*8
21452 + pax_force_retaddr
21453 ret
21454 .Lcopy_page_end:
21455 CFI_ENDPROC
21456 @@ -101,7 +107,7 @@ ENDPROC(copy_page)
21457
21458 #include <asm/cpufeature.h>
21459
21460 - .section .altinstr_replacement,"ax"
21461 + .section .altinstr_replacement,"a"
21462 1: .byte 0xeb /* jmp <disp8> */
21463 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21464 2:
21465 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21466 index 0248402..821c786 100644
21467 --- a/arch/x86/lib/copy_user_64.S
21468 +++ b/arch/x86/lib/copy_user_64.S
21469 @@ -16,6 +16,7 @@
21470 #include <asm/thread_info.h>
21471 #include <asm/cpufeature.h>
21472 #include <asm/alternative-asm.h>
21473 +#include <asm/pgtable.h>
21474
21475 /*
21476 * By placing feature2 after feature1 in altinstructions section, we logically
21477 @@ -29,7 +30,7 @@
21478 .byte 0xe9 /* 32bit jump */
21479 .long \orig-1f /* by default jump to orig */
21480 1:
21481 - .section .altinstr_replacement,"ax"
21482 + .section .altinstr_replacement,"a"
21483 2: .byte 0xe9 /* near jump with 32bit immediate */
21484 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21485 3: .byte 0xe9 /* near jump with 32bit immediate */
21486 @@ -71,47 +72,20 @@
21487 #endif
21488 .endm
21489
21490 -/* Standard copy_to_user with segment limit checking */
21491 -ENTRY(_copy_to_user)
21492 - CFI_STARTPROC
21493 - GET_THREAD_INFO(%rax)
21494 - movq %rdi,%rcx
21495 - addq %rdx,%rcx
21496 - jc bad_to_user
21497 - cmpq TI_addr_limit(%rax),%rcx
21498 - ja bad_to_user
21499 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21500 - copy_user_generic_unrolled,copy_user_generic_string, \
21501 - copy_user_enhanced_fast_string
21502 - CFI_ENDPROC
21503 -ENDPROC(_copy_to_user)
21504 -
21505 -/* Standard copy_from_user with segment limit checking */
21506 -ENTRY(_copy_from_user)
21507 - CFI_STARTPROC
21508 - GET_THREAD_INFO(%rax)
21509 - movq %rsi,%rcx
21510 - addq %rdx,%rcx
21511 - jc bad_from_user
21512 - cmpq TI_addr_limit(%rax),%rcx
21513 - ja bad_from_user
21514 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21515 - copy_user_generic_unrolled,copy_user_generic_string, \
21516 - copy_user_enhanced_fast_string
21517 - CFI_ENDPROC
21518 -ENDPROC(_copy_from_user)
21519 -
21520 .section .fixup,"ax"
21521 /* must zero dest */
21522 ENTRY(bad_from_user)
21523 bad_from_user:
21524 CFI_STARTPROC
21525 + testl %edx,%edx
21526 + js bad_to_user
21527 movl %edx,%ecx
21528 xorl %eax,%eax
21529 rep
21530 stosb
21531 bad_to_user:
21532 movl %edx,%eax
21533 + pax_force_retaddr
21534 ret
21535 CFI_ENDPROC
21536 ENDPROC(bad_from_user)
21537 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21538 jz 17f
21539 1: movq (%rsi),%r8
21540 2: movq 1*8(%rsi),%r9
21541 -3: movq 2*8(%rsi),%r10
21542 +3: movq 2*8(%rsi),%rax
21543 4: movq 3*8(%rsi),%r11
21544 5: movq %r8,(%rdi)
21545 6: movq %r9,1*8(%rdi)
21546 -7: movq %r10,2*8(%rdi)
21547 +7: movq %rax,2*8(%rdi)
21548 8: movq %r11,3*8(%rdi)
21549 9: movq 4*8(%rsi),%r8
21550 10: movq 5*8(%rsi),%r9
21551 -11: movq 6*8(%rsi),%r10
21552 +11: movq 6*8(%rsi),%rax
21553 12: movq 7*8(%rsi),%r11
21554 13: movq %r8,4*8(%rdi)
21555 14: movq %r9,5*8(%rdi)
21556 -15: movq %r10,6*8(%rdi)
21557 +15: movq %rax,6*8(%rdi)
21558 16: movq %r11,7*8(%rdi)
21559 leaq 64(%rsi),%rsi
21560 leaq 64(%rdi),%rdi
21561 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21562 decl %ecx
21563 jnz 21b
21564 23: xor %eax,%eax
21565 + pax_force_retaddr
21566 ret
21567
21568 .section .fixup,"ax"
21569 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21570 3: rep
21571 movsb
21572 4: xorl %eax,%eax
21573 + pax_force_retaddr
21574 ret
21575
21576 .section .fixup,"ax"
21577 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21578 1: rep
21579 movsb
21580 2: xorl %eax,%eax
21581 + pax_force_retaddr
21582 ret
21583
21584 .section .fixup,"ax"
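The retained fixup code above is what preserves copy_from_user()'s contract once the old _copy_{to,from}_user assembly entry points are dropped: whatever could not be copied is zero-filled in the destination and the byte count left uncopied is returned, and the new sign-bit test skips the zeroing loop entirely when the length is implausibly large. The contract itself, written out as C (the wrapper and the raw-copy helper are hypothetical names, not this patch's functions):

#include <linux/string.h>
#include <linux/uaccess.h>

/* sketch of the caller-visible contract of a copy_from_user-style call */
static unsigned long
copy_in_and_zero_tail(void *dst, const void __user *src, unsigned long n)
{
        unsigned long left = raw_user_copy_in(dst, src, n);     /* hypothetical */

        if (left)
                memset((char *)dst + (n - left), 0, left);      /* zero the tail */
        return left;                                            /* 0 on success  */
}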
21585 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21586 index cb0c112..e3a6895 100644
21587 --- a/arch/x86/lib/copy_user_nocache_64.S
21588 +++ b/arch/x86/lib/copy_user_nocache_64.S
21589 @@ -8,12 +8,14 @@
21590
21591 #include <linux/linkage.h>
21592 #include <asm/dwarf2.h>
21593 +#include <asm/alternative-asm.h>
21594
21595 #define FIX_ALIGNMENT 1
21596
21597 #include <asm/current.h>
21598 #include <asm/asm-offsets.h>
21599 #include <asm/thread_info.h>
21600 +#include <asm/pgtable.h>
21601
21602 .macro ALIGN_DESTINATION
21603 #ifdef FIX_ALIGNMENT
21604 @@ -50,6 +52,15 @@
21605 */
21606 ENTRY(__copy_user_nocache)
21607 CFI_STARTPROC
21608 +
21609 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21610 + mov $PAX_USER_SHADOW_BASE,%rcx
21611 + cmp %rcx,%rsi
21612 + jae 1f
21613 + add %rcx,%rsi
21614 +1:
21615 +#endif
21616 +
21617 cmpl $8,%edx
21618 jb 20f /* less then 8 bytes, go to byte copy loop */
21619 ALIGN_DESTINATION
21620 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21621 jz 17f
21622 1: movq (%rsi),%r8
21623 2: movq 1*8(%rsi),%r9
21624 -3: movq 2*8(%rsi),%r10
21625 +3: movq 2*8(%rsi),%rax
21626 4: movq 3*8(%rsi),%r11
21627 5: movnti %r8,(%rdi)
21628 6: movnti %r9,1*8(%rdi)
21629 -7: movnti %r10,2*8(%rdi)
21630 +7: movnti %rax,2*8(%rdi)
21631 8: movnti %r11,3*8(%rdi)
21632 9: movq 4*8(%rsi),%r8
21633 10: movq 5*8(%rsi),%r9
21634 -11: movq 6*8(%rsi),%r10
21635 +11: movq 6*8(%rsi),%rax
21636 12: movq 7*8(%rsi),%r11
21637 13: movnti %r8,4*8(%rdi)
21638 14: movnti %r9,5*8(%rdi)
21639 -15: movnti %r10,6*8(%rdi)
21640 +15: movnti %rax,6*8(%rdi)
21641 16: movnti %r11,7*8(%rdi)
21642 leaq 64(%rsi),%rsi
21643 leaq 64(%rdi),%rdi
21644 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21645 jnz 21b
21646 23: xorl %eax,%eax
21647 sfence
21648 + pax_force_retaddr
21649 ret
21650
21651 .section .fixup,"ax"
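The UDEREF prologue added to __copy_user_nocache() rebases the userland source pointer into PaX's shadow mapping of userspace before the non-temporal copy runs: anything below PAX_USER_SHADOW_BASE gets that base added to it. The csum-wrappers_64.c hunk a little further down performs the identical adjustment in C, which in isolation is simply:

#ifdef CONFIG_PAX_MEMORY_UDEREF
        if ((unsigned long)src < PAX_USER_SHADOW_BASE)
                src += PAX_USER_SHADOW_BASE;
#endif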
21652 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21653 index fb903b7..c92b7f7 100644
21654 --- a/arch/x86/lib/csum-copy_64.S
21655 +++ b/arch/x86/lib/csum-copy_64.S
21656 @@ -8,6 +8,7 @@
21657 #include <linux/linkage.h>
21658 #include <asm/dwarf2.h>
21659 #include <asm/errno.h>
21660 +#include <asm/alternative-asm.h>
21661
21662 /*
21663 * Checksum copy with exception handling.
21664 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21665 CFI_RESTORE rbp
21666 addq $7*8, %rsp
21667 CFI_ADJUST_CFA_OFFSET -7*8
21668 + pax_force_retaddr 0, 1
21669 ret
21670 CFI_RESTORE_STATE
21671
21672 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21673 index 459b58a..9570bc7 100644
21674 --- a/arch/x86/lib/csum-wrappers_64.c
21675 +++ b/arch/x86/lib/csum-wrappers_64.c
21676 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21677 len -= 2;
21678 }
21679 }
21680 - isum = csum_partial_copy_generic((__force const void *)src,
21681 +
21682 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21683 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21684 + src += PAX_USER_SHADOW_BASE;
21685 +#endif
21686 +
21687 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21688 dst, len, isum, errp, NULL);
21689 if (unlikely(*errp))
21690 goto out_err;
21691 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21692 }
21693
21694 *errp = 0;
21695 - return csum_partial_copy_generic(src, (void __force *)dst,
21696 +
21697 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21698 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21699 + dst += PAX_USER_SHADOW_BASE;
21700 +#endif
21701 +
21702 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21703 len, isum, NULL, errp);
21704 }
21705 EXPORT_SYMBOL(csum_partial_copy_to_user);
21706 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21707 index 51f1504..ddac4c1 100644
21708 --- a/arch/x86/lib/getuser.S
21709 +++ b/arch/x86/lib/getuser.S
21710 @@ -33,15 +33,38 @@
21711 #include <asm/asm-offsets.h>
21712 #include <asm/thread_info.h>
21713 #include <asm/asm.h>
21714 +#include <asm/segment.h>
21715 +#include <asm/pgtable.h>
21716 +#include <asm/alternative-asm.h>
21717 +
21718 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21719 +#define __copyuser_seg gs;
21720 +#else
21721 +#define __copyuser_seg
21722 +#endif
21723
21724 .text
21725 ENTRY(__get_user_1)
21726 CFI_STARTPROC
21727 +
21728 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21729 GET_THREAD_INFO(%_ASM_DX)
21730 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21731 jae bad_get_user
21732 -1: movzb (%_ASM_AX),%edx
21733 +
21734 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21735 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21736 + cmp %_ASM_DX,%_ASM_AX
21737 + jae 1234f
21738 + add %_ASM_DX,%_ASM_AX
21739 +1234:
21740 +#endif
21741 +
21742 +#endif
21743 +
21744 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21745 xor %eax,%eax
21746 + pax_force_retaddr
21747 ret
21748 CFI_ENDPROC
21749 ENDPROC(__get_user_1)
21750 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21751 ENTRY(__get_user_2)
21752 CFI_STARTPROC
21753 add $1,%_ASM_AX
21754 +
21755 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21756 jc bad_get_user
21757 GET_THREAD_INFO(%_ASM_DX)
21758 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21759 jae bad_get_user
21760 -2: movzwl -1(%_ASM_AX),%edx
21761 +
21762 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21763 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21764 + cmp %_ASM_DX,%_ASM_AX
21765 + jae 1234f
21766 + add %_ASM_DX,%_ASM_AX
21767 +1234:
21768 +#endif
21769 +
21770 +#endif
21771 +
21772 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21773 xor %eax,%eax
21774 + pax_force_retaddr
21775 ret
21776 CFI_ENDPROC
21777 ENDPROC(__get_user_2)
21778 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21779 ENTRY(__get_user_4)
21780 CFI_STARTPROC
21781 add $3,%_ASM_AX
21782 +
21783 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21784 jc bad_get_user
21785 GET_THREAD_INFO(%_ASM_DX)
21786 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21787 jae bad_get_user
21788 -3: mov -3(%_ASM_AX),%edx
21789 +
21790 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21791 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21792 + cmp %_ASM_DX,%_ASM_AX
21793 + jae 1234f
21794 + add %_ASM_DX,%_ASM_AX
21795 +1234:
21796 +#endif
21797 +
21798 +#endif
21799 +
21800 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21801 xor %eax,%eax
21802 + pax_force_retaddr
21803 ret
21804 CFI_ENDPROC
21805 ENDPROC(__get_user_4)
21806 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21807 GET_THREAD_INFO(%_ASM_DX)
21808 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21809 jae bad_get_user
21810 +
21811 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21812 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21813 + cmp %_ASM_DX,%_ASM_AX
21814 + jae 1234f
21815 + add %_ASM_DX,%_ASM_AX
21816 +1234:
21817 +#endif
21818 +
21819 4: movq -7(%_ASM_AX),%_ASM_DX
21820 xor %eax,%eax
21821 + pax_force_retaddr
21822 ret
21823 CFI_ENDPROC
21824 ENDPROC(__get_user_8)
21825 @@ -91,6 +152,7 @@ bad_get_user:
21826 CFI_STARTPROC
21827 xor %edx,%edx
21828 mov $(-EFAULT),%_ASM_AX
21829 + pax_force_retaddr
21830 ret
21831 CFI_ENDPROC
21832 END(bad_get_user)
21833 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21834 index b1e6c4b..21ae8fc 100644
21835 --- a/arch/x86/lib/insn.c
21836 +++ b/arch/x86/lib/insn.c
21837 @@ -21,6 +21,11 @@
21838 #include <linux/string.h>
21839 #include <asm/inat.h>
21840 #include <asm/insn.h>
21841 +#ifdef __KERNEL__
21842 +#include <asm/pgtable_types.h>
21843 +#else
21844 +#define ktla_ktva(addr) addr
21845 +#endif
21846
21847 /* Verify next sizeof(t) bytes can be on the same instruction */
21848 #define validate_next(t, insn, n) \
21849 @@ -49,8 +54,8 @@
21850 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21851 {
21852 memset(insn, 0, sizeof(*insn));
21853 - insn->kaddr = kaddr;
21854 - insn->next_byte = kaddr;
21855 + insn->kaddr = ktla_ktva(kaddr);
21856 + insn->next_byte = ktla_ktva(kaddr);
21857 insn->x86_64 = x86_64 ? 1 : 0;
21858 insn->opnd_bytes = 4;
21859 if (x86_64)
21860 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21861 index 05a95e7..326f2fa 100644
21862 --- a/arch/x86/lib/iomap_copy_64.S
21863 +++ b/arch/x86/lib/iomap_copy_64.S
21864 @@ -17,6 +17,7 @@
21865
21866 #include <linux/linkage.h>
21867 #include <asm/dwarf2.h>
21868 +#include <asm/alternative-asm.h>
21869
21870 /*
21871 * override generic version in lib/iomap_copy.c
21872 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21873 CFI_STARTPROC
21874 movl %edx,%ecx
21875 rep movsd
21876 + pax_force_retaddr
21877 ret
21878 CFI_ENDPROC
21879 ENDPROC(__iowrite32_copy)
21880 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21881 index 1c273be..da9cc0e 100644
21882 --- a/arch/x86/lib/memcpy_64.S
21883 +++ b/arch/x86/lib/memcpy_64.S
21884 @@ -33,6 +33,7 @@
21885 rep movsq
21886 movl %edx, %ecx
21887 rep movsb
21888 + pax_force_retaddr
21889 ret
21890 .Lmemcpy_e:
21891 .previous
21892 @@ -49,6 +50,7 @@
21893 movq %rdi, %rax
21894 movq %rdx, %rcx
21895 rep movsb
21896 + pax_force_retaddr
21897 ret
21898 .Lmemcpy_e_e:
21899 .previous
21900 @@ -76,13 +78,13 @@ ENTRY(memcpy)
21901 */
21902 movq 0*8(%rsi), %r8
21903 movq 1*8(%rsi), %r9
21904 - movq 2*8(%rsi), %r10
21905 + movq 2*8(%rsi), %rcx
21906 movq 3*8(%rsi), %r11
21907 leaq 4*8(%rsi), %rsi
21908
21909 movq %r8, 0*8(%rdi)
21910 movq %r9, 1*8(%rdi)
21911 - movq %r10, 2*8(%rdi)
21912 + movq %rcx, 2*8(%rdi)
21913 movq %r11, 3*8(%rdi)
21914 leaq 4*8(%rdi), %rdi
21915 jae .Lcopy_forward_loop
21916 @@ -105,12 +107,12 @@ ENTRY(memcpy)
21917 subq $0x20, %rdx
21918 movq -1*8(%rsi), %r8
21919 movq -2*8(%rsi), %r9
21920 - movq -3*8(%rsi), %r10
21921 + movq -3*8(%rsi), %rcx
21922 movq -4*8(%rsi), %r11
21923 leaq -4*8(%rsi), %rsi
21924 movq %r8, -1*8(%rdi)
21925 movq %r9, -2*8(%rdi)
21926 - movq %r10, -3*8(%rdi)
21927 + movq %rcx, -3*8(%rdi)
21928 movq %r11, -4*8(%rdi)
21929 leaq -4*8(%rdi), %rdi
21930 jae .Lcopy_backward_loop
21931 @@ -130,12 +132,13 @@ ENTRY(memcpy)
21932 */
21933 movq 0*8(%rsi), %r8
21934 movq 1*8(%rsi), %r9
21935 - movq -2*8(%rsi, %rdx), %r10
21936 + movq -2*8(%rsi, %rdx), %rcx
21937 movq -1*8(%rsi, %rdx), %r11
21938 movq %r8, 0*8(%rdi)
21939 movq %r9, 1*8(%rdi)
21940 - movq %r10, -2*8(%rdi, %rdx)
21941 + movq %rcx, -2*8(%rdi, %rdx)
21942 movq %r11, -1*8(%rdi, %rdx)
21943 + pax_force_retaddr
21944 retq
21945 .p2align 4
21946 .Lless_16bytes:
21947 @@ -148,6 +151,7 @@ ENTRY(memcpy)
21948 movq -1*8(%rsi, %rdx), %r9
21949 movq %r8, 0*8(%rdi)
21950 movq %r9, -1*8(%rdi, %rdx)
21951 + pax_force_retaddr
21952 retq
21953 .p2align 4
21954 .Lless_8bytes:
21955 @@ -161,6 +165,7 @@ ENTRY(memcpy)
21956 movl -4(%rsi, %rdx), %r8d
21957 movl %ecx, (%rdi)
21958 movl %r8d, -4(%rdi, %rdx)
21959 + pax_force_retaddr
21960 retq
21961 .p2align 4
21962 .Lless_3bytes:
21963 @@ -179,6 +184,7 @@ ENTRY(memcpy)
21964 movb %cl, (%rdi)
21965
21966 .Lend:
21967 + pax_force_retaddr
21968 retq
21969 CFI_ENDPROC
21970 ENDPROC(memcpy)
21971 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
21972 index ee16461..c39c199 100644
21973 --- a/arch/x86/lib/memmove_64.S
21974 +++ b/arch/x86/lib/memmove_64.S
21975 @@ -61,13 +61,13 @@ ENTRY(memmove)
21976 5:
21977 sub $0x20, %rdx
21978 movq 0*8(%rsi), %r11
21979 - movq 1*8(%rsi), %r10
21980 + movq 1*8(%rsi), %rcx
21981 movq 2*8(%rsi), %r9
21982 movq 3*8(%rsi), %r8
21983 leaq 4*8(%rsi), %rsi
21984
21985 movq %r11, 0*8(%rdi)
21986 - movq %r10, 1*8(%rdi)
21987 + movq %rcx, 1*8(%rdi)
21988 movq %r9, 2*8(%rdi)
21989 movq %r8, 3*8(%rdi)
21990 leaq 4*8(%rdi), %rdi
21991 @@ -81,10 +81,10 @@ ENTRY(memmove)
21992 4:
21993 movq %rdx, %rcx
21994 movq -8(%rsi, %rdx), %r11
21995 - lea -8(%rdi, %rdx), %r10
21996 + lea -8(%rdi, %rdx), %r9
21997 shrq $3, %rcx
21998 rep movsq
21999 - movq %r11, (%r10)
22000 + movq %r11, (%r9)
22001 jmp 13f
22002 .Lmemmove_end_forward:
22003
22004 @@ -95,14 +95,14 @@ ENTRY(memmove)
22005 7:
22006 movq %rdx, %rcx
22007 movq (%rsi), %r11
22008 - movq %rdi, %r10
22009 + movq %rdi, %r9
22010 leaq -8(%rsi, %rdx), %rsi
22011 leaq -8(%rdi, %rdx), %rdi
22012 shrq $3, %rcx
22013 std
22014 rep movsq
22015 cld
22016 - movq %r11, (%r10)
22017 + movq %r11, (%r9)
22018 jmp 13f
22019
22020 /*
22021 @@ -127,13 +127,13 @@ ENTRY(memmove)
22022 8:
22023 subq $0x20, %rdx
22024 movq -1*8(%rsi), %r11
22025 - movq -2*8(%rsi), %r10
22026 + movq -2*8(%rsi), %rcx
22027 movq -3*8(%rsi), %r9
22028 movq -4*8(%rsi), %r8
22029 leaq -4*8(%rsi), %rsi
22030
22031 movq %r11, -1*8(%rdi)
22032 - movq %r10, -2*8(%rdi)
22033 + movq %rcx, -2*8(%rdi)
22034 movq %r9, -3*8(%rdi)
22035 movq %r8, -4*8(%rdi)
22036 leaq -4*8(%rdi), %rdi
22037 @@ -151,11 +151,11 @@ ENTRY(memmove)
22038 * Move data from 16 bytes to 31 bytes.
22039 */
22040 movq 0*8(%rsi), %r11
22041 - movq 1*8(%rsi), %r10
22042 + movq 1*8(%rsi), %rcx
22043 movq -2*8(%rsi, %rdx), %r9
22044 movq -1*8(%rsi, %rdx), %r8
22045 movq %r11, 0*8(%rdi)
22046 - movq %r10, 1*8(%rdi)
22047 + movq %rcx, 1*8(%rdi)
22048 movq %r9, -2*8(%rdi, %rdx)
22049 movq %r8, -1*8(%rdi, %rdx)
22050 jmp 13f
22051 @@ -167,9 +167,9 @@ ENTRY(memmove)
22052 * Move data from 8 bytes to 15 bytes.
22053 */
22054 movq 0*8(%rsi), %r11
22055 - movq -1*8(%rsi, %rdx), %r10
22056 + movq -1*8(%rsi, %rdx), %r9
22057 movq %r11, 0*8(%rdi)
22058 - movq %r10, -1*8(%rdi, %rdx)
22059 + movq %r9, -1*8(%rdi, %rdx)
22060 jmp 13f
22061 10:
22062 cmpq $4, %rdx
22063 @@ -178,9 +178,9 @@ ENTRY(memmove)
22064 * Move data from 4 bytes to 7 bytes.
22065 */
22066 movl (%rsi), %r11d
22067 - movl -4(%rsi, %rdx), %r10d
22068 + movl -4(%rsi, %rdx), %r9d
22069 movl %r11d, (%rdi)
22070 - movl %r10d, -4(%rdi, %rdx)
22071 + movl %r9d, -4(%rdi, %rdx)
22072 jmp 13f
22073 11:
22074 cmp $2, %rdx
22075 @@ -189,9 +189,9 @@ ENTRY(memmove)
22076 * Move data from 2 bytes to 3 bytes.
22077 */
22078 movw (%rsi), %r11w
22079 - movw -2(%rsi, %rdx), %r10w
22080 + movw -2(%rsi, %rdx), %r9w
22081 movw %r11w, (%rdi)
22082 - movw %r10w, -2(%rdi, %rdx)
22083 + movw %r9w, -2(%rdi, %rdx)
22084 jmp 13f
22085 12:
22086 cmp $1, %rdx
22087 @@ -202,6 +202,7 @@ ENTRY(memmove)
22088 movb (%rsi), %r11b
22089 movb %r11b, (%rdi)
22090 13:
22091 + pax_force_retaddr
22092 retq
22093 CFI_ENDPROC
22094
22095 @@ -210,6 +211,7 @@ ENTRY(memmove)
22096 /* Forward moving data. */
22097 movq %rdx, %rcx
22098 rep movsb
22099 + pax_force_retaddr
22100 retq
22101 .Lmemmove_end_forward_efs:
22102 .previous
22103 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22104 index 2dcb380..963660a 100644
22105 --- a/arch/x86/lib/memset_64.S
22106 +++ b/arch/x86/lib/memset_64.S
22107 @@ -30,6 +30,7 @@
22108 movl %edx,%ecx
22109 rep stosb
22110 movq %r9,%rax
22111 + pax_force_retaddr
22112 ret
22113 .Lmemset_e:
22114 .previous
22115 @@ -52,6 +53,7 @@
22116 movq %rdx,%rcx
22117 rep stosb
22118 movq %r9,%rax
22119 + pax_force_retaddr
22120 ret
22121 .Lmemset_e_e:
22122 .previous
22123 @@ -59,7 +61,7 @@
22124 ENTRY(memset)
22125 ENTRY(__memset)
22126 CFI_STARTPROC
22127 - movq %rdi,%r10
22128 + movq %rdi,%r11
22129
22130 /* expand byte value */
22131 movzbl %sil,%ecx
22132 @@ -117,7 +119,8 @@ ENTRY(__memset)
22133 jnz .Lloop_1
22134
22135 .Lende:
22136 - movq %r10,%rax
22137 + movq %r11,%rax
22138 + pax_force_retaddr
22139 ret
22140
22141 CFI_RESTORE_STATE
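
Two changes repeat through memcpy_64.S, memmove_64.S and memset_64.S above (and msr-reg.S below): every exit path gains a pax_force_retaddr before its ret, and every use of %r10 is rewritten to %rcx, %r9 or %r11. The substitution does not change what the routines compute, since the replacement registers are equally scratch at those points; it frees %r10, which the KERNEXEC instrumentation appears to reserve for the mask applied by pax_force_retaddr (that reservation, and the mask value below, are assumptions). A small user-space sketch of pinning a value to %r10 with a local register variable:

    /* Sketch: keeping a mask pinned in %r10 and consuming it from inline
     * asm -- illustrating why hand-written assembly had to give %r10 up. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        register uint64_t mask asm("r10") = 0x8000000000000000ULL; /* assumed value */
        uint64_t ret = 0x00007fffdeadbeefULL;

        /* the pinned register is used as an explicit asm operand */
        asm("orq %1, %0" : "+r"(ret) : "r"(mask));
        printf("%#llx\n", (unsigned long long)ret);
        return 0;
    }
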
22142 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22143 index c9f2d9b..e7fd2c0 100644
22144 --- a/arch/x86/lib/mmx_32.c
22145 +++ b/arch/x86/lib/mmx_32.c
22146 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22147 {
22148 void *p;
22149 int i;
22150 + unsigned long cr0;
22151
22152 if (unlikely(in_interrupt()))
22153 return __memcpy(to, from, len);
22154 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22155 kernel_fpu_begin();
22156
22157 __asm__ __volatile__ (
22158 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22159 - " prefetch 64(%0)\n"
22160 - " prefetch 128(%0)\n"
22161 - " prefetch 192(%0)\n"
22162 - " prefetch 256(%0)\n"
22163 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22164 + " prefetch 64(%1)\n"
22165 + " prefetch 128(%1)\n"
22166 + " prefetch 192(%1)\n"
22167 + " prefetch 256(%1)\n"
22168 "2: \n"
22169 ".section .fixup, \"ax\"\n"
22170 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22171 + "3: \n"
22172 +
22173 +#ifdef CONFIG_PAX_KERNEXEC
22174 + " movl %%cr0, %0\n"
22175 + " movl %0, %%eax\n"
22176 + " andl $0xFFFEFFFF, %%eax\n"
22177 + " movl %%eax, %%cr0\n"
22178 +#endif
22179 +
22180 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22181 +
22182 +#ifdef CONFIG_PAX_KERNEXEC
22183 + " movl %0, %%cr0\n"
22184 +#endif
22185 +
22186 " jmp 2b\n"
22187 ".previous\n"
22188 _ASM_EXTABLE(1b, 3b)
22189 - : : "r" (from));
22190 + : "=&r" (cr0) : "r" (from) : "ax");
22191
22192 for ( ; i > 5; i--) {
22193 __asm__ __volatile__ (
22194 - "1: prefetch 320(%0)\n"
22195 - "2: movq (%0), %%mm0\n"
22196 - " movq 8(%0), %%mm1\n"
22197 - " movq 16(%0), %%mm2\n"
22198 - " movq 24(%0), %%mm3\n"
22199 - " movq %%mm0, (%1)\n"
22200 - " movq %%mm1, 8(%1)\n"
22201 - " movq %%mm2, 16(%1)\n"
22202 - " movq %%mm3, 24(%1)\n"
22203 - " movq 32(%0), %%mm0\n"
22204 - " movq 40(%0), %%mm1\n"
22205 - " movq 48(%0), %%mm2\n"
22206 - " movq 56(%0), %%mm3\n"
22207 - " movq %%mm0, 32(%1)\n"
22208 - " movq %%mm1, 40(%1)\n"
22209 - " movq %%mm2, 48(%1)\n"
22210 - " movq %%mm3, 56(%1)\n"
22211 + "1: prefetch 320(%1)\n"
22212 + "2: movq (%1), %%mm0\n"
22213 + " movq 8(%1), %%mm1\n"
22214 + " movq 16(%1), %%mm2\n"
22215 + " movq 24(%1), %%mm3\n"
22216 + " movq %%mm0, (%2)\n"
22217 + " movq %%mm1, 8(%2)\n"
22218 + " movq %%mm2, 16(%2)\n"
22219 + " movq %%mm3, 24(%2)\n"
22220 + " movq 32(%1), %%mm0\n"
22221 + " movq 40(%1), %%mm1\n"
22222 + " movq 48(%1), %%mm2\n"
22223 + " movq 56(%1), %%mm3\n"
22224 + " movq %%mm0, 32(%2)\n"
22225 + " movq %%mm1, 40(%2)\n"
22226 + " movq %%mm2, 48(%2)\n"
22227 + " movq %%mm3, 56(%2)\n"
22228 ".section .fixup, \"ax\"\n"
22229 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22230 + "3:\n"
22231 +
22232 +#ifdef CONFIG_PAX_KERNEXEC
22233 + " movl %%cr0, %0\n"
22234 + " movl %0, %%eax\n"
22235 + " andl $0xFFFEFFFF, %%eax\n"
22236 + " movl %%eax, %%cr0\n"
22237 +#endif
22238 +
22239 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22240 +
22241 +#ifdef CONFIG_PAX_KERNEXEC
22242 + " movl %0, %%cr0\n"
22243 +#endif
22244 +
22245 " jmp 2b\n"
22246 ".previous\n"
22247 _ASM_EXTABLE(1b, 3b)
22248 - : : "r" (from), "r" (to) : "memory");
22249 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22250
22251 from += 64;
22252 to += 64;
22253 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22254 static void fast_copy_page(void *to, void *from)
22255 {
22256 int i;
22257 + unsigned long cr0;
22258
22259 kernel_fpu_begin();
22260
22261 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22262 * but that is for later. -AV
22263 */
22264 __asm__ __volatile__(
22265 - "1: prefetch (%0)\n"
22266 - " prefetch 64(%0)\n"
22267 - " prefetch 128(%0)\n"
22268 - " prefetch 192(%0)\n"
22269 - " prefetch 256(%0)\n"
22270 + "1: prefetch (%1)\n"
22271 + " prefetch 64(%1)\n"
22272 + " prefetch 128(%1)\n"
22273 + " prefetch 192(%1)\n"
22274 + " prefetch 256(%1)\n"
22275 "2: \n"
22276 ".section .fixup, \"ax\"\n"
22277 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22278 + "3: \n"
22279 +
22280 +#ifdef CONFIG_PAX_KERNEXEC
22281 + " movl %%cr0, %0\n"
22282 + " movl %0, %%eax\n"
22283 + " andl $0xFFFEFFFF, %%eax\n"
22284 + " movl %%eax, %%cr0\n"
22285 +#endif
22286 +
22287 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22288 +
22289 +#ifdef CONFIG_PAX_KERNEXEC
22290 + " movl %0, %%cr0\n"
22291 +#endif
22292 +
22293 " jmp 2b\n"
22294 ".previous\n"
22295 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22296 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22297
22298 for (i = 0; i < (4096-320)/64; i++) {
22299 __asm__ __volatile__ (
22300 - "1: prefetch 320(%0)\n"
22301 - "2: movq (%0), %%mm0\n"
22302 - " movntq %%mm0, (%1)\n"
22303 - " movq 8(%0), %%mm1\n"
22304 - " movntq %%mm1, 8(%1)\n"
22305 - " movq 16(%0), %%mm2\n"
22306 - " movntq %%mm2, 16(%1)\n"
22307 - " movq 24(%0), %%mm3\n"
22308 - " movntq %%mm3, 24(%1)\n"
22309 - " movq 32(%0), %%mm4\n"
22310 - " movntq %%mm4, 32(%1)\n"
22311 - " movq 40(%0), %%mm5\n"
22312 - " movntq %%mm5, 40(%1)\n"
22313 - " movq 48(%0), %%mm6\n"
22314 - " movntq %%mm6, 48(%1)\n"
22315 - " movq 56(%0), %%mm7\n"
22316 - " movntq %%mm7, 56(%1)\n"
22317 + "1: prefetch 320(%1)\n"
22318 + "2: movq (%1), %%mm0\n"
22319 + " movntq %%mm0, (%2)\n"
22320 + " movq 8(%1), %%mm1\n"
22321 + " movntq %%mm1, 8(%2)\n"
22322 + " movq 16(%1), %%mm2\n"
22323 + " movntq %%mm2, 16(%2)\n"
22324 + " movq 24(%1), %%mm3\n"
22325 + " movntq %%mm3, 24(%2)\n"
22326 + " movq 32(%1), %%mm4\n"
22327 + " movntq %%mm4, 32(%2)\n"
22328 + " movq 40(%1), %%mm5\n"
22329 + " movntq %%mm5, 40(%2)\n"
22330 + " movq 48(%1), %%mm6\n"
22331 + " movntq %%mm6, 48(%2)\n"
22332 + " movq 56(%1), %%mm7\n"
22333 + " movntq %%mm7, 56(%2)\n"
22334 ".section .fixup, \"ax\"\n"
22335 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22336 + "3:\n"
22337 +
22338 +#ifdef CONFIG_PAX_KERNEXEC
22339 + " movl %%cr0, %0\n"
22340 + " movl %0, %%eax\n"
22341 + " andl $0xFFFEFFFF, %%eax\n"
22342 + " movl %%eax, %%cr0\n"
22343 +#endif
22344 +
22345 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22346 +
22347 +#ifdef CONFIG_PAX_KERNEXEC
22348 + " movl %0, %%cr0\n"
22349 +#endif
22350 +
22351 " jmp 2b\n"
22352 ".previous\n"
22353 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22354 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22355
22356 from += 64;
22357 to += 64;
22358 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22359 static void fast_copy_page(void *to, void *from)
22360 {
22361 int i;
22362 + unsigned long cr0;
22363
22364 kernel_fpu_begin();
22365
22366 __asm__ __volatile__ (
22367 - "1: prefetch (%0)\n"
22368 - " prefetch 64(%0)\n"
22369 - " prefetch 128(%0)\n"
22370 - " prefetch 192(%0)\n"
22371 - " prefetch 256(%0)\n"
22372 + "1: prefetch (%1)\n"
22373 + " prefetch 64(%1)\n"
22374 + " prefetch 128(%1)\n"
22375 + " prefetch 192(%1)\n"
22376 + " prefetch 256(%1)\n"
22377 "2: \n"
22378 ".section .fixup, \"ax\"\n"
22379 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22380 + "3: \n"
22381 +
22382 +#ifdef CONFIG_PAX_KERNEXEC
22383 + " movl %%cr0, %0\n"
22384 + " movl %0, %%eax\n"
22385 + " andl $0xFFFEFFFF, %%eax\n"
22386 + " movl %%eax, %%cr0\n"
22387 +#endif
22388 +
22389 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22390 +
22391 +#ifdef CONFIG_PAX_KERNEXEC
22392 + " movl %0, %%cr0\n"
22393 +#endif
22394 +
22395 " jmp 2b\n"
22396 ".previous\n"
22397 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22398 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22399
22400 for (i = 0; i < 4096/64; i++) {
22401 __asm__ __volatile__ (
22402 - "1: prefetch 320(%0)\n"
22403 - "2: movq (%0), %%mm0\n"
22404 - " movq 8(%0), %%mm1\n"
22405 - " movq 16(%0), %%mm2\n"
22406 - " movq 24(%0), %%mm3\n"
22407 - " movq %%mm0, (%1)\n"
22408 - " movq %%mm1, 8(%1)\n"
22409 - " movq %%mm2, 16(%1)\n"
22410 - " movq %%mm3, 24(%1)\n"
22411 - " movq 32(%0), %%mm0\n"
22412 - " movq 40(%0), %%mm1\n"
22413 - " movq 48(%0), %%mm2\n"
22414 - " movq 56(%0), %%mm3\n"
22415 - " movq %%mm0, 32(%1)\n"
22416 - " movq %%mm1, 40(%1)\n"
22417 - " movq %%mm2, 48(%1)\n"
22418 - " movq %%mm3, 56(%1)\n"
22419 + "1: prefetch 320(%1)\n"
22420 + "2: movq (%1), %%mm0\n"
22421 + " movq 8(%1), %%mm1\n"
22422 + " movq 16(%1), %%mm2\n"
22423 + " movq 24(%1), %%mm3\n"
22424 + " movq %%mm0, (%2)\n"
22425 + " movq %%mm1, 8(%2)\n"
22426 + " movq %%mm2, 16(%2)\n"
22427 + " movq %%mm3, 24(%2)\n"
22428 + " movq 32(%1), %%mm0\n"
22429 + " movq 40(%1), %%mm1\n"
22430 + " movq 48(%1), %%mm2\n"
22431 + " movq 56(%1), %%mm3\n"
22432 + " movq %%mm0, 32(%2)\n"
22433 + " movq %%mm1, 40(%2)\n"
22434 + " movq %%mm2, 48(%2)\n"
22435 + " movq %%mm3, 56(%2)\n"
22436 ".section .fixup, \"ax\"\n"
22437 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22438 + "3:\n"
22439 +
22440 +#ifdef CONFIG_PAX_KERNEXEC
22441 + " movl %%cr0, %0\n"
22442 + " movl %0, %%eax\n"
22443 + " andl $0xFFFEFFFF, %%eax\n"
22444 + " movl %%eax, %%cr0\n"
22445 +#endif
22446 +
22447 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22448 +
22449 +#ifdef CONFIG_PAX_KERNEXEC
22450 + " movl %0, %%cr0\n"
22451 +#endif
22452 +
22453 " jmp 2b\n"
22454 ".previous\n"
22455 _ASM_EXTABLE(1b, 3b)
22456 - : : "r" (from), "r" (to) : "memory");
22457 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22458
22459 from += 64;
22460 to += 64;
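
In the MMX copy routines above, the .fixup path patches the faulting prefetch site in place with a short jmp (the 0x1AEB / 0x05EB words are "jmp +26" / "jmp +5" encodings, as the original comments note). With KERNEXEC the kernel text is read-only, so the patched code now brackets that self-modification by clearing CR0.WP and restoring the saved value afterwards; that is why each asm gains a cr0 output operand, an "ax" clobber, and renumbered operands (%0 becomes %1, %1 becomes %2). The sketch below only demonstrates the mask arithmetic and never touches CR0:

    /* 0xFFFEFFFF clears exactly bit 16 of CR0, the WP (write-protect) bit. */
    #include <stdint.h>
    #include <stdio.h>

    #define X86_CR0_WP (1u << 16)

    int main(void)
    {
        uint32_t cr0 = 0x8005003bu;               /* arbitrary example value */
        uint32_t unprotected = cr0 & 0xFFFEFFFFu; /* same mask as the patch */

        printf("mask matches ~WP: %s\n",
               0xFFFEFFFFu == (uint32_t)~X86_CR0_WP ? "yes" : "no");
        printf("%#x -> %#x (WP cleared)\n", cr0, unprotected);
        return 0;
    }
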
22461 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22462 index 69fa106..adda88b 100644
22463 --- a/arch/x86/lib/msr-reg.S
22464 +++ b/arch/x86/lib/msr-reg.S
22465 @@ -3,6 +3,7 @@
22466 #include <asm/dwarf2.h>
22467 #include <asm/asm.h>
22468 #include <asm/msr.h>
22469 +#include <asm/alternative-asm.h>
22470
22471 #ifdef CONFIG_X86_64
22472 /*
22473 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22474 CFI_STARTPROC
22475 pushq_cfi %rbx
22476 pushq_cfi %rbp
22477 - movq %rdi, %r10 /* Save pointer */
22478 + movq %rdi, %r9 /* Save pointer */
22479 xorl %r11d, %r11d /* Return value */
22480 movl (%rdi), %eax
22481 movl 4(%rdi), %ecx
22482 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22483 movl 28(%rdi), %edi
22484 CFI_REMEMBER_STATE
22485 1: \op
22486 -2: movl %eax, (%r10)
22487 +2: movl %eax, (%r9)
22488 movl %r11d, %eax /* Return value */
22489 - movl %ecx, 4(%r10)
22490 - movl %edx, 8(%r10)
22491 - movl %ebx, 12(%r10)
22492 - movl %ebp, 20(%r10)
22493 - movl %esi, 24(%r10)
22494 - movl %edi, 28(%r10)
22495 + movl %ecx, 4(%r9)
22496 + movl %edx, 8(%r9)
22497 + movl %ebx, 12(%r9)
22498 + movl %ebp, 20(%r9)
22499 + movl %esi, 24(%r9)
22500 + movl %edi, 28(%r9)
22501 popq_cfi %rbp
22502 popq_cfi %rbx
22503 + pax_force_retaddr
22504 ret
22505 3:
22506 CFI_RESTORE_STATE
22507 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22508 index 36b0d15..d381858 100644
22509 --- a/arch/x86/lib/putuser.S
22510 +++ b/arch/x86/lib/putuser.S
22511 @@ -15,7 +15,9 @@
22512 #include <asm/thread_info.h>
22513 #include <asm/errno.h>
22514 #include <asm/asm.h>
22515 -
22516 +#include <asm/segment.h>
22517 +#include <asm/pgtable.h>
22518 +#include <asm/alternative-asm.h>
22519
22520 /*
22521 * __put_user_X
22522 @@ -29,52 +31,119 @@
22523 * as they get called from within inline assembly.
22524 */
22525
22526 -#define ENTER CFI_STARTPROC ; \
22527 - GET_THREAD_INFO(%_ASM_BX)
22528 -#define EXIT ret ; \
22529 +#define ENTER CFI_STARTPROC
22530 +#define EXIT pax_force_retaddr; ret ; \
22531 CFI_ENDPROC
22532
22533 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22534 +#define _DEST %_ASM_CX,%_ASM_BX
22535 +#else
22536 +#define _DEST %_ASM_CX
22537 +#endif
22538 +
22539 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22540 +#define __copyuser_seg gs;
22541 +#else
22542 +#define __copyuser_seg
22543 +#endif
22544 +
22545 .text
22546 ENTRY(__put_user_1)
22547 ENTER
22548 +
22549 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22550 + GET_THREAD_INFO(%_ASM_BX)
22551 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22552 jae bad_put_user
22553 -1: movb %al,(%_ASM_CX)
22554 +
22555 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22556 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22557 + cmp %_ASM_BX,%_ASM_CX
22558 + jb 1234f
22559 + xor %ebx,%ebx
22560 +1234:
22561 +#endif
22562 +
22563 +#endif
22564 +
22565 +1: __copyuser_seg movb %al,(_DEST)
22566 xor %eax,%eax
22567 EXIT
22568 ENDPROC(__put_user_1)
22569
22570 ENTRY(__put_user_2)
22571 ENTER
22572 +
22573 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22574 + GET_THREAD_INFO(%_ASM_BX)
22575 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22576 sub $1,%_ASM_BX
22577 cmp %_ASM_BX,%_ASM_CX
22578 jae bad_put_user
22579 -2: movw %ax,(%_ASM_CX)
22580 +
22581 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22582 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22583 + cmp %_ASM_BX,%_ASM_CX
22584 + jb 1234f
22585 + xor %ebx,%ebx
22586 +1234:
22587 +#endif
22588 +
22589 +#endif
22590 +
22591 +2: __copyuser_seg movw %ax,(_DEST)
22592 xor %eax,%eax
22593 EXIT
22594 ENDPROC(__put_user_2)
22595
22596 ENTRY(__put_user_4)
22597 ENTER
22598 +
22599 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22600 + GET_THREAD_INFO(%_ASM_BX)
22601 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22602 sub $3,%_ASM_BX
22603 cmp %_ASM_BX,%_ASM_CX
22604 jae bad_put_user
22605 -3: movl %eax,(%_ASM_CX)
22606 +
22607 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22608 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22609 + cmp %_ASM_BX,%_ASM_CX
22610 + jb 1234f
22611 + xor %ebx,%ebx
22612 +1234:
22613 +#endif
22614 +
22615 +#endif
22616 +
22617 +3: __copyuser_seg movl %eax,(_DEST)
22618 xor %eax,%eax
22619 EXIT
22620 ENDPROC(__put_user_4)
22621
22622 ENTRY(__put_user_8)
22623 ENTER
22624 +
22625 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22626 + GET_THREAD_INFO(%_ASM_BX)
22627 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22628 sub $7,%_ASM_BX
22629 cmp %_ASM_BX,%_ASM_CX
22630 jae bad_put_user
22631 -4: mov %_ASM_AX,(%_ASM_CX)
22632 +
22633 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22634 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22635 + cmp %_ASM_BX,%_ASM_CX
22636 + jb 1234f
22637 + xor %ebx,%ebx
22638 +1234:
22639 +#endif
22640 +
22641 +#endif
22642 +
22643 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22644 #ifdef CONFIG_X86_32
22645 -5: movl %edx,4(%_ASM_CX)
22646 +5: __copyuser_seg movl %edx,4(_DEST)
22647 #endif
22648 xor %eax,%eax
22649 EXIT
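
The __put_user_* stubs above now come in three flavours: the plain addr_limit check, an i386+UDEREF variant where the store goes through the user segment (__copyuser_seg adds a gs prefix) and the explicit limit check is compiled out, the expectation being that the segment itself bounds the access, and an amd64+UDEREF variant where the destination becomes a base+index form: %rbx is loaded with PAX_USER_SHADOW_BASE and zeroed again if the pointer already lies inside the shadow area. Roughly, the amd64 logic in C (the base value below is only a placeholder, not the real configuration-dependent constant):

    /* Sketch of the amd64 UDEREF rebasing in the __put_user_* stubs. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAX_USER_SHADOW_BASE 0xffff880000000000ULL   /* placeholder */

    static uint64_t uderef_rebase(uint64_t uaddr)
    {
        uint64_t base = PAX_USER_SHADOW_BASE;
        if (uaddr >= base)      /* already a shadow address: keep as-is */
            base = 0;           /* mirrors the "xor %ebx,%ebx" path */
        return uaddr + base;    /* mirrors the (%rcx,%rbx) addressing */
    }

    int main(void)
    {
        printf("%#llx\n",
               (unsigned long long)uderef_rebase(0x7fffffff1000ULL));
        return 0;
    }
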
22650 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22651 index 1cad221..de671ee 100644
22652 --- a/arch/x86/lib/rwlock.S
22653 +++ b/arch/x86/lib/rwlock.S
22654 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22655 FRAME
22656 0: LOCK_PREFIX
22657 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22658 +
22659 +#ifdef CONFIG_PAX_REFCOUNT
22660 + jno 1234f
22661 + LOCK_PREFIX
22662 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22663 + int $4
22664 +1234:
22665 + _ASM_EXTABLE(1234b, 1234b)
22666 +#endif
22667 +
22668 1: rep; nop
22669 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22670 jne 1b
22671 LOCK_PREFIX
22672 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22673 +
22674 +#ifdef CONFIG_PAX_REFCOUNT
22675 + jno 1234f
22676 + LOCK_PREFIX
22677 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22678 + int $4
22679 +1234:
22680 + _ASM_EXTABLE(1234b, 1234b)
22681 +#endif
22682 +
22683 jnz 0b
22684 ENDFRAME
22685 + pax_force_retaddr
22686 ret
22687 CFI_ENDPROC
22688 END(__write_lock_failed)
22689 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22690 FRAME
22691 0: LOCK_PREFIX
22692 READ_LOCK_SIZE(inc) (%__lock_ptr)
22693 +
22694 +#ifdef CONFIG_PAX_REFCOUNT
22695 + jno 1234f
22696 + LOCK_PREFIX
22697 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22698 + int $4
22699 +1234:
22700 + _ASM_EXTABLE(1234b, 1234b)
22701 +#endif
22702 +
22703 1: rep; nop
22704 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22705 js 1b
22706 LOCK_PREFIX
22707 READ_LOCK_SIZE(dec) (%__lock_ptr)
22708 +
22709 +#ifdef CONFIG_PAX_REFCOUNT
22710 + jno 1234f
22711 + LOCK_PREFIX
22712 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22713 + int $4
22714 +1234:
22715 + _ASM_EXTABLE(1234b, 1234b)
22716 +#endif
22717 +
22718 js 0b
22719 ENDFRAME
22720 + pax_force_retaddr
22721 ret
22722 CFI_ENDPROC
22723 END(__read_lock_failed)
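
The rwlock slow paths gain the PAX_REFCOUNT overflow check: after each locked add/inc, jno skips the recovery code when the OF flag is clear; on signed overflow the operation is undone and int $4 raises the overflow exception, with the _ASM_EXTABLE entry mapping the trapping instruction to itself so execution resumes there once the handler has reported the event. A rough user-space analogy of the idea only, not the kernel mechanism:

    /* Detect signed overflow on an increment, undo it, and report,
     * instead of letting the counter wrap. */
    #include <limits.h>
    #include <stdio.h>

    static int checked_inc(int *counter)
    {
        int next;
        if (__builtin_add_overflow(*counter, 1, &next)) {
            /* overflow: leave the counter as it was and signal the caller */
            fprintf(stderr, "refcount overflow detected\n");
            return -1;
        }
        *counter = next;
        return 0;
    }

    int main(void)
    {
        int c = INT_MAX;
        return checked_inc(&c) == -1 ? 0 : 1;
    }
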
22724 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22725 index 5dff5f0..cadebf4 100644
22726 --- a/arch/x86/lib/rwsem.S
22727 +++ b/arch/x86/lib/rwsem.S
22728 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22729 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22730 CFI_RESTORE __ASM_REG(dx)
22731 restore_common_regs
22732 + pax_force_retaddr
22733 ret
22734 CFI_ENDPROC
22735 ENDPROC(call_rwsem_down_read_failed)
22736 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22737 movq %rax,%rdi
22738 call rwsem_down_write_failed
22739 restore_common_regs
22740 + pax_force_retaddr
22741 ret
22742 CFI_ENDPROC
22743 ENDPROC(call_rwsem_down_write_failed)
22744 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22745 movq %rax,%rdi
22746 call rwsem_wake
22747 restore_common_regs
22748 -1: ret
22749 +1: pax_force_retaddr
22750 + ret
22751 CFI_ENDPROC
22752 ENDPROC(call_rwsem_wake)
22753
22754 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22755 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22756 CFI_RESTORE __ASM_REG(dx)
22757 restore_common_regs
22758 + pax_force_retaddr
22759 ret
22760 CFI_ENDPROC
22761 ENDPROC(call_rwsem_downgrade_wake)
22762 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22763 index a63efd6..ccecad8 100644
22764 --- a/arch/x86/lib/thunk_64.S
22765 +++ b/arch/x86/lib/thunk_64.S
22766 @@ -8,6 +8,7 @@
22767 #include <linux/linkage.h>
22768 #include <asm/dwarf2.h>
22769 #include <asm/calling.h>
22770 +#include <asm/alternative-asm.h>
22771
22772 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22773 .macro THUNK name, func, put_ret_addr_in_rdi=0
22774 @@ -41,5 +42,6 @@
22775 SAVE_ARGS
22776 restore:
22777 RESTORE_ARGS
22778 + pax_force_retaddr
22779 ret
22780 CFI_ENDPROC
22781 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22782 index ef2a6a5..3b28862 100644
22783 --- a/arch/x86/lib/usercopy_32.c
22784 +++ b/arch/x86/lib/usercopy_32.c
22785 @@ -41,10 +41,12 @@ do { \
22786 int __d0; \
22787 might_fault(); \
22788 __asm__ __volatile__( \
22789 + __COPYUSER_SET_ES \
22790 "0: rep; stosl\n" \
22791 " movl %2,%0\n" \
22792 "1: rep; stosb\n" \
22793 "2:\n" \
22794 + __COPYUSER_RESTORE_ES \
22795 ".section .fixup,\"ax\"\n" \
22796 "3: lea 0(%2,%0,4),%0\n" \
22797 " jmp 2b\n" \
22798 @@ -113,6 +115,7 @@ long strnlen_user(const char __user *s, long n)
22799 might_fault();
22800
22801 __asm__ __volatile__(
22802 + __COPYUSER_SET_ES
22803 " testl %0, %0\n"
22804 " jz 3f\n"
22805 " andl %0,%%ecx\n"
22806 @@ -121,6 +124,7 @@ long strnlen_user(const char __user *s, long n)
22807 " subl %%ecx,%0\n"
22808 " addl %0,%%eax\n"
22809 "1:\n"
22810 + __COPYUSER_RESTORE_ES
22811 ".section .fixup,\"ax\"\n"
22812 "2: xorl %%eax,%%eax\n"
22813 " jmp 1b\n"
22814 @@ -140,7 +144,7 @@ EXPORT_SYMBOL(strnlen_user);
22815
22816 #ifdef CONFIG_X86_INTEL_USERCOPY
22817 static unsigned long
22818 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22819 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22820 {
22821 int d0, d1;
22822 __asm__ __volatile__(
22823 @@ -152,36 +156,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22824 " .align 2,0x90\n"
22825 "3: movl 0(%4), %%eax\n"
22826 "4: movl 4(%4), %%edx\n"
22827 - "5: movl %%eax, 0(%3)\n"
22828 - "6: movl %%edx, 4(%3)\n"
22829 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22830 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22831 "7: movl 8(%4), %%eax\n"
22832 "8: movl 12(%4),%%edx\n"
22833 - "9: movl %%eax, 8(%3)\n"
22834 - "10: movl %%edx, 12(%3)\n"
22835 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22836 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22837 "11: movl 16(%4), %%eax\n"
22838 "12: movl 20(%4), %%edx\n"
22839 - "13: movl %%eax, 16(%3)\n"
22840 - "14: movl %%edx, 20(%3)\n"
22841 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22842 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22843 "15: movl 24(%4), %%eax\n"
22844 "16: movl 28(%4), %%edx\n"
22845 - "17: movl %%eax, 24(%3)\n"
22846 - "18: movl %%edx, 28(%3)\n"
22847 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22848 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22849 "19: movl 32(%4), %%eax\n"
22850 "20: movl 36(%4), %%edx\n"
22851 - "21: movl %%eax, 32(%3)\n"
22852 - "22: movl %%edx, 36(%3)\n"
22853 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22854 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22855 "23: movl 40(%4), %%eax\n"
22856 "24: movl 44(%4), %%edx\n"
22857 - "25: movl %%eax, 40(%3)\n"
22858 - "26: movl %%edx, 44(%3)\n"
22859 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22860 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22861 "27: movl 48(%4), %%eax\n"
22862 "28: movl 52(%4), %%edx\n"
22863 - "29: movl %%eax, 48(%3)\n"
22864 - "30: movl %%edx, 52(%3)\n"
22865 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22866 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22867 "31: movl 56(%4), %%eax\n"
22868 "32: movl 60(%4), %%edx\n"
22869 - "33: movl %%eax, 56(%3)\n"
22870 - "34: movl %%edx, 60(%3)\n"
22871 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22872 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22873 " addl $-64, %0\n"
22874 " addl $64, %4\n"
22875 " addl $64, %3\n"
22876 @@ -191,10 +195,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22877 " shrl $2, %0\n"
22878 " andl $3, %%eax\n"
22879 " cld\n"
22880 + __COPYUSER_SET_ES
22881 "99: rep; movsl\n"
22882 "36: movl %%eax, %0\n"
22883 "37: rep; movsb\n"
22884 "100:\n"
22885 + __COPYUSER_RESTORE_ES
22886 ".section .fixup,\"ax\"\n"
22887 "101: lea 0(%%eax,%0,4),%0\n"
22888 " jmp 100b\n"
22889 @@ -247,46 +253,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22890 }
22891
22892 static unsigned long
22893 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22894 +{
22895 + int d0, d1;
22896 + __asm__ __volatile__(
22897 + " .align 2,0x90\n"
22898 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22899 + " cmpl $67, %0\n"
22900 + " jbe 3f\n"
22901 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22902 + " .align 2,0x90\n"
22903 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22904 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22905 + "5: movl %%eax, 0(%3)\n"
22906 + "6: movl %%edx, 4(%3)\n"
22907 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22908 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22909 + "9: movl %%eax, 8(%3)\n"
22910 + "10: movl %%edx, 12(%3)\n"
22911 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22912 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22913 + "13: movl %%eax, 16(%3)\n"
22914 + "14: movl %%edx, 20(%3)\n"
22915 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22916 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22917 + "17: movl %%eax, 24(%3)\n"
22918 + "18: movl %%edx, 28(%3)\n"
22919 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22920 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22921 + "21: movl %%eax, 32(%3)\n"
22922 + "22: movl %%edx, 36(%3)\n"
22923 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22924 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22925 + "25: movl %%eax, 40(%3)\n"
22926 + "26: movl %%edx, 44(%3)\n"
22927 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22928 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22929 + "29: movl %%eax, 48(%3)\n"
22930 + "30: movl %%edx, 52(%3)\n"
22931 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22932 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22933 + "33: movl %%eax, 56(%3)\n"
22934 + "34: movl %%edx, 60(%3)\n"
22935 + " addl $-64, %0\n"
22936 + " addl $64, %4\n"
22937 + " addl $64, %3\n"
22938 + " cmpl $63, %0\n"
22939 + " ja 1b\n"
22940 + "35: movl %0, %%eax\n"
22941 + " shrl $2, %0\n"
22942 + " andl $3, %%eax\n"
22943 + " cld\n"
22944 + "99: rep; "__copyuser_seg" movsl\n"
22945 + "36: movl %%eax, %0\n"
22946 + "37: rep; "__copyuser_seg" movsb\n"
22947 + "100:\n"
22948 + ".section .fixup,\"ax\"\n"
22949 + "101: lea 0(%%eax,%0,4),%0\n"
22950 + " jmp 100b\n"
22951 + ".previous\n"
22952 + ".section __ex_table,\"a\"\n"
22953 + " .align 4\n"
22954 + " .long 1b,100b\n"
22955 + " .long 2b,100b\n"
22956 + " .long 3b,100b\n"
22957 + " .long 4b,100b\n"
22958 + " .long 5b,100b\n"
22959 + " .long 6b,100b\n"
22960 + " .long 7b,100b\n"
22961 + " .long 8b,100b\n"
22962 + " .long 9b,100b\n"
22963 + " .long 10b,100b\n"
22964 + " .long 11b,100b\n"
22965 + " .long 12b,100b\n"
22966 + " .long 13b,100b\n"
22967 + " .long 14b,100b\n"
22968 + " .long 15b,100b\n"
22969 + " .long 16b,100b\n"
22970 + " .long 17b,100b\n"
22971 + " .long 18b,100b\n"
22972 + " .long 19b,100b\n"
22973 + " .long 20b,100b\n"
22974 + " .long 21b,100b\n"
22975 + " .long 22b,100b\n"
22976 + " .long 23b,100b\n"
22977 + " .long 24b,100b\n"
22978 + " .long 25b,100b\n"
22979 + " .long 26b,100b\n"
22980 + " .long 27b,100b\n"
22981 + " .long 28b,100b\n"
22982 + " .long 29b,100b\n"
22983 + " .long 30b,100b\n"
22984 + " .long 31b,100b\n"
22985 + " .long 32b,100b\n"
22986 + " .long 33b,100b\n"
22987 + " .long 34b,100b\n"
22988 + " .long 35b,100b\n"
22989 + " .long 36b,100b\n"
22990 + " .long 37b,100b\n"
22991 + " .long 99b,101b\n"
22992 + ".previous"
22993 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
22994 + : "1"(to), "2"(from), "0"(size)
22995 + : "eax", "edx", "memory");
22996 + return size;
22997 +}
22998 +
22999 +static unsigned long
23000 +__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23001 +static unsigned long
23002 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23003 {
23004 int d0, d1;
23005 __asm__ __volatile__(
23006 " .align 2,0x90\n"
23007 - "0: movl 32(%4), %%eax\n"
23008 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23009 " cmpl $67, %0\n"
23010 " jbe 2f\n"
23011 - "1: movl 64(%4), %%eax\n"
23012 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23013 " .align 2,0x90\n"
23014 - "2: movl 0(%4), %%eax\n"
23015 - "21: movl 4(%4), %%edx\n"
23016 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23017 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23018 " movl %%eax, 0(%3)\n"
23019 " movl %%edx, 4(%3)\n"
23020 - "3: movl 8(%4), %%eax\n"
23021 - "31: movl 12(%4),%%edx\n"
23022 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23023 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23024 " movl %%eax, 8(%3)\n"
23025 " movl %%edx, 12(%3)\n"
23026 - "4: movl 16(%4), %%eax\n"
23027 - "41: movl 20(%4), %%edx\n"
23028 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23029 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23030 " movl %%eax, 16(%3)\n"
23031 " movl %%edx, 20(%3)\n"
23032 - "10: movl 24(%4), %%eax\n"
23033 - "51: movl 28(%4), %%edx\n"
23034 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23035 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23036 " movl %%eax, 24(%3)\n"
23037 " movl %%edx, 28(%3)\n"
23038 - "11: movl 32(%4), %%eax\n"
23039 - "61: movl 36(%4), %%edx\n"
23040 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23041 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23042 " movl %%eax, 32(%3)\n"
23043 " movl %%edx, 36(%3)\n"
23044 - "12: movl 40(%4), %%eax\n"
23045 - "71: movl 44(%4), %%edx\n"
23046 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23047 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23048 " movl %%eax, 40(%3)\n"
23049 " movl %%edx, 44(%3)\n"
23050 - "13: movl 48(%4), %%eax\n"
23051 - "81: movl 52(%4), %%edx\n"
23052 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23053 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23054 " movl %%eax, 48(%3)\n"
23055 " movl %%edx, 52(%3)\n"
23056 - "14: movl 56(%4), %%eax\n"
23057 - "91: movl 60(%4), %%edx\n"
23058 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23059 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23060 " movl %%eax, 56(%3)\n"
23061 " movl %%edx, 60(%3)\n"
23062 " addl $-64, %0\n"
23063 @@ -298,9 +413,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23064 " shrl $2, %0\n"
23065 " andl $3, %%eax\n"
23066 " cld\n"
23067 - "6: rep; movsl\n"
23068 + "6: rep; "__copyuser_seg" movsl\n"
23069 " movl %%eax,%0\n"
23070 - "7: rep; movsb\n"
23071 + "7: rep; "__copyuser_seg" movsb\n"
23072 "8:\n"
23073 ".section .fixup,\"ax\"\n"
23074 "9: lea 0(%%eax,%0,4),%0\n"
23075 @@ -347,47 +462,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23076 */
23077
23078 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23079 + const void __user *from, unsigned long size) __size_overflow(3);
23080 +static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23081 const void __user *from, unsigned long size)
23082 {
23083 int d0, d1;
23084
23085 __asm__ __volatile__(
23086 " .align 2,0x90\n"
23087 - "0: movl 32(%4), %%eax\n"
23088 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23089 " cmpl $67, %0\n"
23090 " jbe 2f\n"
23091 - "1: movl 64(%4), %%eax\n"
23092 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23093 " .align 2,0x90\n"
23094 - "2: movl 0(%4), %%eax\n"
23095 - "21: movl 4(%4), %%edx\n"
23096 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23097 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23098 " movnti %%eax, 0(%3)\n"
23099 " movnti %%edx, 4(%3)\n"
23100 - "3: movl 8(%4), %%eax\n"
23101 - "31: movl 12(%4),%%edx\n"
23102 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23103 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23104 " movnti %%eax, 8(%3)\n"
23105 " movnti %%edx, 12(%3)\n"
23106 - "4: movl 16(%4), %%eax\n"
23107 - "41: movl 20(%4), %%edx\n"
23108 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23109 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23110 " movnti %%eax, 16(%3)\n"
23111 " movnti %%edx, 20(%3)\n"
23112 - "10: movl 24(%4), %%eax\n"
23113 - "51: movl 28(%4), %%edx\n"
23114 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23115 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23116 " movnti %%eax, 24(%3)\n"
23117 " movnti %%edx, 28(%3)\n"
23118 - "11: movl 32(%4), %%eax\n"
23119 - "61: movl 36(%4), %%edx\n"
23120 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23121 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23122 " movnti %%eax, 32(%3)\n"
23123 " movnti %%edx, 36(%3)\n"
23124 - "12: movl 40(%4), %%eax\n"
23125 - "71: movl 44(%4), %%edx\n"
23126 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23127 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23128 " movnti %%eax, 40(%3)\n"
23129 " movnti %%edx, 44(%3)\n"
23130 - "13: movl 48(%4), %%eax\n"
23131 - "81: movl 52(%4), %%edx\n"
23132 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23133 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23134 " movnti %%eax, 48(%3)\n"
23135 " movnti %%edx, 52(%3)\n"
23136 - "14: movl 56(%4), %%eax\n"
23137 - "91: movl 60(%4), %%edx\n"
23138 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23139 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23140 " movnti %%eax, 56(%3)\n"
23141 " movnti %%edx, 60(%3)\n"
23142 " addl $-64, %0\n"
23143 @@ -400,9 +517,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23144 " shrl $2, %0\n"
23145 " andl $3, %%eax\n"
23146 " cld\n"
23147 - "6: rep; movsl\n"
23148 + "6: rep; "__copyuser_seg" movsl\n"
23149 " movl %%eax,%0\n"
23150 - "7: rep; movsb\n"
23151 + "7: rep; "__copyuser_seg" movsb\n"
23152 "8:\n"
23153 ".section .fixup,\"ax\"\n"
23154 "9: lea 0(%%eax,%0,4),%0\n"
23155 @@ -444,47 +561,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23156 }
23157
23158 static unsigned long __copy_user_intel_nocache(void *to,
23159 + const void __user *from, unsigned long size) __size_overflow(3);
23160 +static unsigned long __copy_user_intel_nocache(void *to,
23161 const void __user *from, unsigned long size)
23162 {
23163 int d0, d1;
23164
23165 __asm__ __volatile__(
23166 " .align 2,0x90\n"
23167 - "0: movl 32(%4), %%eax\n"
23168 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23169 " cmpl $67, %0\n"
23170 " jbe 2f\n"
23171 - "1: movl 64(%4), %%eax\n"
23172 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23173 " .align 2,0x90\n"
23174 - "2: movl 0(%4), %%eax\n"
23175 - "21: movl 4(%4), %%edx\n"
23176 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23177 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23178 " movnti %%eax, 0(%3)\n"
23179 " movnti %%edx, 4(%3)\n"
23180 - "3: movl 8(%4), %%eax\n"
23181 - "31: movl 12(%4),%%edx\n"
23182 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23183 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23184 " movnti %%eax, 8(%3)\n"
23185 " movnti %%edx, 12(%3)\n"
23186 - "4: movl 16(%4), %%eax\n"
23187 - "41: movl 20(%4), %%edx\n"
23188 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23189 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23190 " movnti %%eax, 16(%3)\n"
23191 " movnti %%edx, 20(%3)\n"
23192 - "10: movl 24(%4), %%eax\n"
23193 - "51: movl 28(%4), %%edx\n"
23194 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23195 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23196 " movnti %%eax, 24(%3)\n"
23197 " movnti %%edx, 28(%3)\n"
23198 - "11: movl 32(%4), %%eax\n"
23199 - "61: movl 36(%4), %%edx\n"
23200 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23201 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23202 " movnti %%eax, 32(%3)\n"
23203 " movnti %%edx, 36(%3)\n"
23204 - "12: movl 40(%4), %%eax\n"
23205 - "71: movl 44(%4), %%edx\n"
23206 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23207 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23208 " movnti %%eax, 40(%3)\n"
23209 " movnti %%edx, 44(%3)\n"
23210 - "13: movl 48(%4), %%eax\n"
23211 - "81: movl 52(%4), %%edx\n"
23212 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23213 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23214 " movnti %%eax, 48(%3)\n"
23215 " movnti %%edx, 52(%3)\n"
23216 - "14: movl 56(%4), %%eax\n"
23217 - "91: movl 60(%4), %%edx\n"
23218 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23219 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23220 " movnti %%eax, 56(%3)\n"
23221 " movnti %%edx, 60(%3)\n"
23222 " addl $-64, %0\n"
23223 @@ -497,9 +616,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23224 " shrl $2, %0\n"
23225 " andl $3, %%eax\n"
23226 " cld\n"
23227 - "6: rep; movsl\n"
23228 + "6: rep; "__copyuser_seg" movsl\n"
23229 " movl %%eax,%0\n"
23230 - "7: rep; movsb\n"
23231 + "7: rep; "__copyuser_seg" movsb\n"
23232 "8:\n"
23233 ".section .fixup,\"ax\"\n"
23234 "9: lea 0(%%eax,%0,4),%0\n"
23235 @@ -542,32 +661,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23236 */
23237 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23238 unsigned long size);
23239 -unsigned long __copy_user_intel(void __user *to, const void *from,
23240 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23241 + unsigned long size);
23242 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23243 unsigned long size);
23244 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23245 const void __user *from, unsigned long size);
23246 #endif /* CONFIG_X86_INTEL_USERCOPY */
23247
23248 /* Generic arbitrary sized copy. */
23249 -#define __copy_user(to, from, size) \
23250 +#define __copy_user(to, from, size, prefix, set, restore) \
23251 do { \
23252 int __d0, __d1, __d2; \
23253 __asm__ __volatile__( \
23254 + set \
23255 " cmp $7,%0\n" \
23256 " jbe 1f\n" \
23257 " movl %1,%0\n" \
23258 " negl %0\n" \
23259 " andl $7,%0\n" \
23260 " subl %0,%3\n" \
23261 - "4: rep; movsb\n" \
23262 + "4: rep; "prefix"movsb\n" \
23263 " movl %3,%0\n" \
23264 " shrl $2,%0\n" \
23265 " andl $3,%3\n" \
23266 " .align 2,0x90\n" \
23267 - "0: rep; movsl\n" \
23268 + "0: rep; "prefix"movsl\n" \
23269 " movl %3,%0\n" \
23270 - "1: rep; movsb\n" \
23271 + "1: rep; "prefix"movsb\n" \
23272 "2:\n" \
23273 + restore \
23274 ".section .fixup,\"ax\"\n" \
23275 "5: addl %3,%0\n" \
23276 " jmp 2b\n" \
23277 @@ -595,14 +718,14 @@ do { \
23278 " negl %0\n" \
23279 " andl $7,%0\n" \
23280 " subl %0,%3\n" \
23281 - "4: rep; movsb\n" \
23282 + "4: rep; "__copyuser_seg"movsb\n" \
23283 " movl %3,%0\n" \
23284 " shrl $2,%0\n" \
23285 " andl $3,%3\n" \
23286 " .align 2,0x90\n" \
23287 - "0: rep; movsl\n" \
23288 + "0: rep; "__copyuser_seg"movsl\n" \
23289 " movl %3,%0\n" \
23290 - "1: rep; movsb\n" \
23291 + "1: rep; "__copyuser_seg"movsb\n" \
23292 "2:\n" \
23293 ".section .fixup,\"ax\"\n" \
23294 "5: addl %3,%0\n" \
23295 @@ -688,9 +811,9 @@ survive:
23296 }
23297 #endif
23298 if (movsl_is_ok(to, from, n))
23299 - __copy_user(to, from, n);
23300 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23301 else
23302 - n = __copy_user_intel(to, from, n);
23303 + n = __generic_copy_to_user_intel(to, from, n);
23304 return n;
23305 }
23306 EXPORT_SYMBOL(__copy_to_user_ll);
23307 @@ -710,10 +833,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23308 unsigned long n)
23309 {
23310 if (movsl_is_ok(to, from, n))
23311 - __copy_user(to, from, n);
23312 + __copy_user(to, from, n, __copyuser_seg, "", "");
23313 else
23314 - n = __copy_user_intel((void __user *)to,
23315 - (const void *)from, n);
23316 + n = __generic_copy_from_user_intel(to, from, n);
23317 return n;
23318 }
23319 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23320 @@ -740,65 +862,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23321 if (n > 64 && cpu_has_xmm2)
23322 n = __copy_user_intel_nocache(to, from, n);
23323 else
23324 - __copy_user(to, from, n);
23325 + __copy_user(to, from, n, __copyuser_seg, "", "");
23326 #else
23327 - __copy_user(to, from, n);
23328 + __copy_user(to, from, n, __copyuser_seg, "", "");
23329 #endif
23330 return n;
23331 }
23332 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23333
23334 -/**
23335 - * copy_to_user: - Copy a block of data into user space.
23336 - * @to: Destination address, in user space.
23337 - * @from: Source address, in kernel space.
23338 - * @n: Number of bytes to copy.
23339 - *
23340 - * Context: User context only. This function may sleep.
23341 - *
23342 - * Copy data from kernel space to user space.
23343 - *
23344 - * Returns number of bytes that could not be copied.
23345 - * On success, this will be zero.
23346 - */
23347 -unsigned long
23348 -copy_to_user(void __user *to, const void *from, unsigned long n)
23349 -{
23350 - if (access_ok(VERIFY_WRITE, to, n))
23351 - n = __copy_to_user(to, from, n);
23352 - return n;
23353 -}
23354 -EXPORT_SYMBOL(copy_to_user);
23355 -
23356 -/**
23357 - * copy_from_user: - Copy a block of data from user space.
23358 - * @to: Destination address, in kernel space.
23359 - * @from: Source address, in user space.
23360 - * @n: Number of bytes to copy.
23361 - *
23362 - * Context: User context only. This function may sleep.
23363 - *
23364 - * Copy data from user space to kernel space.
23365 - *
23366 - * Returns number of bytes that could not be copied.
23367 - * On success, this will be zero.
23368 - *
23369 - * If some data could not be copied, this function will pad the copied
23370 - * data to the requested size using zero bytes.
23371 - */
23372 -unsigned long
23373 -_copy_from_user(void *to, const void __user *from, unsigned long n)
23374 -{
23375 - if (access_ok(VERIFY_READ, from, n))
23376 - n = __copy_from_user(to, from, n);
23377 - else
23378 - memset(to, 0, n);
23379 - return n;
23380 -}
23381 -EXPORT_SYMBOL(_copy_from_user);
23382 -
23383 void copy_from_user_overflow(void)
23384 {
23385 WARN(1, "Buffer overflow detected!\n");
23386 }
23387 EXPORT_SYMBOL(copy_from_user_overflow);
23388 +
23389 +void copy_to_user_overflow(void)
23390 +{
23391 + WARN(1, "Buffer overflow detected!\n");
23392 +}
23393 +EXPORT_SYMBOL(copy_to_user_overflow);
23394 +
23395 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23396 +void __set_fs(mm_segment_t x)
23397 +{
23398 + switch (x.seg) {
23399 + case 0:
23400 + loadsegment(gs, 0);
23401 + break;
23402 + case TASK_SIZE_MAX:
23403 + loadsegment(gs, __USER_DS);
23404 + break;
23405 + case -1UL:
23406 + loadsegment(gs, __KERNEL_DS);
23407 + break;
23408 + default:
23409 + BUG();
23410 + }
23411 + return;
23412 +}
23413 +EXPORT_SYMBOL(__set_fs);
23414 +
23415 +void set_fs(mm_segment_t x)
23416 +{
23417 + current_thread_info()->addr_limit = x;
23418 + __set_fs(x);
23419 +}
23420 +EXPORT_SYMBOL(set_fs);
23421 +#endif
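
__copy_user() in usercopy_32.c is reworked to take a segment prefix plus set/restore fragments as extra macro arguments, and the Intel variants gain "__copyuser_seg" in front of each user-side access; both rely only on C's compile-time concatenation of adjacent string literals to build the asm template. Under i386 UDEREF the file also grows gs-loading set_fs()/__set_fs() helpers. A minimal sketch of the literal-splicing mechanism, with SEG_PREFIX as a stand-in for __copyuser_seg:

    #include <stdio.h>

    #define SEG_PREFIX "gs; "   /* would expand to "" when UDEREF is off */

    int main(void)
    {
        /* "0: rep; " SEG_PREFIX "movsl\n" is a single string literal after
         * preprocessing, exactly like the templates in the hunks above. */
        fputs("0: rep; " SEG_PREFIX "movsl\n", stdout);
        return 0;
    }
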
23422 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23423 index 0d0326f..5c5f91e 100644
23424 --- a/arch/x86/lib/usercopy_64.c
23425 +++ b/arch/x86/lib/usercopy_64.c
23426 @@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23427 {
23428 long __d0;
23429 might_fault();
23430 +
23431 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23432 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23433 + addr += PAX_USER_SHADOW_BASE;
23434 +#endif
23435 +
23436 /* no memory constraint because it doesn't change any memory gcc knows
23437 about */
23438 asm volatile(
23439 @@ -100,12 +106,20 @@ long strlen_user(const char __user *s)
23440 }
23441 EXPORT_SYMBOL(strlen_user);
23442
23443 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23444 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23445 {
23446 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23447 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23448 - }
23449 - return len;
23450 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23451 +
23452 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23453 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23454 + to += PAX_USER_SHADOW_BASE;
23455 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23456 + from += PAX_USER_SHADOW_BASE;
23457 +#endif
23458 +
23459 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23460 + }
23461 + return len;
23462 }
23463 EXPORT_SYMBOL(copy_in_user);
23464
23465 @@ -115,7 +129,7 @@ EXPORT_SYMBOL(copy_in_user);
23466 * it is not necessary to optimize tail handling.
23467 */
23468 unsigned long
23469 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23470 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23471 {
23472 char c;
23473 unsigned zero_len;
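
In usercopy_64.c the UDEREF build rebases pointers below PAX_USER_SHADOW_BASE before clearing or copying, and the copy_in_user()/copy_user_handle_tail() prototypes widen the length from unsigned to unsigned long, presumably so a length of 4 GiB or more cannot be silently truncated on a 64-bit kernel. The truncation the wider type avoids:

    /* A 32-bit length parameter drops the upper bits of a 64-bit size. */
    #include <stdio.h>

    static unsigned long take_long(unsigned long len) { return len; }
    static unsigned long take_int(unsigned len)       { return len; }

    int main(void)
    {
        unsigned long big = (1UL << 32) + 16;    /* 4 GiB + 16 bytes */
        printf("unsigned long: %lu\n", take_long(big));
        printf("unsigned:      %lu\n", take_int(big));   /* truncated to 16 */
        return 0;
    }
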
23474 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23475 index 1fb85db..8b3540b 100644
23476 --- a/arch/x86/mm/extable.c
23477 +++ b/arch/x86/mm/extable.c
23478 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23479 const struct exception_table_entry *fixup;
23480
23481 #ifdef CONFIG_PNPBIOS
23482 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23483 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23484 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23485 extern u32 pnp_bios_is_utter_crap;
23486 pnp_bios_is_utter_crap = 1;
23487 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23488 index 3ecfd1a..304d554 100644
23489 --- a/arch/x86/mm/fault.c
23490 +++ b/arch/x86/mm/fault.c
23491 @@ -13,11 +13,18 @@
23492 #include <linux/perf_event.h> /* perf_sw_event */
23493 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23494 #include <linux/prefetch.h> /* prefetchw */
23495 +#include <linux/unistd.h>
23496 +#include <linux/compiler.h>
23497
23498 #include <asm/traps.h> /* dotraplinkage, ... */
23499 #include <asm/pgalloc.h> /* pgd_*(), ... */
23500 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23501 #include <asm/fixmap.h> /* VSYSCALL_START */
23502 +#include <asm/tlbflush.h>
23503 +
23504 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23505 +#include <asm/stacktrace.h>
23506 +#endif
23507
23508 /*
23509 * Page fault error code bits:
23510 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23511 int ret = 0;
23512
23513 /* kprobe_running() needs smp_processor_id() */
23514 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23515 + if (kprobes_built_in() && !user_mode(regs)) {
23516 preempt_disable();
23517 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23518 ret = 1;
23519 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23520 return !instr_lo || (instr_lo>>1) == 1;
23521 case 0x00:
23522 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23523 - if (probe_kernel_address(instr, opcode))
23524 + if (user_mode(regs)) {
23525 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23526 + return 0;
23527 + } else if (probe_kernel_address(instr, opcode))
23528 return 0;
23529
23530 *prefetch = (instr_lo == 0xF) &&
23531 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23532 while (instr < max_instr) {
23533 unsigned char opcode;
23534
23535 - if (probe_kernel_address(instr, opcode))
23536 + if (user_mode(regs)) {
23537 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23538 + break;
23539 + } else if (probe_kernel_address(instr, opcode))
23540 break;
23541
23542 instr++;
23543 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23544 force_sig_info(si_signo, &info, tsk);
23545 }
23546
23547 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23548 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23549 +#endif
23550 +
23551 +#ifdef CONFIG_PAX_EMUTRAMP
23552 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23553 +#endif
23554 +
23555 +#ifdef CONFIG_PAX_PAGEEXEC
23556 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23557 +{
23558 + pgd_t *pgd;
23559 + pud_t *pud;
23560 + pmd_t *pmd;
23561 +
23562 + pgd = pgd_offset(mm, address);
23563 + if (!pgd_present(*pgd))
23564 + return NULL;
23565 + pud = pud_offset(pgd, address);
23566 + if (!pud_present(*pud))
23567 + return NULL;
23568 + pmd = pmd_offset(pud, address);
23569 + if (!pmd_present(*pmd))
23570 + return NULL;
23571 + return pmd;
23572 +}
23573 +#endif
23574 +
23575 DEFINE_SPINLOCK(pgd_lock);
23576 LIST_HEAD(pgd_list);
23577
23578 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23579 for (address = VMALLOC_START & PMD_MASK;
23580 address >= TASK_SIZE && address < FIXADDR_TOP;
23581 address += PMD_SIZE) {
23582 +
23583 +#ifdef CONFIG_PAX_PER_CPU_PGD
23584 + unsigned long cpu;
23585 +#else
23586 struct page *page;
23587 +#endif
23588
23589 spin_lock(&pgd_lock);
23590 +
23591 +#ifdef CONFIG_PAX_PER_CPU_PGD
23592 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23593 + pgd_t *pgd = get_cpu_pgd(cpu);
23594 + pmd_t *ret;
23595 +#else
23596 list_for_each_entry(page, &pgd_list, lru) {
23597 + pgd_t *pgd = page_address(page);
23598 spinlock_t *pgt_lock;
23599 pmd_t *ret;
23600
23601 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23602 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23603
23604 spin_lock(pgt_lock);
23605 - ret = vmalloc_sync_one(page_address(page), address);
23606 +#endif
23607 +
23608 + ret = vmalloc_sync_one(pgd, address);
23609 +
23610 +#ifndef CONFIG_PAX_PER_CPU_PGD
23611 spin_unlock(pgt_lock);
23612 +#endif
23613
23614 if (!ret)
23615 break;
23616 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23617 * an interrupt in the middle of a task switch..
23618 */
23619 pgd_paddr = read_cr3();
23620 +
23621 +#ifdef CONFIG_PAX_PER_CPU_PGD
23622 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23623 +#endif
23624 +
23625 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23626 if (!pmd_k)
23627 return -1;
23628 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23629 * happen within a race in page table update. In the later
23630 * case just flush:
23631 */
23632 +
23633 +#ifdef CONFIG_PAX_PER_CPU_PGD
23634 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23635 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23636 +#else
23637 pgd = pgd_offset(current->active_mm, address);
23638 +#endif
23639 +
23640 pgd_ref = pgd_offset_k(address);
23641 if (pgd_none(*pgd_ref))
23642 return -1;
23643 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23644 static int is_errata100(struct pt_regs *regs, unsigned long address)
23645 {
23646 #ifdef CONFIG_X86_64
23647 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23648 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23649 return 1;
23650 #endif
23651 return 0;
23652 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23653 }
23654
23655 static const char nx_warning[] = KERN_CRIT
23656 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23657 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23658
23659 static void
23660 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23661 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23662 if (!oops_may_print())
23663 return;
23664
23665 - if (error_code & PF_INSTR) {
23666 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23667 unsigned int level;
23668
23669 pte_t *pte = lookup_address(address, &level);
23670
23671 if (pte && pte_present(*pte) && !pte_exec(*pte))
23672 - printk(nx_warning, current_uid());
23673 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23674 }
23675
23676 +#ifdef CONFIG_PAX_KERNEXEC
23677 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23678 + if (current->signal->curr_ip)
23679 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23680 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23681 + else
23682 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23683 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23684 + }
23685 +#endif
23686 +
23687 printk(KERN_ALERT "BUG: unable to handle kernel ");
23688 if (address < PAGE_SIZE)
23689 printk(KERN_CONT "NULL pointer dereference");
23690 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23691 }
23692 #endif
23693
23694 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23695 + if (pax_is_fetch_fault(regs, error_code, address)) {
23696 +
23697 +#ifdef CONFIG_PAX_EMUTRAMP
23698 + switch (pax_handle_fetch_fault(regs)) {
23699 + case 2:
23700 + return;
23701 + }
23702 +#endif
23703 +
23704 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23705 + do_group_exit(SIGKILL);
23706 + }
23707 +#endif
23708 +
23709 if (unlikely(show_unhandled_signals))
23710 show_signal_msg(regs, error_code, address, tsk);
23711
23712 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23713 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23714 printk(KERN_ERR
23715 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23716 - tsk->comm, tsk->pid, address);
23717 + tsk->comm, task_pid_nr(tsk), address);
23718 code = BUS_MCEERR_AR;
23719 }
23720 #endif
23721 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23722 return 1;
23723 }
23724
23725 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23726 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23727 +{
23728 + pte_t *pte;
23729 + pmd_t *pmd;
23730 + spinlock_t *ptl;
23731 + unsigned char pte_mask;
23732 +
23733 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23734 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23735 + return 0;
23736 +
23737 + /* PaX: it's our fault, let's handle it if we can */
23738 +
23739 + /* PaX: take a look at read faults before acquiring any locks */
23740 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23741 + /* instruction fetch attempt from a protected page in user mode */
23742 + up_read(&mm->mmap_sem);
23743 +
23744 +#ifdef CONFIG_PAX_EMUTRAMP
23745 + switch (pax_handle_fetch_fault(regs)) {
23746 + case 2:
23747 + return 1;
23748 + }
23749 +#endif
23750 +
23751 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23752 + do_group_exit(SIGKILL);
23753 + }
23754 +
23755 + pmd = pax_get_pmd(mm, address);
23756 + if (unlikely(!pmd))
23757 + return 0;
23758 +
23759 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23760 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23761 + pte_unmap_unlock(pte, ptl);
23762 + return 0;
23763 + }
23764 +
23765 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23766 + /* write attempt to a protected page in user mode */
23767 + pte_unmap_unlock(pte, ptl);
23768 + return 0;
23769 + }
23770 +
23771 +#ifdef CONFIG_SMP
23772 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23773 +#else
23774 + if (likely(address > get_limit(regs->cs)))
23775 +#endif
23776 + {
23777 + set_pte(pte, pte_mkread(*pte));
23778 + __flush_tlb_one(address);
23779 + pte_unmap_unlock(pte, ptl);
23780 + up_read(&mm->mmap_sem);
23781 + return 1;
23782 + }
23783 +
23784 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23785 +
23786 + /*
23787 + * PaX: fill DTLB with user rights and retry
23788 + */
23789 + __asm__ __volatile__ (
23790 + "orb %2,(%1)\n"
23791 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23792 +/*
23793 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23794 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23795 + * page fault when examined during a TLB load attempt. this is true not only
23796 + * for PTEs holding a non-present entry but also present entries that will
23797 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23798 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23799 + * for our target pages since their PTEs are simply not in the TLBs at all.
23800 +
23801 + * the best thing in omitting it is that we gain around 15-20% speed in the
23802 + * fast path of the page fault handler and can get rid of tracing since we
23803 + * can no longer flush unintended entries.
23804 + */
23805 + "invlpg (%0)\n"
23806 +#endif
23807 + __copyuser_seg"testb $0,(%0)\n"
23808 + "xorb %3,(%1)\n"
23809 + :
23810 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23811 + : "memory", "cc");
23812 + pte_unmap_unlock(pte, ptl);
23813 + up_read(&mm->mmap_sem);
23814 + return 1;
23815 +}
23816 +#endif
23817 +
23818 /*
23819 * Handle a spurious fault caused by a stale TLB entry.
23820 *
23821 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23822 static inline int
23823 access_error(unsigned long error_code, struct vm_area_struct *vma)
23824 {
23825 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23826 + return 1;
23827 +
23828 if (error_code & PF_WRITE) {
23829 /* write, present and write, not present: */
23830 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23831 @@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23832 {
23833 struct vm_area_struct *vma;
23834 struct task_struct *tsk;
23835 - unsigned long address;
23836 struct mm_struct *mm;
23837 int fault;
23838 int write = error_code & PF_WRITE;
23839 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23840 (write ? FAULT_FLAG_WRITE : 0);
23841
23842 - tsk = current;
23843 - mm = tsk->mm;
23844 -
23845 /* Get the faulting address: */
23846 - address = read_cr2();
23847 + unsigned long address = read_cr2();
23848 +
23849 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23850 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23851 + if (!search_exception_tables(regs->ip)) {
23852 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23853 + bad_area_nosemaphore(regs, error_code, address);
23854 + return;
23855 + }
23856 + if (address < PAX_USER_SHADOW_BASE) {
23857 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23858 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23859 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23860 + } else
23861 + address -= PAX_USER_SHADOW_BASE;
23862 + }
23863 +#endif
23864 +
23865 + tsk = current;
23866 + mm = tsk->mm;
23867
23868 /*
23869 * Detect and handle instructions that would cause a page fault for
23870 @@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23871 * User-mode registers count as a user access even for any
23872 * potential system fault or CPU buglet:
23873 */
23874 - if (user_mode_vm(regs)) {
23875 + if (user_mode(regs)) {
23876 local_irq_enable();
23877 error_code |= PF_USER;
23878 } else {
23879 @@ -1132,6 +1339,11 @@ retry:
23880 might_sleep();
23881 }
23882
23883 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23884 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23885 + return;
23886 +#endif
23887 +
23888 vma = find_vma(mm, address);
23889 if (unlikely(!vma)) {
23890 bad_area(regs, error_code, address);
23891 @@ -1143,18 +1355,24 @@ retry:
23892 bad_area(regs, error_code, address);
23893 return;
23894 }
23895 - if (error_code & PF_USER) {
23896 - /*
23897 - * Accessing the stack below %sp is always a bug.
23898 - * The large cushion allows instructions like enter
23899 - * and pusha to work. ("enter $65535, $31" pushes
23900 - * 32 pointers and then decrements %sp by 65535.)
23901 - */
23902 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23903 - bad_area(regs, error_code, address);
23904 - return;
23905 - }
23906 + /*
23907 + * Accessing the stack below %sp is always a bug.
23908 + * The large cushion allows instructions like enter
23909 + * and pusha to work. ("enter $65535, $31" pushes
23910 + * 32 pointers and then decrements %sp by 65535.)
23911 + */
23912 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23913 + bad_area(regs, error_code, address);
23914 + return;
23915 }
23916 +
23917 +#ifdef CONFIG_PAX_SEGMEXEC
23918 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23919 + bad_area(regs, error_code, address);
23920 + return;
23921 + }
23922 +#endif
23923 +
23924 if (unlikely(expand_stack(vma, address))) {
23925 bad_area(regs, error_code, address);
23926 return;
23927 @@ -1209,3 +1427,292 @@ good_area:
23928
23929 up_read(&mm->mmap_sem);
23930 }
23931 +
23932 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23933 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
23934 +{
23935 + struct mm_struct *mm = current->mm;
23936 + unsigned long ip = regs->ip;
23937 +
23938 + if (v8086_mode(regs))
23939 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
23940 +
23941 +#ifdef CONFIG_PAX_PAGEEXEC
23942 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
23943 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
23944 + return true;
23945 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
23946 + return true;
23947 + return false;
23948 + }
23949 +#endif
23950 +
23951 +#ifdef CONFIG_PAX_SEGMEXEC
23952 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
23953 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
23954 + return true;
23955 + return false;
23956 + }
23957 +#endif
23958 +
23959 + return false;
23960 +}
23961 +#endif
23962 +
23963 +#ifdef CONFIG_PAX_EMUTRAMP
23964 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
23965 +{
23966 + int err;
23967 +
23968 + do { /* PaX: libffi trampoline emulation */
23969 + unsigned char mov, jmp;
23970 + unsigned int addr1, addr2;
23971 +
23972 +#ifdef CONFIG_X86_64
23973 + if ((regs->ip + 9) >> 32)
23974 + break;
23975 +#endif
23976 +
23977 + err = get_user(mov, (unsigned char __user *)regs->ip);
23978 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23979 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23980 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23981 +
23982 + if (err)
23983 + break;
23984 +
23985 + if (mov == 0xB8 && jmp == 0xE9) {
23986 + regs->ax = addr1;
23987 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23988 + return 2;
23989 + }
23990 + } while (0);
23991 +
23992 + do { /* PaX: gcc trampoline emulation #1 */
23993 + unsigned char mov1, mov2;
23994 + unsigned short jmp;
23995 + unsigned int addr1, addr2;
23996 +
23997 +#ifdef CONFIG_X86_64
23998 + if ((regs->ip + 11) >> 32)
23999 + break;
24000 +#endif
24001 +
24002 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24003 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24004 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24005 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24006 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24007 +
24008 + if (err)
24009 + break;
24010 +
24011 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24012 + regs->cx = addr1;
24013 + regs->ax = addr2;
24014 + regs->ip = addr2;
24015 + return 2;
24016 + }
24017 + } while (0);
24018 +
24019 + do { /* PaX: gcc trampoline emulation #2 */
24020 + unsigned char mov, jmp;
24021 + unsigned int addr1, addr2;
24022 +
24023 +#ifdef CONFIG_X86_64
24024 + if ((regs->ip + 9) >> 32)
24025 + break;
24026 +#endif
24027 +
24028 + err = get_user(mov, (unsigned char __user *)regs->ip);
24029 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24030 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24031 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24032 +
24033 + if (err)
24034 + break;
24035 +
24036 + if (mov == 0xB9 && jmp == 0xE9) {
24037 + regs->cx = addr1;
24038 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24039 + return 2;
24040 + }
24041 + } while (0);
24042 +
24043 + return 1; /* PaX in action */
24044 +}
24045 +
24046 +#ifdef CONFIG_X86_64
24047 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24048 +{
24049 + int err;
24050 +
24051 + do { /* PaX: libffi trampoline emulation */
24052 + unsigned short mov1, mov2, jmp1;
24053 + unsigned char stcclc, jmp2;
24054 + unsigned long addr1, addr2;
24055 +
24056 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24057 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24058 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24059 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24060 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24061 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24062 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24063 +
24064 + if (err)
24065 + break;
24066 +
24067 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24068 + regs->r11 = addr1;
24069 + regs->r10 = addr2;
24070 + if (stcclc == 0xF8)
24071 + regs->flags &= ~X86_EFLAGS_CF;
24072 + else
24073 + regs->flags |= X86_EFLAGS_CF;
24074 + regs->ip = addr1;
24075 + return 2;
24076 + }
24077 + } while (0);
24078 +
24079 + do { /* PaX: gcc trampoline emulation #1 */
24080 + unsigned short mov1, mov2, jmp1;
24081 + unsigned char jmp2;
24082 + unsigned int addr1;
24083 + unsigned long addr2;
24084 +
24085 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24086 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24087 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24088 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24089 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24090 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24091 +
24092 + if (err)
24093 + break;
24094 +
24095 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24096 + regs->r11 = addr1;
24097 + regs->r10 = addr2;
24098 + regs->ip = addr1;
24099 + return 2;
24100 + }
24101 + } while (0);
24102 +
24103 + do { /* PaX: gcc trampoline emulation #2 */
24104 + unsigned short mov1, mov2, jmp1;
24105 + unsigned char jmp2;
24106 + unsigned long addr1, addr2;
24107 +
24108 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24109 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24110 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24111 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24112 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24113 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24114 +
24115 + if (err)
24116 + break;
24117 +
24118 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24119 + regs->r11 = addr1;
24120 + regs->r10 = addr2;
24121 + regs->ip = addr1;
24122 + return 2;
24123 + }
24124 + } while (0);
24125 +
24126 + return 1; /* PaX in action */
24127 +}
24128 +#endif
24129 +
24130 +/*
24131 + * PaX: decide what to do with offenders (regs->ip = fault address)
24132 + *
24133 + * returns 1 when task should be killed
24134 + * 2 when gcc trampoline was detected
24135 + */
24136 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24137 +{
24138 + if (v8086_mode(regs))
24139 + return 1;
24140 +
24141 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24142 + return 1;
24143 +
24144 +#ifdef CONFIG_X86_32
24145 + return pax_handle_fetch_fault_32(regs);
24146 +#else
24147 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24148 + return pax_handle_fetch_fault_32(regs);
24149 + else
24150 + return pax_handle_fetch_fault_64(regs);
24151 +#endif
24152 +}
24153 +#endif
24154 +
24155 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24156 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24157 +{
24158 + long i;
24159 +
24160 + printk(KERN_ERR "PAX: bytes at PC: ");
24161 + for (i = 0; i < 20; i++) {
24162 + unsigned char c;
24163 + if (get_user(c, (unsigned char __force_user *)pc+i))
24164 + printk(KERN_CONT "?? ");
24165 + else
24166 + printk(KERN_CONT "%02x ", c);
24167 + }
24168 + printk("\n");
24169 +
24170 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24171 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24172 + unsigned long c;
24173 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24174 +#ifdef CONFIG_X86_32
24175 + printk(KERN_CONT "???????? ");
24176 +#else
24177 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24178 + printk(KERN_CONT "???????? ???????? ");
24179 + else
24180 + printk(KERN_CONT "???????????????? ");
24181 +#endif
24182 + } else {
24183 +#ifdef CONFIG_X86_64
24184 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24185 + printk(KERN_CONT "%08x ", (unsigned int)c);
24186 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24187 + } else
24188 +#endif
24189 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24190 + }
24191 + }
24192 + printk("\n");
24193 +}
24194 +#endif
24195 +
24196 +/**
24197 + * probe_kernel_write(): safely attempt to write to a location
24198 + * @dst: address to write to
24199 + * @src: pointer to the data that shall be written
24200 + * @size: size of the data chunk
24201 + *
24202 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24203 + * happens, handle that and return -EFAULT.
24204 + */
24205 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24206 +{
24207 + long ret;
24208 + mm_segment_t old_fs = get_fs();
24209 +
24210 + set_fs(KERNEL_DS);
24211 + pagefault_disable();
24212 + pax_open_kernel();
24213 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24214 + pax_close_kernel();
24215 + pagefault_enable();
24216 + set_fs(old_fs);
24217 +
24218 + return ret ? -EFAULT : 0;
24219 +}
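
The CONFIG_PAX_EMUTRAMP helpers added above recognize a few fixed instruction byte patterns at the faulting ip -- for the 32-bit case, sequences such as mov $imm32,%ecx; mov $imm32,%eax; jmp *%eax -- and emulate them by loading the registers and redirecting ip instead of killing the task. A minimal user-space sketch of the same pattern match, with a plain byte buffer standing in for the kernel's get_user() reads (illustrative only, not the patch's code):

/*
 * Recognize the 32-bit "gcc trampoline #1" pattern matched above:
 * B9 imm32 (mov $a,%ecx), B8 imm32 (mov $b,%eax), FF E0 (jmp *%eax).
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int is_gcc_trampoline_1(const uint8_t *ip, uint32_t *cx, uint32_t *ax)
{
	uint16_t jmp;

	if (ip[0] != 0xB9 || ip[5] != 0xB8)
		return 0;
	memcpy(&jmp, ip + 10, sizeof(jmp));	/* FF E0 reads as 0xE0FF on x86 */
	if (jmp != 0xE0FF)
		return 0;
	memcpy(cx, ip + 1, sizeof(*cx));	/* value the trampoline puts in %ecx */
	memcpy(ax, ip + 6, sizeof(*ax));	/* jump target, becomes the new ip */
	return 1;
}

int main(void)
{
	/* mov $0x11223344,%ecx; mov $0x55667788,%eax; jmp *%eax */
	const uint8_t tramp[] = { 0xB9, 0x44, 0x33, 0x22, 0x11,
				  0xB8, 0x88, 0x77, 0x66, 0x55,
				  0xFF, 0xE0 };
	uint32_t cx, ax;

	if (is_gcc_trampoline_1(tramp, &cx, &ax))
		printf("trampoline: ecx=%#x, new ip=%#x\n", cx, ax);
	return 0;
}
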
24220 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24221 index dd74e46..7d26398 100644
24222 --- a/arch/x86/mm/gup.c
24223 +++ b/arch/x86/mm/gup.c
24224 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24225 addr = start;
24226 len = (unsigned long) nr_pages << PAGE_SHIFT;
24227 end = start + len;
24228 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24229 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24230 (void __user *)start, len)))
24231 return 0;
24232
24233 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24234 index 6f31ee5..8ee4164 100644
24235 --- a/arch/x86/mm/highmem_32.c
24236 +++ b/arch/x86/mm/highmem_32.c
24237 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24238 idx = type + KM_TYPE_NR*smp_processor_id();
24239 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24240 BUG_ON(!pte_none(*(kmap_pte-idx)));
24241 +
24242 + pax_open_kernel();
24243 set_pte(kmap_pte-idx, mk_pte(page, prot));
24244 + pax_close_kernel();
24245 +
24246 arch_flush_lazy_mmu_mode();
24247
24248 return (void *)vaddr;
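
The kmap_atomic_prot() hunk above brackets the set_pte() with pax_open_kernel()/pax_close_kernel(), which are defined elsewhere in this patch and briefly lift the write protection KERNEXEC keeps on kernel page tables for the duration of one sanctioned store. The same open/write/close discipline can be illustrated in user space with mprotect() on a normally read-only page (an analogy only, not the kernel mechanism):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "initial");
	mprotect(p, pagesz, PROT_READ);			/* page is normally read-only */

	mprotect(p, pagesz, PROT_READ | PROT_WRITE);	/* "open" */
	strcpy(p, "updated");				/* the one sanctioned store */
	mprotect(p, pagesz, PROT_READ);			/* "close" */

	puts(p);
	return 0;
}
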
24249 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24250 index f6679a7..8f795a3 100644
24251 --- a/arch/x86/mm/hugetlbpage.c
24252 +++ b/arch/x86/mm/hugetlbpage.c
24253 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24254 struct hstate *h = hstate_file(file);
24255 struct mm_struct *mm = current->mm;
24256 struct vm_area_struct *vma;
24257 - unsigned long start_addr;
24258 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24259 +
24260 +#ifdef CONFIG_PAX_SEGMEXEC
24261 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24262 + pax_task_size = SEGMEXEC_TASK_SIZE;
24263 +#endif
24264 +
24265 + pax_task_size -= PAGE_SIZE;
24266
24267 if (len > mm->cached_hole_size) {
24268 - start_addr = mm->free_area_cache;
24269 + start_addr = mm->free_area_cache;
24270 } else {
24271 - start_addr = TASK_UNMAPPED_BASE;
24272 - mm->cached_hole_size = 0;
24273 + start_addr = mm->mmap_base;
24274 + mm->cached_hole_size = 0;
24275 }
24276
24277 full_search:
24278 @@ -280,26 +287,27 @@ full_search:
24279
24280 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24281 /* At this point: (!vma || addr < vma->vm_end). */
24282 - if (TASK_SIZE - len < addr) {
24283 + if (pax_task_size - len < addr) {
24284 /*
24285 * Start a new search - just in case we missed
24286 * some holes.
24287 */
24288 - if (start_addr != TASK_UNMAPPED_BASE) {
24289 - start_addr = TASK_UNMAPPED_BASE;
24290 + if (start_addr != mm->mmap_base) {
24291 + start_addr = mm->mmap_base;
24292 mm->cached_hole_size = 0;
24293 goto full_search;
24294 }
24295 return -ENOMEM;
24296 }
24297 - if (!vma || addr + len <= vma->vm_start) {
24298 - mm->free_area_cache = addr + len;
24299 - return addr;
24300 - }
24301 + if (check_heap_stack_gap(vma, addr, len))
24302 + break;
24303 if (addr + mm->cached_hole_size < vma->vm_start)
24304 mm->cached_hole_size = vma->vm_start - addr;
24305 addr = ALIGN(vma->vm_end, huge_page_size(h));
24306 }
24307 +
24308 + mm->free_area_cache = addr + len;
24309 + return addr;
24310 }
24311
24312 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24313 @@ -310,9 +318,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24314 struct mm_struct *mm = current->mm;
24315 struct vm_area_struct *vma;
24316 unsigned long base = mm->mmap_base;
24317 - unsigned long addr = addr0;
24318 + unsigned long addr;
24319 unsigned long largest_hole = mm->cached_hole_size;
24320 - unsigned long start_addr;
24321
24322 /* don't allow allocations above current base */
24323 if (mm->free_area_cache > base)
24324 @@ -322,16 +329,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24325 largest_hole = 0;
24326 mm->free_area_cache = base;
24327 }
24328 -try_again:
24329 - start_addr = mm->free_area_cache;
24330
24331 /* make sure it can fit in the remaining address space */
24332 if (mm->free_area_cache < len)
24333 goto fail;
24334
24335 /* either no address requested or can't fit in requested address hole */
24336 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24337 + addr = mm->free_area_cache - len;
24338 do {
24339 + addr &= huge_page_mask(h);
24340 /*
24341 * Lookup failure means no vma is above this address,
24342 * i.e. return with success:
24343 @@ -340,10 +346,10 @@ try_again:
24344 if (!vma)
24345 return addr;
24346
24347 - if (addr + len <= vma->vm_start) {
24348 + if (check_heap_stack_gap(vma, addr, len)) {
24349 /* remember the address as a hint for next time */
24350 - mm->cached_hole_size = largest_hole;
24351 - return (mm->free_area_cache = addr);
24352 + mm->cached_hole_size = largest_hole;
24353 + return (mm->free_area_cache = addr);
24354 } else if (mm->free_area_cache == vma->vm_end) {
24355 /* pull free_area_cache down to the first hole */
24356 mm->free_area_cache = vma->vm_start;
24357 @@ -352,29 +358,34 @@ try_again:
24358
24359 /* remember the largest hole we saw so far */
24360 if (addr + largest_hole < vma->vm_start)
24361 - largest_hole = vma->vm_start - addr;
24362 + largest_hole = vma->vm_start - addr;
24363
24364 /* try just below the current vma->vm_start */
24365 - addr = (vma->vm_start - len) & huge_page_mask(h);
24366 - } while (len <= vma->vm_start);
24367 + addr = skip_heap_stack_gap(vma, len);
24368 + } while (!IS_ERR_VALUE(addr));
24369
24370 fail:
24371 /*
24372 - * if hint left us with no space for the requested
24373 - * mapping then try again:
24374 - */
24375 - if (start_addr != base) {
24376 - mm->free_area_cache = base;
24377 - largest_hole = 0;
24378 - goto try_again;
24379 - }
24380 - /*
24381 * A failed mmap() very likely causes application failure,
24382 * so fall back to the bottom-up function here. This scenario
24383 * can happen with large stack limits and large mmap()
24384 * allocations.
24385 */
24386 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24387 +
24388 +#ifdef CONFIG_PAX_SEGMEXEC
24389 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24390 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24391 + else
24392 +#endif
24393 +
24394 + mm->mmap_base = TASK_UNMAPPED_BASE;
24395 +
24396 +#ifdef CONFIG_PAX_RANDMMAP
24397 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24398 + mm->mmap_base += mm->delta_mmap;
24399 +#endif
24400 +
24401 + mm->free_area_cache = mm->mmap_base;
24402 mm->cached_hole_size = ~0UL;
24403 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24404 len, pgoff, flags);
24405 @@ -382,6 +393,7 @@ fail:
24406 /*
24407 * Restore the topdown base:
24408 */
24409 + mm->mmap_base = base;
24410 mm->free_area_cache = base;
24411 mm->cached_hole_size = ~0UL;
24412
24413 @@ -395,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24414 struct hstate *h = hstate_file(file);
24415 struct mm_struct *mm = current->mm;
24416 struct vm_area_struct *vma;
24417 + unsigned long pax_task_size = TASK_SIZE;
24418
24419 if (len & ~huge_page_mask(h))
24420 return -EINVAL;
24421 - if (len > TASK_SIZE)
24422 +
24423 +#ifdef CONFIG_PAX_SEGMEXEC
24424 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24425 + pax_task_size = SEGMEXEC_TASK_SIZE;
24426 +#endif
24427 +
24428 + pax_task_size -= PAGE_SIZE;
24429 +
24430 + if (len > pax_task_size)
24431 return -ENOMEM;
24432
24433 if (flags & MAP_FIXED) {
24434 @@ -410,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24435 if (addr) {
24436 addr = ALIGN(addr, huge_page_size(h));
24437 vma = find_vma(mm, addr);
24438 - if (TASK_SIZE - len >= addr &&
24439 - (!vma || addr + len <= vma->vm_start))
24440 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24441 return addr;
24442 }
24443 if (mm->get_unmapped_area == arch_get_unmapped_area)
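
The hugetlb allocator hunks above replace the bare addr + len <= vma->vm_start test with check_heap_stack_gap(), which is introduced elsewhere in this patch; the intent is to keep a guard gap between new mappings and a downward-growing stack vma rather than packing them flush against it. A simplified sketch of such a check (field names follow the kernel, the gap size is illustrative and the real helper also looks at the previous vma):

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN	0x00000100UL	/* same flag bit the kernel uses */
#define GUARD_GAP	(64UL * 1024)	/* illustrative fixed gap */

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
};

/*
 * Does [addr, addr+len) fit below the next vma?  If that vma is a
 * downward-growing stack, demand an extra guard gap as well.
 */
static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
	if (!next)
		return true;			/* nothing above us */
	if (next->vm_flags & VM_GROWSDOWN)
		return addr + len + GUARD_GAP <= next->vm_start;
	return addr + len <= next->vm_start;
}

int main(void)
{
	struct vma stack = { 0x7f000000UL, 0x7f021000UL, VM_GROWSDOWN };

	printf("%d %d\n", gap_ok(&stack, 0x7e000000UL, 0x1000UL),
			  gap_ok(&stack, 0x7effffffUL, 0x10000UL));
	return 0;
}
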
24444 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24445 index 4f0cec7..00976ce 100644
24446 --- a/arch/x86/mm/init.c
24447 +++ b/arch/x86/mm/init.c
24448 @@ -16,6 +16,8 @@
24449 #include <asm/tlb.h>
24450 #include <asm/proto.h>
24451 #include <asm/dma.h> /* for MAX_DMA_PFN */
24452 +#include <asm/desc.h>
24453 +#include <asm/bios_ebda.h>
24454
24455 unsigned long __initdata pgt_buf_start;
24456 unsigned long __meminitdata pgt_buf_end;
24457 @@ -32,7 +34,7 @@ int direct_gbpages
24458 static void __init find_early_table_space(unsigned long end, int use_pse,
24459 int use_gbpages)
24460 {
24461 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24462 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24463 phys_addr_t base;
24464
24465 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24466 @@ -311,10 +313,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24467 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24468 * mmio resources as well as potential bios/acpi data regions.
24469 */
24470 +
24471 +#ifdef CONFIG_GRKERNSEC_KMEM
24472 +static unsigned int ebda_start __read_only;
24473 +static unsigned int ebda_end __read_only;
24474 +#endif
24475 +
24476 int devmem_is_allowed(unsigned long pagenr)
24477 {
24478 +#ifdef CONFIG_GRKERNSEC_KMEM
24479 + /* allow BDA */
24480 + if (!pagenr)
24481 + return 1;
24482 + /* allow EBDA */
24483 + if (pagenr >= ebda_start && pagenr < ebda_end)
24484 + return 1;
24485 +#else
24486 + if (!pagenr)
24487 + return 1;
24488 +#ifdef CONFIG_VM86
24489 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24490 + return 1;
24491 +#endif
24492 +#endif
24493 +
24494 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24495 + return 1;
24496 +#ifdef CONFIG_GRKERNSEC_KMEM
24497 + /* throw out everything else below 1MB */
24498 if (pagenr <= 256)
24499 - return 1;
24500 + return 0;
24501 +#endif
24502 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24503 return 0;
24504 if (!page_is_ram(pagenr))
24505 @@ -371,8 +400,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24506 #endif
24507 }
24508
24509 +#ifdef CONFIG_GRKERNSEC_KMEM
24510 +static inline void gr_init_ebda(void)
24511 +{
24512 + unsigned int ebda_addr;
24513 + unsigned int ebda_size = 0;
24514 +
24515 + ebda_addr = get_bios_ebda();
24516 + if (ebda_addr) {
24517 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24518 + ebda_size <<= 10;
24519 + }
24520 + if (ebda_addr && ebda_size) {
24521 + ebda_start = ebda_addr >> PAGE_SHIFT;
24522 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24523 + } else {
24524 + ebda_start = 0x9f000 >> PAGE_SHIFT;
24525 + ebda_end = 0xa0000 >> PAGE_SHIFT;
24526 + }
24527 +}
24528 +#else
24529 +static inline void gr_init_ebda(void) { }
24530 +#endif
24531 +
24532 void free_initmem(void)
24533 {
24534 +#ifdef CONFIG_PAX_KERNEXEC
24535 +#ifdef CONFIG_X86_32
24536 + /* PaX: limit KERNEL_CS to actual size */
24537 + unsigned long addr, limit;
24538 + struct desc_struct d;
24539 + int cpu;
24540 +#else
24541 + pgd_t *pgd;
24542 + pud_t *pud;
24543 + pmd_t *pmd;
24544 + unsigned long addr, end;
24545 +#endif
24546 +#endif
24547 +
24548 + gr_init_ebda();
24549 +
24550 +#ifdef CONFIG_PAX_KERNEXEC
24551 +#ifdef CONFIG_X86_32
24552 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24553 + limit = (limit - 1UL) >> PAGE_SHIFT;
24554 +
24555 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24556 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24557 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24558 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24559 + }
24560 +
24561 + /* PaX: make KERNEL_CS read-only */
24562 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24563 + if (!paravirt_enabled())
24564 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24565 +/*
24566 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24567 + pgd = pgd_offset_k(addr);
24568 + pud = pud_offset(pgd, addr);
24569 + pmd = pmd_offset(pud, addr);
24570 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24571 + }
24572 +*/
24573 +#ifdef CONFIG_X86_PAE
24574 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24575 +/*
24576 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24577 + pgd = pgd_offset_k(addr);
24578 + pud = pud_offset(pgd, addr);
24579 + pmd = pmd_offset(pud, addr);
24580 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24581 + }
24582 +*/
24583 +#endif
24584 +
24585 +#ifdef CONFIG_MODULES
24586 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24587 +#endif
24588 +
24589 +#else
24590 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24591 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24592 + pgd = pgd_offset_k(addr);
24593 + pud = pud_offset(pgd, addr);
24594 + pmd = pmd_offset(pud, addr);
24595 + if (!pmd_present(*pmd))
24596 + continue;
24597 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24598 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24599 + else
24600 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24601 + }
24602 +
24603 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24604 + end = addr + KERNEL_IMAGE_SIZE;
24605 + for (; addr < end; addr += PMD_SIZE) {
24606 + pgd = pgd_offset_k(addr);
24607 + pud = pud_offset(pgd, addr);
24608 + pmd = pmd_offset(pud, addr);
24609 + if (!pmd_present(*pmd))
24610 + continue;
24611 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24612 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24613 + }
24614 +#endif
24615 +
24616 + flush_tlb_all();
24617 +#endif
24618 +
24619 free_init_pages("unused kernel memory",
24620 (unsigned long)(&__init_begin),
24621 (unsigned long)(&__init_end));
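
With CONFIG_GRKERNSEC_KMEM, devmem_is_allowed() above narrows /dev/mem access below 1MB to the BDA (page 0), the EBDA range detected by gr_init_ebda() and the ISA hole, instead of the stock kernel's blanket allowance of the first 256 pages. The same policy restated as a stand-alone predicate over page frame numbers (fixed fallback values stand in for the detected EBDA bounds):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define ISA_START_ADDRESS	0xa0000UL
#define ISA_END_ADDRESS		0x100000UL

/* In the patch these come from gr_init_ebda(); fallback values here. */
static unsigned long ebda_start = 0x9f000UL >> PAGE_SHIFT;
static unsigned long ebda_end   = 0xa0000UL >> PAGE_SHIFT;

/*
 * Mirror of the CONFIG_GRKERNSEC_KMEM branch above for pages below 1MB;
 * pages above that fall through to the kernel's existing iomem/RAM checks,
 * which are not modeled here.
 */
static bool low_page_allowed(unsigned long pagenr)
{
	if (!pagenr)						/* BDA */
		return true;
	if (pagenr >= ebda_start && pagenr < ebda_end)		/* EBDA */
		return true;
	if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
	    pagenr <  (ISA_END_ADDRESS >> PAGE_SHIFT))		/* ISA hole */
		return true;
	return pagenr > 256;	/* everything else under 1MB is refused */
}

int main(void)
{
	printf("%d %d %d\n", low_page_allowed(0),	/* BDA: allowed */
			     low_page_allowed(0x10),	/* low RAM: refused */
			     low_page_allowed(0xb8));	/* VGA text page: allowed */
	return 0;
}
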
24622 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24623 index 575d86f..4987469 100644
24624 --- a/arch/x86/mm/init_32.c
24625 +++ b/arch/x86/mm/init_32.c
24626 @@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
24627 }
24628
24629 /*
24630 - * Creates a middle page table and puts a pointer to it in the
24631 - * given global directory entry. This only returns the gd entry
24632 - * in non-PAE compilation mode, since the middle layer is folded.
24633 - */
24634 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24635 -{
24636 - pud_t *pud;
24637 - pmd_t *pmd_table;
24638 -
24639 -#ifdef CONFIG_X86_PAE
24640 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24641 - if (after_bootmem)
24642 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24643 - else
24644 - pmd_table = (pmd_t *)alloc_low_page();
24645 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24646 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24647 - pud = pud_offset(pgd, 0);
24648 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24649 -
24650 - return pmd_table;
24651 - }
24652 -#endif
24653 - pud = pud_offset(pgd, 0);
24654 - pmd_table = pmd_offset(pud, 0);
24655 -
24656 - return pmd_table;
24657 -}
24658 -
24659 -/*
24660 * Create a page table and place a pointer to it in a middle page
24661 * directory entry:
24662 */
24663 @@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24664 page_table = (pte_t *)alloc_low_page();
24665
24666 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24667 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24668 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24669 +#else
24670 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24671 +#endif
24672 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24673 }
24674
24675 return pte_offset_kernel(pmd, 0);
24676 }
24677
24678 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24679 +{
24680 + pud_t *pud;
24681 + pmd_t *pmd_table;
24682 +
24683 + pud = pud_offset(pgd, 0);
24684 + pmd_table = pmd_offset(pud, 0);
24685 +
24686 + return pmd_table;
24687 +}
24688 +
24689 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24690 {
24691 int pgd_idx = pgd_index(vaddr);
24692 @@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24693 int pgd_idx, pmd_idx;
24694 unsigned long vaddr;
24695 pgd_t *pgd;
24696 + pud_t *pud;
24697 pmd_t *pmd;
24698 pte_t *pte = NULL;
24699
24700 @@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24701 pgd = pgd_base + pgd_idx;
24702
24703 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24704 - pmd = one_md_table_init(pgd);
24705 - pmd = pmd + pmd_index(vaddr);
24706 + pud = pud_offset(pgd, vaddr);
24707 + pmd = pmd_offset(pud, vaddr);
24708 +
24709 +#ifdef CONFIG_X86_PAE
24710 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24711 +#endif
24712 +
24713 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24714 pmd++, pmd_idx++) {
24715 pte = page_table_kmap_check(one_page_table_init(pmd),
24716 @@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24717 }
24718 }
24719
24720 -static inline int is_kernel_text(unsigned long addr)
24721 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24722 {
24723 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24724 - return 1;
24725 - return 0;
24726 + if ((start > ktla_ktva((unsigned long)_etext) ||
24727 + end <= ktla_ktva((unsigned long)_stext)) &&
24728 + (start > ktla_ktva((unsigned long)_einittext) ||
24729 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24730 +
24731 +#ifdef CONFIG_ACPI_SLEEP
24732 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24733 +#endif
24734 +
24735 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24736 + return 0;
24737 + return 1;
24738 }
24739
24740 /*
24741 @@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
24742 unsigned long last_map_addr = end;
24743 unsigned long start_pfn, end_pfn;
24744 pgd_t *pgd_base = swapper_pg_dir;
24745 - int pgd_idx, pmd_idx, pte_ofs;
24746 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24747 unsigned long pfn;
24748 pgd_t *pgd;
24749 + pud_t *pud;
24750 pmd_t *pmd;
24751 pte_t *pte;
24752 unsigned pages_2m, pages_4k;
24753 @@ -280,8 +281,13 @@ repeat:
24754 pfn = start_pfn;
24755 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24756 pgd = pgd_base + pgd_idx;
24757 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24758 - pmd = one_md_table_init(pgd);
24759 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24760 + pud = pud_offset(pgd, 0);
24761 + pmd = pmd_offset(pud, 0);
24762 +
24763 +#ifdef CONFIG_X86_PAE
24764 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24765 +#endif
24766
24767 if (pfn >= end_pfn)
24768 continue;
24769 @@ -293,14 +299,13 @@ repeat:
24770 #endif
24771 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24772 pmd++, pmd_idx++) {
24773 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24774 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24775
24776 /*
24777 * Map with big pages if possible, otherwise
24778 * create normal page tables:
24779 */
24780 if (use_pse) {
24781 - unsigned int addr2;
24782 pgprot_t prot = PAGE_KERNEL_LARGE;
24783 /*
24784 * first pass will use the same initial
24785 @@ -310,11 +315,7 @@ repeat:
24786 __pgprot(PTE_IDENT_ATTR |
24787 _PAGE_PSE);
24788
24789 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24790 - PAGE_OFFSET + PAGE_SIZE-1;
24791 -
24792 - if (is_kernel_text(addr) ||
24793 - is_kernel_text(addr2))
24794 + if (is_kernel_text(address, address + PMD_SIZE))
24795 prot = PAGE_KERNEL_LARGE_EXEC;
24796
24797 pages_2m++;
24798 @@ -331,7 +332,7 @@ repeat:
24799 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24800 pte += pte_ofs;
24801 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24802 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24803 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24804 pgprot_t prot = PAGE_KERNEL;
24805 /*
24806 * first pass will use the same initial
24807 @@ -339,7 +340,7 @@ repeat:
24808 */
24809 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24810
24811 - if (is_kernel_text(addr))
24812 + if (is_kernel_text(address, address + PAGE_SIZE))
24813 prot = PAGE_KERNEL_EXEC;
24814
24815 pages_4k++;
24816 @@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24817
24818 pud = pud_offset(pgd, va);
24819 pmd = pmd_offset(pud, va);
24820 - if (!pmd_present(*pmd))
24821 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24822 break;
24823
24824 pte = pte_offset_kernel(pmd, va);
24825 @@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
24826
24827 static void __init pagetable_init(void)
24828 {
24829 - pgd_t *pgd_base = swapper_pg_dir;
24830 -
24831 - permanent_kmaps_init(pgd_base);
24832 + permanent_kmaps_init(swapper_pg_dir);
24833 }
24834
24835 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24836 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24837 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24838
24839 /* user-defined highmem size */
24840 @@ -734,6 +733,12 @@ void __init mem_init(void)
24841
24842 pci_iommu_alloc();
24843
24844 +#ifdef CONFIG_PAX_PER_CPU_PGD
24845 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24846 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24847 + KERNEL_PGD_PTRS);
24848 +#endif
24849 +
24850 #ifdef CONFIG_FLATMEM
24851 BUG_ON(!mem_map);
24852 #endif
24853 @@ -760,7 +765,7 @@ void __init mem_init(void)
24854 reservedpages++;
24855
24856 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24857 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24858 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24859 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24860
24861 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24862 @@ -801,10 +806,10 @@ void __init mem_init(void)
24863 ((unsigned long)&__init_end -
24864 (unsigned long)&__init_begin) >> 10,
24865
24866 - (unsigned long)&_etext, (unsigned long)&_edata,
24867 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24868 + (unsigned long)&_sdata, (unsigned long)&_edata,
24869 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24870
24871 - (unsigned long)&_text, (unsigned long)&_etext,
24872 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24873 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24874
24875 /*
24876 @@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
24877 if (!kernel_set_to_readonly)
24878 return;
24879
24880 + start = ktla_ktva(start);
24881 pr_debug("Set kernel text: %lx - %lx for read write\n",
24882 start, start+size);
24883
24884 @@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
24885 if (!kernel_set_to_readonly)
24886 return;
24887
24888 + start = ktla_ktva(start);
24889 pr_debug("Set kernel text: %lx - %lx for read only\n",
24890 start, start+size);
24891
24892 @@ -924,6 +931,7 @@ void mark_rodata_ro(void)
24893 unsigned long start = PFN_ALIGN(_text);
24894 unsigned long size = PFN_ALIGN(_etext) - start;
24895
24896 + start = ktla_ktva(start);
24897 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24898 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24899 size >> 10);
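
The rewritten is_kernel_text() above takes a [start, end) range and reports whether it overlaps any region that has to remain executable (kernel text, init text, the ACPI wakeup code, the low BIOS range), rather than testing a single address as before. Each clause is the usual interval test with "no overlap" spelled as start > region_end || end <= region_start; in isolation (region bounds are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Overlap is the negation of the "no overlap" form used in the patch. */
static bool range_overlaps(unsigned long start, unsigned long end,
			   unsigned long r_start, unsigned long r_end)
{
	return !(start > r_end || end <= r_start);
}

int main(void)
{
	unsigned long text_start = 0xc1000000UL, text_end = 0xc1400000UL;

	printf("%d %d\n",
	       range_overlaps(0xc1200000UL, 0xc1400000UL, text_start, text_end),
	       range_overlaps(0xc2000000UL, 0xc2200000UL, text_start, text_end));
	return 0;
}
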
24900 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24901 index fc18be0..e539653 100644
24902 --- a/arch/x86/mm/init_64.c
24903 +++ b/arch/x86/mm/init_64.c
24904 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24905 * around without checking the pgd every time.
24906 */
24907
24908 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24909 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24910 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24911
24912 int force_personality32;
24913 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24914
24915 for (address = start; address <= end; address += PGDIR_SIZE) {
24916 const pgd_t *pgd_ref = pgd_offset_k(address);
24917 +
24918 +#ifdef CONFIG_PAX_PER_CPU_PGD
24919 + unsigned long cpu;
24920 +#else
24921 struct page *page;
24922 +#endif
24923
24924 if (pgd_none(*pgd_ref))
24925 continue;
24926
24927 spin_lock(&pgd_lock);
24928 +
24929 +#ifdef CONFIG_PAX_PER_CPU_PGD
24930 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24931 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24932 +#else
24933 list_for_each_entry(page, &pgd_list, lru) {
24934 pgd_t *pgd;
24935 spinlock_t *pgt_lock;
24936 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24937 /* the pgt_lock only for Xen */
24938 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24939 spin_lock(pgt_lock);
24940 +#endif
24941
24942 if (pgd_none(*pgd))
24943 set_pgd(pgd, *pgd_ref);
24944 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24945 BUG_ON(pgd_page_vaddr(*pgd)
24946 != pgd_page_vaddr(*pgd_ref));
24947
24948 +#ifndef CONFIG_PAX_PER_CPU_PGD
24949 spin_unlock(pgt_lock);
24950 +#endif
24951 +
24952 }
24953 spin_unlock(&pgd_lock);
24954 }
24955 @@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
24956 {
24957 if (pgd_none(*pgd)) {
24958 pud_t *pud = (pud_t *)spp_getpage();
24959 - pgd_populate(&init_mm, pgd, pud);
24960 + pgd_populate_kernel(&init_mm, pgd, pud);
24961 if (pud != pud_offset(pgd, 0))
24962 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
24963 pud, pud_offset(pgd, 0));
24964 @@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
24965 {
24966 if (pud_none(*pud)) {
24967 pmd_t *pmd = (pmd_t *) spp_getpage();
24968 - pud_populate(&init_mm, pud, pmd);
24969 + pud_populate_kernel(&init_mm, pud, pmd);
24970 if (pmd != pmd_offset(pud, 0))
24971 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
24972 pmd, pmd_offset(pud, 0));
24973 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
24974 pmd = fill_pmd(pud, vaddr);
24975 pte = fill_pte(pmd, vaddr);
24976
24977 + pax_open_kernel();
24978 set_pte(pte, new_pte);
24979 + pax_close_kernel();
24980
24981 /*
24982 * It's enough to flush this one mapping.
24983 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
24984 pgd = pgd_offset_k((unsigned long)__va(phys));
24985 if (pgd_none(*pgd)) {
24986 pud = (pud_t *) spp_getpage();
24987 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
24988 - _PAGE_USER));
24989 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
24990 }
24991 pud = pud_offset(pgd, (unsigned long)__va(phys));
24992 if (pud_none(*pud)) {
24993 pmd = (pmd_t *) spp_getpage();
24994 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
24995 - _PAGE_USER));
24996 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
24997 }
24998 pmd = pmd_offset(pud, phys);
24999 BUG_ON(!pmd_none(*pmd));
25000 @@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25001 if (pfn >= pgt_buf_top)
25002 panic("alloc_low_page: ran out of memory");
25003
25004 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25005 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25006 clear_page(adr);
25007 *phys = pfn * PAGE_SIZE;
25008 return adr;
25009 @@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
25010
25011 phys = __pa(virt);
25012 left = phys & (PAGE_SIZE - 1);
25013 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25014 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25015 adr = (void *)(((unsigned long)adr) | left);
25016
25017 return adr;
25018 @@ -545,7 +559,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25019 unmap_low_page(pmd);
25020
25021 spin_lock(&init_mm.page_table_lock);
25022 - pud_populate(&init_mm, pud, __va(pmd_phys));
25023 + pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25024 spin_unlock(&init_mm.page_table_lock);
25025 }
25026 __flush_tlb_all();
25027 @@ -591,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
25028 unmap_low_page(pud);
25029
25030 spin_lock(&init_mm.page_table_lock);
25031 - pgd_populate(&init_mm, pgd, __va(pud_phys));
25032 + pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25033 spin_unlock(&init_mm.page_table_lock);
25034 pgd_changed = true;
25035 }
25036 @@ -683,6 +697,12 @@ void __init mem_init(void)
25037
25038 pci_iommu_alloc();
25039
25040 +#ifdef CONFIG_PAX_PER_CPU_PGD
25041 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25042 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25043 + KERNEL_PGD_PTRS);
25044 +#endif
25045 +
25046 /* clear_bss() already clear the empty_zero_page */
25047
25048 reservedpages = 0;
25049 @@ -843,8 +863,8 @@ int kern_addr_valid(unsigned long addr)
25050 static struct vm_area_struct gate_vma = {
25051 .vm_start = VSYSCALL_START,
25052 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25053 - .vm_page_prot = PAGE_READONLY_EXEC,
25054 - .vm_flags = VM_READ | VM_EXEC
25055 + .vm_page_prot = PAGE_READONLY,
25056 + .vm_flags = VM_READ
25057 };
25058
25059 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25060 @@ -878,7 +898,7 @@ int in_gate_area_no_mm(unsigned long addr)
25061
25062 const char *arch_vma_name(struct vm_area_struct *vma)
25063 {
25064 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25065 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25066 return "[vdso]";
25067 if (vma == &gate_vma)
25068 return "[vsyscall]";
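
Under CONFIG_PAX_PER_CPU_PGD, sync_global_pgds() above walks one page-global directory per CPU via pgd_offset_cpu() instead of the pgd_list, copying the reference kernel entry into every directory that still lacks it. The copy-if-empty loop can be modeled with plain arrays (sizes and the "none" encoding are illustrative, not the real x86-64 values):

#include <stdio.h>

#define NCPUS		4
#define PTRS_PER_PGD	8	/* illustrative table size */

static unsigned long ref_pgd[PTRS_PER_PGD];
static unsigned long cpu_pgd[NCPUS][PTRS_PER_PGD];

/* Propagate a populated reference entry to each per-CPU directory,
 * but only where that directory has no entry yet (0 == "none" here). */
static void sync_pgds(int idx)
{
	if (!ref_pgd[idx])
		return;
	for (int cpu = 0; cpu < NCPUS; cpu++)
		if (!cpu_pgd[cpu][idx])
			cpu_pgd[cpu][idx] = ref_pgd[idx];
}

int main(void)
{
	ref_pgd[3] = 0xdeadUL;
	sync_pgds(3);
	printf("cpu0[3]=%#lx cpu3[3]=%#lx\n", cpu_pgd[0][3], cpu_pgd[3][3]);
	return 0;
}
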
25069 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25070 index 7b179b4..6bd1777 100644
25071 --- a/arch/x86/mm/iomap_32.c
25072 +++ b/arch/x86/mm/iomap_32.c
25073 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25074 type = kmap_atomic_idx_push();
25075 idx = type + KM_TYPE_NR * smp_processor_id();
25076 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25077 +
25078 + pax_open_kernel();
25079 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25080 + pax_close_kernel();
25081 +
25082 arch_flush_lazy_mmu_mode();
25083
25084 return (void *)vaddr;
25085 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25086 index be1ef57..55f0160 100644
25087 --- a/arch/x86/mm/ioremap.c
25088 +++ b/arch/x86/mm/ioremap.c
25089 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25090 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25091 int is_ram = page_is_ram(pfn);
25092
25093 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25094 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25095 return NULL;
25096 WARN_ON_ONCE(is_ram);
25097 }
25098 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25099
25100 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25101 if (page_is_ram(start >> PAGE_SHIFT))
25102 +#ifdef CONFIG_HIGHMEM
25103 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25104 +#endif
25105 return __va(phys);
25106
25107 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25108 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25109 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25110
25111 static __initdata int after_paging_init;
25112 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25113 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25114
25115 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25116 {
25117 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25118 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25119
25120 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25121 - memset(bm_pte, 0, sizeof(bm_pte));
25122 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25123 + pmd_populate_user(&init_mm, pmd, bm_pte);
25124
25125 /*
25126 * The boot-ioremap range spans multiple pmds, for which
25127 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25128 index d87dd6d..bf3fa66 100644
25129 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25130 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25131 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25132 * memory (e.g. tracked pages)? For now, we need this to avoid
25133 * invoking kmemcheck for PnP BIOS calls.
25134 */
25135 - if (regs->flags & X86_VM_MASK)
25136 + if (v8086_mode(regs))
25137 return false;
25138 - if (regs->cs != __KERNEL_CS)
25139 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25140 return false;
25141
25142 pte = kmemcheck_pte_lookup(address);
25143 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25144 index 845df68..1d8d29f 100644
25145 --- a/arch/x86/mm/mmap.c
25146 +++ b/arch/x86/mm/mmap.c
25147 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25148 * Leave an at least ~128 MB hole with possible stack randomization.
25149 */
25150 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25151 -#define MAX_GAP (TASK_SIZE/6*5)
25152 +#define MAX_GAP (pax_task_size/6*5)
25153
25154 static int mmap_is_legacy(void)
25155 {
25156 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25157 return rnd << PAGE_SHIFT;
25158 }
25159
25160 -static unsigned long mmap_base(void)
25161 +static unsigned long mmap_base(struct mm_struct *mm)
25162 {
25163 unsigned long gap = rlimit(RLIMIT_STACK);
25164 + unsigned long pax_task_size = TASK_SIZE;
25165 +
25166 +#ifdef CONFIG_PAX_SEGMEXEC
25167 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25168 + pax_task_size = SEGMEXEC_TASK_SIZE;
25169 +#endif
25170
25171 if (gap < MIN_GAP)
25172 gap = MIN_GAP;
25173 else if (gap > MAX_GAP)
25174 gap = MAX_GAP;
25175
25176 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25177 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25178 }
25179
25180 /*
25181 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25182 * does, but not when emulating X86_32
25183 */
25184 -static unsigned long mmap_legacy_base(void)
25185 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25186 {
25187 - if (mmap_is_ia32())
25188 + if (mmap_is_ia32()) {
25189 +
25190 +#ifdef CONFIG_PAX_SEGMEXEC
25191 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25192 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25193 + else
25194 +#endif
25195 +
25196 return TASK_UNMAPPED_BASE;
25197 - else
25198 + } else
25199 return TASK_UNMAPPED_BASE + mmap_rnd();
25200 }
25201
25202 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25203 void arch_pick_mmap_layout(struct mm_struct *mm)
25204 {
25205 if (mmap_is_legacy()) {
25206 - mm->mmap_base = mmap_legacy_base();
25207 + mm->mmap_base = mmap_legacy_base(mm);
25208 +
25209 +#ifdef CONFIG_PAX_RANDMMAP
25210 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25211 + mm->mmap_base += mm->delta_mmap;
25212 +#endif
25213 +
25214 mm->get_unmapped_area = arch_get_unmapped_area;
25215 mm->unmap_area = arch_unmap_area;
25216 } else {
25217 - mm->mmap_base = mmap_base();
25218 + mm->mmap_base = mmap_base(mm);
25219 +
25220 +#ifdef CONFIG_PAX_RANDMMAP
25221 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25222 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25223 +#endif
25224 +
25225 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25226 mm->unmap_area = arch_unmap_area_topdown;
25227 }
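
arch_pick_mmap_layout() above now derives the mmap base from a task size that may be halved under SEGMEXEC, and then shifts it by the per-mm PaX randomization deltas when RANDMMAP is active. The top-down arithmetic from mmap_base() can be exercised in isolation (constants and the randomness source are placeholders; stack_maxrandom_size() is omitted):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define MIN_GAP		(128UL << 20)

/* Task size minus the clamped stack gap minus the mmap randomization,
 * page aligned -- as in the patched mmap_base() above. */
static unsigned long topdown_base(unsigned long task_size,
				  unsigned long stack_rlimit,
				  unsigned long rnd)
{
	unsigned long gap = stack_rlimit;
	unsigned long max_gap = task_size / 6 * 5;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > max_gap)
		gap = max_gap;
	return PAGE_ALIGN(task_size - gap - rnd);
}

int main(void)
{
	/* 3GB i386 task size vs. a halved SEGMEXEC task size (illustrative) */
	printf("%#lx %#lx\n",
	       topdown_base(0xC0000000UL, 8UL << 20, 0x00230000UL),
	       topdown_base(0x60000000UL, 8UL << 20, 0x00230000UL));
	return 0;
}
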
25228 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25229 index dc0b727..dc9d71a 100644
25230 --- a/arch/x86/mm/mmio-mod.c
25231 +++ b/arch/x86/mm/mmio-mod.c
25232 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25233 break;
25234 default:
25235 {
25236 - unsigned char *ip = (unsigned char *)instptr;
25237 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25238 my_trace->opcode = MMIO_UNKNOWN_OP;
25239 my_trace->width = 0;
25240 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25241 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25242 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25243 void __iomem *addr)
25244 {
25245 - static atomic_t next_id;
25246 + static atomic_unchecked_t next_id;
25247 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25248 /* These are page-unaligned. */
25249 struct mmiotrace_map map = {
25250 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25251 .private = trace
25252 },
25253 .phys = offset,
25254 - .id = atomic_inc_return(&next_id)
25255 + .id = atomic_inc_return_unchecked(&next_id)
25256 };
25257 map.map_id = trace->id;
25258
25259 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25260 index b008656..773eac2 100644
25261 --- a/arch/x86/mm/pageattr-test.c
25262 +++ b/arch/x86/mm/pageattr-test.c
25263 @@ -36,7 +36,7 @@ enum {
25264
25265 static int pte_testbit(pte_t pte)
25266 {
25267 - return pte_flags(pte) & _PAGE_UNUSED1;
25268 + return pte_flags(pte) & _PAGE_CPA_TEST;
25269 }
25270
25271 struct split_state {
25272 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25273 index e1ebde3..b1e1db38 100644
25274 --- a/arch/x86/mm/pageattr.c
25275 +++ b/arch/x86/mm/pageattr.c
25276 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25277 */
25278 #ifdef CONFIG_PCI_BIOS
25279 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25280 - pgprot_val(forbidden) |= _PAGE_NX;
25281 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25282 #endif
25283
25284 /*
25285 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25286 * Does not cover __inittext since that is gone later on. On
25287 * 64bit we do not enforce !NX on the low mapping
25288 */
25289 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25290 - pgprot_val(forbidden) |= _PAGE_NX;
25291 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25292 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25293
25294 +#ifdef CONFIG_DEBUG_RODATA
25295 /*
25296 * The .rodata section needs to be read-only. Using the pfn
25297 * catches all aliases.
25298 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25299 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25300 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25301 pgprot_val(forbidden) |= _PAGE_RW;
25302 +#endif
25303
25304 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25305 /*
25306 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25307 }
25308 #endif
25309
25310 +#ifdef CONFIG_PAX_KERNEXEC
25311 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25312 + pgprot_val(forbidden) |= _PAGE_RW;
25313 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25314 + }
25315 +#endif
25316 +
25317 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25318
25319 return prot;
25320 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25321 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25322 {
25323 /* change init_mm */
25324 + pax_open_kernel();
25325 set_pte_atomic(kpte, pte);
25326 +
25327 #ifdef CONFIG_X86_32
25328 if (!SHARED_KERNEL_PMD) {
25329 +
25330 +#ifdef CONFIG_PAX_PER_CPU_PGD
25331 + unsigned long cpu;
25332 +#else
25333 struct page *page;
25334 +#endif
25335
25336 +#ifdef CONFIG_PAX_PER_CPU_PGD
25337 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25338 + pgd_t *pgd = get_cpu_pgd(cpu);
25339 +#else
25340 list_for_each_entry(page, &pgd_list, lru) {
25341 - pgd_t *pgd;
25342 + pgd_t *pgd = (pgd_t *)page_address(page);
25343 +#endif
25344 +
25345 pud_t *pud;
25346 pmd_t *pmd;
25347
25348 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25349 + pgd += pgd_index(address);
25350 pud = pud_offset(pgd, address);
25351 pmd = pmd_offset(pud, address);
25352 set_pte_atomic((pte_t *)pmd, pte);
25353 }
25354 }
25355 #endif
25356 + pax_close_kernel();
25357 }
25358
25359 static int
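
The static_protections() hunk above adds a KERNEXEC rule: for page frames inside the kernel image between _text and _sdata, both _PAGE_RW and (where the CPU supports it) _PAGE_NX are added to the forbidden mask, and the requested protection is finally stripped with prot & ~forbidden, so kernel text cannot be made writable or non-executable through set_memory_*(). The flag arithmetic in isolation (bit values as on x86, the range check reduced to one region):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_RW	(1ULL << 1)
#define _PAGE_NX	(1ULL << 63)

static bool within(unsigned long x, unsigned long lo, unsigned long hi)
{
	return x >= lo && x < hi;
}

/* Strip whatever the text/rodata rule forbids from the requested flags. */
static uint64_t apply_protections(uint64_t prot, unsigned long pfn,
				  unsigned long text_pfn, unsigned long sdata_pfn,
				  uint64_t supported_mask)
{
	uint64_t forbidden = 0;

	if (within(pfn, text_pfn, sdata_pfn)) {
		forbidden |= _PAGE_RW;			/* never writable ... */
		forbidden |= _PAGE_NX & supported_mask;	/* ... never non-executable */
	}
	return prot & ~forbidden;
}

int main(void)
{
	uint64_t req = _PAGE_RW | _PAGE_NX;

	printf("%#llx\n", (unsigned long long)
	       apply_protections(req, 0x500, 0x400, 0x800, ~0ULL));
	return 0;
}
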
25360 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25361 index f6ff57b..481690f 100644
25362 --- a/arch/x86/mm/pat.c
25363 +++ b/arch/x86/mm/pat.c
25364 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25365
25366 if (!entry) {
25367 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25368 - current->comm, current->pid, start, end);
25369 + current->comm, task_pid_nr(current), start, end);
25370 return -EINVAL;
25371 }
25372
25373 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25374 while (cursor < to) {
25375 if (!devmem_is_allowed(pfn)) {
25376 printk(KERN_INFO
25377 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25378 - current->comm, from, to);
25379 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25380 + current->comm, from, to, cursor);
25381 return 0;
25382 }
25383 cursor += PAGE_SIZE;
25384 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25385 printk(KERN_INFO
25386 "%s:%d ioremap_change_attr failed %s "
25387 "for %Lx-%Lx\n",
25388 - current->comm, current->pid,
25389 + current->comm, task_pid_nr(current),
25390 cattr_name(flags),
25391 base, (unsigned long long)(base + size));
25392 return -EINVAL;
25393 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25394 if (want_flags != flags) {
25395 printk(KERN_WARNING
25396 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25397 - current->comm, current->pid,
25398 + current->comm, task_pid_nr(current),
25399 cattr_name(want_flags),
25400 (unsigned long long)paddr,
25401 (unsigned long long)(paddr + size),
25402 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25403 free_memtype(paddr, paddr + size);
25404 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25405 " for %Lx-%Lx, got %s\n",
25406 - current->comm, current->pid,
25407 + current->comm, task_pid_nr(current),
25408 cattr_name(want_flags),
25409 (unsigned long long)paddr,
25410 (unsigned long long)(paddr + size),
25411 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25412 index 9f0614d..92ae64a 100644
25413 --- a/arch/x86/mm/pf_in.c
25414 +++ b/arch/x86/mm/pf_in.c
25415 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25416 int i;
25417 enum reason_type rv = OTHERS;
25418
25419 - p = (unsigned char *)ins_addr;
25420 + p = (unsigned char *)ktla_ktva(ins_addr);
25421 p += skip_prefix(p, &prf);
25422 p += get_opcode(p, &opcode);
25423
25424 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25425 struct prefix_bits prf;
25426 int i;
25427
25428 - p = (unsigned char *)ins_addr;
25429 + p = (unsigned char *)ktla_ktva(ins_addr);
25430 p += skip_prefix(p, &prf);
25431 p += get_opcode(p, &opcode);
25432
25433 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25434 struct prefix_bits prf;
25435 int i;
25436
25437 - p = (unsigned char *)ins_addr;
25438 + p = (unsigned char *)ktla_ktva(ins_addr);
25439 p += skip_prefix(p, &prf);
25440 p += get_opcode(p, &opcode);
25441
25442 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25443 struct prefix_bits prf;
25444 int i;
25445
25446 - p = (unsigned char *)ins_addr;
25447 + p = (unsigned char *)ktla_ktva(ins_addr);
25448 p += skip_prefix(p, &prf);
25449 p += get_opcode(p, &opcode);
25450 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25451 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25452 struct prefix_bits prf;
25453 int i;
25454
25455 - p = (unsigned char *)ins_addr;
25456 + p = (unsigned char *)ktla_ktva(ins_addr);
25457 p += skip_prefix(p, &prf);
25458 p += get_opcode(p, &opcode);
25459 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25460 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25461 index 8573b83..4f3ed7e 100644
25462 --- a/arch/x86/mm/pgtable.c
25463 +++ b/arch/x86/mm/pgtable.c
25464 @@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
25465 list_del(&page->lru);
25466 }
25467
25468 -#define UNSHARED_PTRS_PER_PGD \
25469 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25470 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25471 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25472
25473 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
25474 +{
25475 + unsigned int count = USER_PGD_PTRS;
25476
25477 + while (count--)
25478 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25479 +}
25480 +#endif
25481 +
25482 +#ifdef CONFIG_PAX_PER_CPU_PGD
25483 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
25484 +{
25485 + unsigned int count = USER_PGD_PTRS;
25486 +
25487 + while (count--) {
25488 + pgd_t pgd;
25489 +
25490 +#ifdef CONFIG_X86_64
25491 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25492 +#else
25493 + pgd = *src++;
25494 +#endif
25495 +
25496 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25497 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25498 +#endif
25499 +
25500 + *dst++ = pgd;
25501 + }
25502 +
25503 +}
25504 +#endif
25505 +
25506 +#ifdef CONFIG_X86_64
25507 +#define pxd_t pud_t
25508 +#define pyd_t pgd_t
25509 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25510 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25511 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25512 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
25513 +#define PYD_SIZE PGDIR_SIZE
25514 +#else
25515 +#define pxd_t pmd_t
25516 +#define pyd_t pud_t
25517 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25518 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25519 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25520 +#define pyd_offset(mm, address) pud_offset((mm), (address))
25521 +#define PYD_SIZE PUD_SIZE
25522 +#endif
25523 +
25524 +#ifdef CONFIG_PAX_PER_CPU_PGD
25525 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25526 +static inline void pgd_dtor(pgd_t *pgd) {}
25527 +#else
25528 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25529 {
25530 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25531 @@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
25532 pgd_list_del(pgd);
25533 spin_unlock(&pgd_lock);
25534 }
25535 +#endif
25536
25537 /*
25538 * List of all pgd's needed for non-PAE so it can invalidate entries
25539 @@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
25540 * -- wli
25541 */
25542
25543 -#ifdef CONFIG_X86_PAE
25544 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25545 /*
25546 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25547 * updating the top-level pagetable entries to guarantee the
25548 @@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
25549 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25550 * and initialize the kernel pmds here.
25551 */
25552 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25553 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25554
25555 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25556 {
25557 @@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25558 */
25559 flush_tlb_mm(mm);
25560 }
25561 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25562 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25563 #else /* !CONFIG_X86_PAE */
25564
25565 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25566 -#define PREALLOCATED_PMDS 0
25567 +#define PREALLOCATED_PXDS 0
25568
25569 #endif /* CONFIG_X86_PAE */
25570
25571 -static void free_pmds(pmd_t *pmds[])
25572 +static void free_pxds(pxd_t *pxds[])
25573 {
25574 int i;
25575
25576 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25577 - if (pmds[i])
25578 - free_page((unsigned long)pmds[i]);
25579 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25580 + if (pxds[i])
25581 + free_page((unsigned long)pxds[i]);
25582 }
25583
25584 -static int preallocate_pmds(pmd_t *pmds[])
25585 +static int preallocate_pxds(pxd_t *pxds[])
25586 {
25587 int i;
25588 bool failed = false;
25589
25590 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25591 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25592 - if (pmd == NULL)
25593 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25594 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25595 + if (pxd == NULL)
25596 failed = true;
25597 - pmds[i] = pmd;
25598 + pxds[i] = pxd;
25599 }
25600
25601 if (failed) {
25602 - free_pmds(pmds);
25603 + free_pxds(pxds);
25604 return -ENOMEM;
25605 }
25606
25607 @@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25608 * preallocate which never got a corresponding vma will need to be
25609 * freed manually.
25610 */
25611 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25612 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25613 {
25614 int i;
25615
25616 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25617 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25618 pgd_t pgd = pgdp[i];
25619
25620 if (pgd_val(pgd) != 0) {
25621 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25622 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25623
25624 - pgdp[i] = native_make_pgd(0);
25625 + set_pgd(pgdp + i, native_make_pgd(0));
25626
25627 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25628 - pmd_free(mm, pmd);
25629 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25630 + pxd_free(mm, pxd);
25631 }
25632 }
25633 }
25634
25635 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25636 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25637 {
25638 - pud_t *pud;
25639 + pyd_t *pyd;
25640 unsigned long addr;
25641 int i;
25642
25643 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25644 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25645 return;
25646
25647 - pud = pud_offset(pgd, 0);
25648 +#ifdef CONFIG_X86_64
25649 + pyd = pyd_offset(mm, 0L);
25650 +#else
25651 + pyd = pyd_offset(pgd, 0L);
25652 +#endif
25653
25654 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25655 - i++, pud++, addr += PUD_SIZE) {
25656 - pmd_t *pmd = pmds[i];
25657 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25658 + i++, pyd++, addr += PYD_SIZE) {
25659 + pxd_t *pxd = pxds[i];
25660
25661 if (i >= KERNEL_PGD_BOUNDARY)
25662 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25663 - sizeof(pmd_t) * PTRS_PER_PMD);
25664 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25665 + sizeof(pxd_t) * PTRS_PER_PMD);
25666
25667 - pud_populate(mm, pud, pmd);
25668 + pyd_populate(mm, pyd, pxd);
25669 }
25670 }
25671
25672 pgd_t *pgd_alloc(struct mm_struct *mm)
25673 {
25674 pgd_t *pgd;
25675 - pmd_t *pmds[PREALLOCATED_PMDS];
25676 + pxd_t *pxds[PREALLOCATED_PXDS];
25677
25678 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25679
25680 @@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25681
25682 mm->pgd = pgd;
25683
25684 - if (preallocate_pmds(pmds) != 0)
25685 + if (preallocate_pxds(pxds) != 0)
25686 goto out_free_pgd;
25687
25688 if (paravirt_pgd_alloc(mm) != 0)
25689 - goto out_free_pmds;
25690 + goto out_free_pxds;
25691
25692 /*
25693 * Make sure that pre-populating the pmds is atomic with
25694 @@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25695 spin_lock(&pgd_lock);
25696
25697 pgd_ctor(mm, pgd);
25698 - pgd_prepopulate_pmd(mm, pgd, pmds);
25699 + pgd_prepopulate_pxd(mm, pgd, pxds);
25700
25701 spin_unlock(&pgd_lock);
25702
25703 return pgd;
25704
25705 -out_free_pmds:
25706 - free_pmds(pmds);
25707 +out_free_pxds:
25708 + free_pxds(pxds);
25709 out_free_pgd:
25710 free_page((unsigned long)pgd);
25711 out:
25712 @@ -295,7 +356,7 @@ out:
25713
25714 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25715 {
25716 - pgd_mop_up_pmds(mm, pgd);
25717 + pgd_mop_up_pxds(mm, pgd);
25718 pgd_dtor(pgd);
25719 paravirt_pgd_free(mm, pgd);
25720 free_page((unsigned long)pgd);
25721 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25722 index a69bcb8..19068ab 100644
25723 --- a/arch/x86/mm/pgtable_32.c
25724 +++ b/arch/x86/mm/pgtable_32.c
25725 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25726 return;
25727 }
25728 pte = pte_offset_kernel(pmd, vaddr);
25729 +
25730 + pax_open_kernel();
25731 if (pte_val(pteval))
25732 set_pte_at(&init_mm, vaddr, pte, pteval);
25733 else
25734 pte_clear(&init_mm, vaddr, pte);
25735 + pax_close_kernel();
25736
25737 /*
25738 * It's enough to flush this one mapping.
25739 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25740 index 410531d..0f16030 100644
25741 --- a/arch/x86/mm/setup_nx.c
25742 +++ b/arch/x86/mm/setup_nx.c
25743 @@ -5,8 +5,10 @@
25744 #include <asm/pgtable.h>
25745 #include <asm/proto.h>
25746
25747 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25748 static int disable_nx __cpuinitdata;
25749
25750 +#ifndef CONFIG_PAX_PAGEEXEC
25751 /*
25752 * noexec = on|off
25753 *
25754 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25755 return 0;
25756 }
25757 early_param("noexec", noexec_setup);
25758 +#endif
25759 +
25760 +#endif
25761
25762 void __cpuinit x86_configure_nx(void)
25763 {
25764 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25765 if (cpu_has_nx && !disable_nx)
25766 __supported_pte_mask |= _PAGE_NX;
25767 else
25768 +#endif
25769 __supported_pte_mask &= ~_PAGE_NX;
25770 }
25771
25772 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25773 index d6c0418..06a0ad5 100644
25774 --- a/arch/x86/mm/tlb.c
25775 +++ b/arch/x86/mm/tlb.c
25776 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
25777 BUG();
25778 cpumask_clear_cpu(cpu,
25779 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25780 +
25781 +#ifndef CONFIG_PAX_PER_CPU_PGD
25782 load_cr3(swapper_pg_dir);
25783 +#endif
25784 +
25785 }
25786 EXPORT_SYMBOL_GPL(leave_mm);
25787
25788 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25789 index 877b9a1..a8ecf42 100644
25790 --- a/arch/x86/net/bpf_jit.S
25791 +++ b/arch/x86/net/bpf_jit.S
25792 @@ -9,6 +9,7 @@
25793 */
25794 #include <linux/linkage.h>
25795 #include <asm/dwarf2.h>
25796 +#include <asm/alternative-asm.h>
25797
25798 /*
25799 * Calling convention :
25800 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
25801 jle bpf_slow_path_word
25802 mov (SKBDATA,%rsi),%eax
25803 bswap %eax /* ntohl() */
25804 + pax_force_retaddr
25805 ret
25806
25807 sk_load_half:
25808 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
25809 jle bpf_slow_path_half
25810 movzwl (SKBDATA,%rsi),%eax
25811 rol $8,%ax # ntohs()
25812 + pax_force_retaddr
25813 ret
25814
25815 sk_load_byte:
25816 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
25817 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25818 jle bpf_slow_path_byte
25819 movzbl (SKBDATA,%rsi),%eax
25820 + pax_force_retaddr
25821 ret
25822
25823 /**
25824 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
25825 movzbl (SKBDATA,%rsi),%ebx
25826 and $15,%bl
25827 shl $2,%bl
25828 + pax_force_retaddr
25829 ret
25830
25831 /* rsi contains offset and can be scratched */
25832 @@ -109,6 +114,7 @@ bpf_slow_path_word:
25833 js bpf_error
25834 mov -12(%rbp),%eax
25835 bswap %eax
25836 + pax_force_retaddr
25837 ret
25838
25839 bpf_slow_path_half:
25840 @@ -117,12 +123,14 @@ bpf_slow_path_half:
25841 mov -12(%rbp),%ax
25842 rol $8,%ax
25843 movzwl %ax,%eax
25844 + pax_force_retaddr
25845 ret
25846
25847 bpf_slow_path_byte:
25848 bpf_slow_path_common(1)
25849 js bpf_error
25850 movzbl -12(%rbp),%eax
25851 + pax_force_retaddr
25852 ret
25853
25854 bpf_slow_path_byte_msh:
25855 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
25856 and $15,%al
25857 shl $2,%al
25858 xchg %eax,%ebx
25859 + pax_force_retaddr
25860 ret
25861
25862 #define sk_negative_common(SIZE) \
25863 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
25864 sk_negative_common(4)
25865 mov (%rax), %eax
25866 bswap %eax
25867 + pax_force_retaddr
25868 ret
25869
25870 bpf_slow_path_half_neg:
25871 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
25872 mov (%rax),%ax
25873 rol $8,%ax
25874 movzwl %ax,%eax
25875 + pax_force_retaddr
25876 ret
25877
25878 bpf_slow_path_byte_neg:
25879 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
25880 .globl sk_load_byte_negative_offset
25881 sk_negative_common(1)
25882 movzbl (%rax), %eax
25883 + pax_force_retaddr
25884 ret
25885
25886 bpf_slow_path_byte_msh_neg:
25887 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
25888 and $15,%al
25889 shl $2,%al
25890 xchg %eax,%ebx
25891 + pax_force_retaddr
25892 ret
25893
25894 bpf_error:
25895 @@ -197,4 +210,5 @@ bpf_error:
25896 xor %eax,%eax
25897 mov -8(%rbp),%rbx
25898 leaveq
25899 + pax_force_retaddr
25900 ret
25901 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25902 index 0597f95..a12c36e 100644
25903 --- a/arch/x86/net/bpf_jit_comp.c
25904 +++ b/arch/x86/net/bpf_jit_comp.c
25905 @@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
25906 set_fs(old_fs);
25907 }
25908
25909 +struct bpf_jit_work {
25910 + struct work_struct work;
25911 + void *image;
25912 +};
25913 +
25914 #define CHOOSE_LOAD_FUNC(K, func) \
25915 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
25916
25917 @@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25918 if (addrs == NULL)
25919 return;
25920
25921 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25922 + if (!fp->work)
25923 + goto out;
25924 +
25925 /* Before first pass, make a rough estimation of addrs[]
25926 * each bpf instruction is translated to less than 64 bytes
25927 */
25928 @@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25929 break;
25930 default:
25931 /* hmm, too complex filter, give up with jit compiler */
25932 - goto out;
25933 + goto error;
25934 }
25935 ilen = prog - temp;
25936 if (image) {
25937 if (unlikely(proglen + ilen > oldproglen)) {
25938 pr_err("bpb_jit_compile fatal error\n");
25939 - kfree(addrs);
25940 - module_free(NULL, image);
25941 - return;
25942 + module_free_exec(NULL, image);
25943 + goto error;
25944 }
25945 + pax_open_kernel();
25946 memcpy(image + proglen, temp, ilen);
25947 + pax_close_kernel();
25948 }
25949 proglen += ilen;
25950 addrs[i] = proglen;
25951 @@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25952 break;
25953 }
25954 if (proglen == oldproglen) {
25955 - image = module_alloc(max_t(unsigned int,
25956 - proglen,
25957 - sizeof(struct work_struct)));
25958 + image = module_alloc_exec(proglen);
25959 if (!image)
25960 - goto out;
25961 + goto error;
25962 }
25963 oldproglen = proglen;
25964 }
25965 @@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25966 bpf_flush_icache(image, image + proglen);
25967
25968 fp->bpf_func = (void *)image;
25969 - }
25970 + } else
25971 +error:
25972 + kfree(fp->work);
25973 +
25974 out:
25975 kfree(addrs);
25976 return;
25977 @@ -648,18 +659,20 @@ out:
25978
25979 static void jit_free_defer(struct work_struct *arg)
25980 {
25981 - module_free(NULL, arg);
25982 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
25983 + kfree(arg);
25984 }
25985
25986 /* run from softirq, we must use a work_struct to call
25987 - * module_free() from process context
25988 + * module_free_exec() from process context
25989 */
25990 void bpf_jit_free(struct sk_filter *fp)
25991 {
25992 if (fp->bpf_func != sk_run_filter) {
25993 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
25994 + struct work_struct *work = &fp->work->work;
25995
25996 INIT_WORK(work, jit_free_defer);
25997 + fp->work->image = fp->bpf_func;
25998 schedule_work(work);
25999 }
26000 }
26001 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26002 index d6aa6e8..266395a 100644
26003 --- a/arch/x86/oprofile/backtrace.c
26004 +++ b/arch/x86/oprofile/backtrace.c
26005 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26006 struct stack_frame_ia32 *fp;
26007 unsigned long bytes;
26008
26009 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26010 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26011 if (bytes != sizeof(bufhead))
26012 return NULL;
26013
26014 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26015 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26016
26017 oprofile_add_trace(bufhead[0].return_address);
26018
26019 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26020 struct stack_frame bufhead[2];
26021 unsigned long bytes;
26022
26023 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26024 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26025 if (bytes != sizeof(bufhead))
26026 return NULL;
26027
26028 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26029 {
26030 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26031
26032 - if (!user_mode_vm(regs)) {
26033 + if (!user_mode(regs)) {
26034 unsigned long stack = kernel_stack_pointer(regs);
26035 if (depth)
26036 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26037 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26038 index 140942f..8a5cc55 100644
26039 --- a/arch/x86/pci/mrst.c
26040 +++ b/arch/x86/pci/mrst.c
26041 @@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26042 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
26043 pci_mmcfg_late_init();
26044 pcibios_enable_irq = mrst_pci_irq_enable;
26045 - pci_root_ops = pci_mrst_ops;
26046 + pax_open_kernel();
26047 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26048 + pax_close_kernel();
26049 pci_soc_mode = 1;
26050 /* Continue with standard init */
26051 return 1;
26052 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26053 index da8fe05..7ee6704 100644
26054 --- a/arch/x86/pci/pcbios.c
26055 +++ b/arch/x86/pci/pcbios.c
26056 @@ -79,50 +79,93 @@ union bios32 {
26057 static struct {
26058 unsigned long address;
26059 unsigned short segment;
26060 -} bios32_indirect = { 0, __KERNEL_CS };
26061 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26062
26063 /*
26064 * Returns the entry point for the given service, NULL on error
26065 */
26066
26067 -static unsigned long bios32_service(unsigned long service)
26068 +static unsigned long __devinit bios32_service(unsigned long service)
26069 {
26070 unsigned char return_code; /* %al */
26071 unsigned long address; /* %ebx */
26072 unsigned long length; /* %ecx */
26073 unsigned long entry; /* %edx */
26074 unsigned long flags;
26075 + struct desc_struct d, *gdt;
26076
26077 local_irq_save(flags);
26078 - __asm__("lcall *(%%edi); cld"
26079 +
26080 + gdt = get_cpu_gdt_table(smp_processor_id());
26081 +
26082 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26083 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26084 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26085 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26086 +
26087 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26088 : "=a" (return_code),
26089 "=b" (address),
26090 "=c" (length),
26091 "=d" (entry)
26092 : "0" (service),
26093 "1" (0),
26094 - "D" (&bios32_indirect));
26095 + "D" (&bios32_indirect),
26096 + "r"(__PCIBIOS_DS)
26097 + : "memory");
26098 +
26099 + pax_open_kernel();
26100 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26101 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26102 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26103 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26104 + pax_close_kernel();
26105 +
26106 local_irq_restore(flags);
26107
26108 switch (return_code) {
26109 - case 0:
26110 - return address + entry;
26111 - case 0x80: /* Not present */
26112 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26113 - return 0;
26114 - default: /* Shouldn't happen */
26115 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26116 - service, return_code);
26117 + case 0: {
26118 + int cpu;
26119 + unsigned char flags;
26120 +
26121 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26122 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26123 + printk(KERN_WARNING "bios32_service: not valid\n");
26124 return 0;
26125 + }
26126 + address = address + PAGE_OFFSET;
26127 + length += 16UL; /* some BIOSs underreport this... */
26128 + flags = 4;
26129 + if (length >= 64*1024*1024) {
26130 + length >>= PAGE_SHIFT;
26131 + flags |= 8;
26132 + }
26133 +
26134 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26135 + gdt = get_cpu_gdt_table(cpu);
26136 + pack_descriptor(&d, address, length, 0x9b, flags);
26137 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26138 + pack_descriptor(&d, address, length, 0x93, flags);
26139 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26140 + }
26141 + return entry;
26142 + }
26143 + case 0x80: /* Not present */
26144 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26145 + return 0;
26146 + default: /* Shouldn't happen */
26147 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26148 + service, return_code);
26149 + return 0;
26150 }
26151 }
26152
26153 static struct {
26154 unsigned long address;
26155 unsigned short segment;
26156 -} pci_indirect = { 0, __KERNEL_CS };
26157 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26158
26159 -static int pci_bios_present;
26160 +static int pci_bios_present __read_only;
26161
26162 static int __devinit check_pcibios(void)
26163 {
26164 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26165 unsigned long flags, pcibios_entry;
26166
26167 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26168 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26169 + pci_indirect.address = pcibios_entry;
26170
26171 local_irq_save(flags);
26172 - __asm__(
26173 - "lcall *(%%edi); cld\n\t"
26174 + __asm__("movw %w6, %%ds\n\t"
26175 + "lcall *%%ss:(%%edi); cld\n\t"
26176 + "push %%ss\n\t"
26177 + "pop %%ds\n\t"
26178 "jc 1f\n\t"
26179 "xor %%ah, %%ah\n"
26180 "1:"
26181 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26182 "=b" (ebx),
26183 "=c" (ecx)
26184 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26185 - "D" (&pci_indirect)
26186 + "D" (&pci_indirect),
26187 + "r" (__PCIBIOS_DS)
26188 : "memory");
26189 local_irq_restore(flags);
26190
26191 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26192
26193 switch (len) {
26194 case 1:
26195 - __asm__("lcall *(%%esi); cld\n\t"
26196 + __asm__("movw %w6, %%ds\n\t"
26197 + "lcall *%%ss:(%%esi); cld\n\t"
26198 + "push %%ss\n\t"
26199 + "pop %%ds\n\t"
26200 "jc 1f\n\t"
26201 "xor %%ah, %%ah\n"
26202 "1:"
26203 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26204 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26205 "b" (bx),
26206 "D" ((long)reg),
26207 - "S" (&pci_indirect));
26208 + "S" (&pci_indirect),
26209 + "r" (__PCIBIOS_DS));
26210 /*
26211 * Zero-extend the result beyond 8 bits, do not trust the
26212 * BIOS having done it:
26213 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26214 *value &= 0xff;
26215 break;
26216 case 2:
26217 - __asm__("lcall *(%%esi); cld\n\t"
26218 + __asm__("movw %w6, %%ds\n\t"
26219 + "lcall *%%ss:(%%esi); cld\n\t"
26220 + "push %%ss\n\t"
26221 + "pop %%ds\n\t"
26222 "jc 1f\n\t"
26223 "xor %%ah, %%ah\n"
26224 "1:"
26225 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26226 : "1" (PCIBIOS_READ_CONFIG_WORD),
26227 "b" (bx),
26228 "D" ((long)reg),
26229 - "S" (&pci_indirect));
26230 + "S" (&pci_indirect),
26231 + "r" (__PCIBIOS_DS));
26232 /*
26233 * Zero-extend the result beyond 16 bits, do not trust the
26234 * BIOS having done it:
26235 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26236 *value &= 0xffff;
26237 break;
26238 case 4:
26239 - __asm__("lcall *(%%esi); cld\n\t"
26240 + __asm__("movw %w6, %%ds\n\t"
26241 + "lcall *%%ss:(%%esi); cld\n\t"
26242 + "push %%ss\n\t"
26243 + "pop %%ds\n\t"
26244 "jc 1f\n\t"
26245 "xor %%ah, %%ah\n"
26246 "1:"
26247 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26248 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26249 "b" (bx),
26250 "D" ((long)reg),
26251 - "S" (&pci_indirect));
26252 + "S" (&pci_indirect),
26253 + "r" (__PCIBIOS_DS));
26254 break;
26255 }
26256
26257 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26258
26259 switch (len) {
26260 case 1:
26261 - __asm__("lcall *(%%esi); cld\n\t"
26262 + __asm__("movw %w6, %%ds\n\t"
26263 + "lcall *%%ss:(%%esi); cld\n\t"
26264 + "push %%ss\n\t"
26265 + "pop %%ds\n\t"
26266 "jc 1f\n\t"
26267 "xor %%ah, %%ah\n"
26268 "1:"
26269 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26270 "c" (value),
26271 "b" (bx),
26272 "D" ((long)reg),
26273 - "S" (&pci_indirect));
26274 + "S" (&pci_indirect),
26275 + "r" (__PCIBIOS_DS));
26276 break;
26277 case 2:
26278 - __asm__("lcall *(%%esi); cld\n\t"
26279 + __asm__("movw %w6, %%ds\n\t"
26280 + "lcall *%%ss:(%%esi); cld\n\t"
26281 + "push %%ss\n\t"
26282 + "pop %%ds\n\t"
26283 "jc 1f\n\t"
26284 "xor %%ah, %%ah\n"
26285 "1:"
26286 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26287 "c" (value),
26288 "b" (bx),
26289 "D" ((long)reg),
26290 - "S" (&pci_indirect));
26291 + "S" (&pci_indirect),
26292 + "r" (__PCIBIOS_DS));
26293 break;
26294 case 4:
26295 - __asm__("lcall *(%%esi); cld\n\t"
26296 + __asm__("movw %w6, %%ds\n\t"
26297 + "lcall *%%ss:(%%esi); cld\n\t"
26298 + "push %%ss\n\t"
26299 + "pop %%ds\n\t"
26300 "jc 1f\n\t"
26301 "xor %%ah, %%ah\n"
26302 "1:"
26303 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26304 "c" (value),
26305 "b" (bx),
26306 "D" ((long)reg),
26307 - "S" (&pci_indirect));
26308 + "S" (&pci_indirect),
26309 + "r" (__PCIBIOS_DS));
26310 break;
26311 }
26312
26313 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26314
26315 DBG("PCI: Fetching IRQ routing table... ");
26316 __asm__("push %%es\n\t"
26317 + "movw %w8, %%ds\n\t"
26318 "push %%ds\n\t"
26319 "pop %%es\n\t"
26320 - "lcall *(%%esi); cld\n\t"
26321 + "lcall *%%ss:(%%esi); cld\n\t"
26322 "pop %%es\n\t"
26323 + "push %%ss\n\t"
26324 + "pop %%ds\n"
26325 "jc 1f\n\t"
26326 "xor %%ah, %%ah\n"
26327 "1:"
26328 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26329 "1" (0),
26330 "D" ((long) &opt),
26331 "S" (&pci_indirect),
26332 - "m" (opt)
26333 + "m" (opt),
26334 + "r" (__PCIBIOS_DS)
26335 : "memory");
26336 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26337 if (ret & 0xff00)
26338 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26339 {
26340 int ret;
26341
26342 - __asm__("lcall *(%%esi); cld\n\t"
26343 + __asm__("movw %w5, %%ds\n\t"
26344 + "lcall *%%ss:(%%esi); cld\n\t"
26345 + "push %%ss\n\t"
26346 + "pop %%ds\n"
26347 "jc 1f\n\t"
26348 "xor %%ah, %%ah\n"
26349 "1:"
26350 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26351 : "0" (PCIBIOS_SET_PCI_HW_INT),
26352 "b" ((dev->bus->number << 8) | dev->devfn),
26353 "c" ((irq << 8) | (pin + 10)),
26354 - "S" (&pci_indirect));
26355 + "S" (&pci_indirect),
26356 + "r" (__PCIBIOS_DS));
26357 return !(ret & 0xff00);
26358 }
26359 EXPORT_SYMBOL(pcibios_set_irq_routing);
26360 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26361 index 40e4469..1ab536e 100644
26362 --- a/arch/x86/platform/efi/efi_32.c
26363 +++ b/arch/x86/platform/efi/efi_32.c
26364 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26365 {
26366 struct desc_ptr gdt_descr;
26367
26368 +#ifdef CONFIG_PAX_KERNEXEC
26369 + struct desc_struct d;
26370 +#endif
26371 +
26372 local_irq_save(efi_rt_eflags);
26373
26374 load_cr3(initial_page_table);
26375 __flush_tlb_all();
26376
26377 +#ifdef CONFIG_PAX_KERNEXEC
26378 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26379 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26380 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26381 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26382 +#endif
26383 +
26384 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26385 gdt_descr.size = GDT_SIZE - 1;
26386 load_gdt(&gdt_descr);
26387 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26388 {
26389 struct desc_ptr gdt_descr;
26390
26391 +#ifdef CONFIG_PAX_KERNEXEC
26392 + struct desc_struct d;
26393 +
26394 + memset(&d, 0, sizeof d);
26395 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26396 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26397 +#endif
26398 +
26399 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26400 gdt_descr.size = GDT_SIZE - 1;
26401 load_gdt(&gdt_descr);
26402 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26403 index fbe66e6..c5c0dd2 100644
26404 --- a/arch/x86/platform/efi/efi_stub_32.S
26405 +++ b/arch/x86/platform/efi/efi_stub_32.S
26406 @@ -6,7 +6,9 @@
26407 */
26408
26409 #include <linux/linkage.h>
26410 +#include <linux/init.h>
26411 #include <asm/page_types.h>
26412 +#include <asm/segment.h>
26413
26414 /*
26415 * efi_call_phys(void *, ...) is a function with variable parameters.
26416 @@ -20,7 +22,7 @@
26417 * service functions will comply with gcc calling convention, too.
26418 */
26419
26420 -.text
26421 +__INIT
26422 ENTRY(efi_call_phys)
26423 /*
26424 * 0. The function can only be called in Linux kernel. So CS has been
26425 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26426 * The mapping of lower virtual memory has been created in prelog and
26427 * epilog.
26428 */
26429 - movl $1f, %edx
26430 - subl $__PAGE_OFFSET, %edx
26431 - jmp *%edx
26432 + movl $(__KERNEXEC_EFI_DS), %edx
26433 + mov %edx, %ds
26434 + mov %edx, %es
26435 + mov %edx, %ss
26436 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26437 1:
26438
26439 /*
26440 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26441 * parameter 2, ..., param n. To make things easy, we save the return
26442 * address of efi_call_phys in a global variable.
26443 */
26444 - popl %edx
26445 - movl %edx, saved_return_addr
26446 - /* get the function pointer into ECX*/
26447 - popl %ecx
26448 - movl %ecx, efi_rt_function_ptr
26449 - movl $2f, %edx
26450 - subl $__PAGE_OFFSET, %edx
26451 - pushl %edx
26452 + popl (saved_return_addr)
26453 + popl (efi_rt_function_ptr)
26454
26455 /*
26456 * 3. Clear PG bit in %CR0.
26457 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26458 /*
26459 * 5. Call the physical function.
26460 */
26461 - jmp *%ecx
26462 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
26463
26464 -2:
26465 /*
26466 * 6. After EFI runtime service returns, control will return to
26467 * following instruction. We'd better readjust stack pointer first.
26468 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26469 movl %cr0, %edx
26470 orl $0x80000000, %edx
26471 movl %edx, %cr0
26472 - jmp 1f
26473 -1:
26474 +
26475 /*
26476 * 8. Now restore the virtual mode from flat mode by
26477 * adding EIP with PAGE_OFFSET.
26478 */
26479 - movl $1f, %edx
26480 - jmp *%edx
26481 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26482 1:
26483 + movl $(__KERNEL_DS), %edx
26484 + mov %edx, %ds
26485 + mov %edx, %es
26486 + mov %edx, %ss
26487
26488 /*
26489 * 9. Balance the stack. And because EAX contain the return value,
26490 * we'd better not clobber it.
26491 */
26492 - leal efi_rt_function_ptr, %edx
26493 - movl (%edx), %ecx
26494 - pushl %ecx
26495 + pushl (efi_rt_function_ptr)
26496
26497 /*
26498 - * 10. Push the saved return address onto the stack and return.
26499 + * 10. Return to the saved return address.
26500 */
26501 - leal saved_return_addr, %edx
26502 - movl (%edx), %ecx
26503 - pushl %ecx
26504 - ret
26505 + jmpl *(saved_return_addr)
26506 ENDPROC(efi_call_phys)
26507 .previous
26508
26509 -.data
26510 +__INITDATA
26511 saved_return_addr:
26512 .long 0
26513 efi_rt_function_ptr:
26514 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26515 index 4c07cca..2c8427d 100644
26516 --- a/arch/x86/platform/efi/efi_stub_64.S
26517 +++ b/arch/x86/platform/efi/efi_stub_64.S
26518 @@ -7,6 +7,7 @@
26519 */
26520
26521 #include <linux/linkage.h>
26522 +#include <asm/alternative-asm.h>
26523
26524 #define SAVE_XMM \
26525 mov %rsp, %rax; \
26526 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
26527 call *%rdi
26528 addq $32, %rsp
26529 RESTORE_XMM
26530 + pax_force_retaddr 0, 1
26531 ret
26532 ENDPROC(efi_call0)
26533
26534 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
26535 call *%rdi
26536 addq $32, %rsp
26537 RESTORE_XMM
26538 + pax_force_retaddr 0, 1
26539 ret
26540 ENDPROC(efi_call1)
26541
26542 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
26543 call *%rdi
26544 addq $32, %rsp
26545 RESTORE_XMM
26546 + pax_force_retaddr 0, 1
26547 ret
26548 ENDPROC(efi_call2)
26549
26550 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
26551 call *%rdi
26552 addq $32, %rsp
26553 RESTORE_XMM
26554 + pax_force_retaddr 0, 1
26555 ret
26556 ENDPROC(efi_call3)
26557
26558 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
26559 call *%rdi
26560 addq $32, %rsp
26561 RESTORE_XMM
26562 + pax_force_retaddr 0, 1
26563 ret
26564 ENDPROC(efi_call4)
26565
26566 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
26567 call *%rdi
26568 addq $48, %rsp
26569 RESTORE_XMM
26570 + pax_force_retaddr 0, 1
26571 ret
26572 ENDPROC(efi_call5)
26573
26574 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
26575 call *%rdi
26576 addq $48, %rsp
26577 RESTORE_XMM
26578 + pax_force_retaddr 0, 1
26579 ret
26580 ENDPROC(efi_call6)
26581 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26582 index e31bcd8..f12dc46 100644
26583 --- a/arch/x86/platform/mrst/mrst.c
26584 +++ b/arch/x86/platform/mrst/mrst.c
26585 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26586 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26587 int sfi_mrtc_num;
26588
26589 -static void mrst_power_off(void)
26590 +static __noreturn void mrst_power_off(void)
26591 {
26592 + BUG();
26593 }
26594
26595 -static void mrst_reboot(void)
26596 +static __noreturn void mrst_reboot(void)
26597 {
26598 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26599 + BUG();
26600 }
26601
26602 /* parse all the mtimer info to a static mtimer array */
26603 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26604 index 218cdb1..fd55c08 100644
26605 --- a/arch/x86/power/cpu.c
26606 +++ b/arch/x86/power/cpu.c
26607 @@ -132,7 +132,7 @@ static void do_fpu_end(void)
26608 static void fix_processor_context(void)
26609 {
26610 int cpu = smp_processor_id();
26611 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26612 + struct tss_struct *t = init_tss + cpu;
26613
26614 set_tss_desc(cpu, t); /*
26615 * This just modifies memory; should not be
26616 @@ -142,7 +142,9 @@ static void fix_processor_context(void)
26617 */
26618
26619 #ifdef CONFIG_X86_64
26620 + pax_open_kernel();
26621 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26622 + pax_close_kernel();
26623
26624 syscall_init(); /* This sets MSR_*STAR and related */
26625 #endif
26626 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
26627 index b685296..0180fa9 100644
26628 --- a/arch/x86/tools/relocs.c
26629 +++ b/arch/x86/tools/relocs.c
26630 @@ -12,10 +12,13 @@
26631 #include <regex.h>
26632 #include <tools/le_byteshift.h>
26633
26634 +#include "../../../include/generated/autoconf.h"
26635 +
26636 static void die(char *fmt, ...);
26637
26638 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26639 static Elf32_Ehdr ehdr;
26640 +static Elf32_Phdr *phdr;
26641 static unsigned long reloc_count, reloc_idx;
26642 static unsigned long *relocs;
26643 static unsigned long reloc16_count, reloc16_idx;
26644 @@ -323,9 +326,39 @@ static void read_ehdr(FILE *fp)
26645 }
26646 }
26647
26648 +static void read_phdrs(FILE *fp)
26649 +{
26650 + unsigned int i;
26651 +
26652 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26653 + if (!phdr) {
26654 + die("Unable to allocate %d program headers\n",
26655 + ehdr.e_phnum);
26656 + }
26657 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26658 + die("Seek to %d failed: %s\n",
26659 + ehdr.e_phoff, strerror(errno));
26660 + }
26661 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26662 + die("Cannot read ELF program headers: %s\n",
26663 + strerror(errno));
26664 + }
26665 + for(i = 0; i < ehdr.e_phnum; i++) {
26666 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26667 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26668 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26669 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26670 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26671 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26672 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26673 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26674 + }
26675 +
26676 +}
26677 +
26678 static void read_shdrs(FILE *fp)
26679 {
26680 - int i;
26681 + unsigned int i;
26682 Elf32_Shdr shdr;
26683
26684 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26685 @@ -360,7 +393,7 @@ static void read_shdrs(FILE *fp)
26686
26687 static void read_strtabs(FILE *fp)
26688 {
26689 - int i;
26690 + unsigned int i;
26691 for (i = 0; i < ehdr.e_shnum; i++) {
26692 struct section *sec = &secs[i];
26693 if (sec->shdr.sh_type != SHT_STRTAB) {
26694 @@ -385,7 +418,7 @@ static void read_strtabs(FILE *fp)
26695
26696 static void read_symtabs(FILE *fp)
26697 {
26698 - int i,j;
26699 + unsigned int i,j;
26700 for (i = 0; i < ehdr.e_shnum; i++) {
26701 struct section *sec = &secs[i];
26702 if (sec->shdr.sh_type != SHT_SYMTAB) {
26703 @@ -418,7 +451,9 @@ static void read_symtabs(FILE *fp)
26704
26705 static void read_relocs(FILE *fp)
26706 {
26707 - int i,j;
26708 + unsigned int i,j;
26709 + uint32_t base;
26710 +
26711 for (i = 0; i < ehdr.e_shnum; i++) {
26712 struct section *sec = &secs[i];
26713 if (sec->shdr.sh_type != SHT_REL) {
26714 @@ -438,9 +473,22 @@ static void read_relocs(FILE *fp)
26715 die("Cannot read symbol table: %s\n",
26716 strerror(errno));
26717 }
26718 + base = 0;
26719 +
26720 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26721 + for (j = 0; j < ehdr.e_phnum; j++) {
26722 + if (phdr[j].p_type != PT_LOAD )
26723 + continue;
26724 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26725 + continue;
26726 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26727 + break;
26728 + }
26729 +#endif
26730 +
26731 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26732 Elf32_Rel *rel = &sec->reltab[j];
26733 - rel->r_offset = elf32_to_cpu(rel->r_offset);
26734 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26735 rel->r_info = elf32_to_cpu(rel->r_info);
26736 }
26737 }
26738 @@ -449,13 +497,13 @@ static void read_relocs(FILE *fp)
26739
26740 static void print_absolute_symbols(void)
26741 {
26742 - int i;
26743 + unsigned int i;
26744 printf("Absolute symbols\n");
26745 printf(" Num: Value Size Type Bind Visibility Name\n");
26746 for (i = 0; i < ehdr.e_shnum; i++) {
26747 struct section *sec = &secs[i];
26748 char *sym_strtab;
26749 - int j;
26750 + unsigned int j;
26751
26752 if (sec->shdr.sh_type != SHT_SYMTAB) {
26753 continue;
26754 @@ -482,7 +530,7 @@ static void print_absolute_symbols(void)
26755
26756 static void print_absolute_relocs(void)
26757 {
26758 - int i, printed = 0;
26759 + unsigned int i, printed = 0;
26760
26761 for (i = 0; i < ehdr.e_shnum; i++) {
26762 struct section *sec = &secs[i];
26763 @@ -551,7 +599,7 @@ static void print_absolute_relocs(void)
26764 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26765 int use_real_mode)
26766 {
26767 - int i;
26768 + unsigned int i;
26769 /* Walk through the relocations */
26770 for (i = 0; i < ehdr.e_shnum; i++) {
26771 char *sym_strtab;
26772 @@ -581,6 +629,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26773 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
26774 r_type = ELF32_R_TYPE(rel->r_info);
26775
26776 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
26777 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
26778 + continue;
26779 +
26780 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26781 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
26782 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
26783 + continue;
26784 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
26785 + continue;
26786 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
26787 + continue;
26788 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
26789 + continue;
26790 +#endif
26791 +
26792 shn_abs = sym->st_shndx == SHN_ABS;
26793
26794 switch (r_type) {
26795 @@ -674,7 +738,7 @@ static int write32(unsigned int v, FILE *f)
26796
26797 static void emit_relocs(int as_text, int use_real_mode)
26798 {
26799 - int i;
26800 + unsigned int i;
26801 /* Count how many relocations I have and allocate space for them. */
26802 reloc_count = 0;
26803 walk_relocs(count_reloc, use_real_mode);
26804 @@ -801,6 +865,7 @@ int main(int argc, char **argv)
26805 fname, strerror(errno));
26806 }
26807 read_ehdr(fp);
26808 + read_phdrs(fp);
26809 read_shdrs(fp);
26810 read_strtabs(fp);
26811 read_symtabs(fp);
26812 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26813 index fd14be1..e3c79c0 100644
26814 --- a/arch/x86/vdso/Makefile
26815 +++ b/arch/x86/vdso/Makefile
26816 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
26817 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26818 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26819
26820 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26821 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26822 GCOV_PROFILE := n
26823
26824 #
26825 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26826 index 66e6d93..587f435 100644
26827 --- a/arch/x86/vdso/vdso32-setup.c
26828 +++ b/arch/x86/vdso/vdso32-setup.c
26829 @@ -25,6 +25,7 @@
26830 #include <asm/tlbflush.h>
26831 #include <asm/vdso.h>
26832 #include <asm/proto.h>
26833 +#include <asm/mman.h>
26834
26835 enum {
26836 VDSO_DISABLED = 0,
26837 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26838 void enable_sep_cpu(void)
26839 {
26840 int cpu = get_cpu();
26841 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26842 + struct tss_struct *tss = init_tss + cpu;
26843
26844 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26845 put_cpu();
26846 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26847 gate_vma.vm_start = FIXADDR_USER_START;
26848 gate_vma.vm_end = FIXADDR_USER_END;
26849 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26850 - gate_vma.vm_page_prot = __P101;
26851 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26852
26853 return 0;
26854 }
26855 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26856 if (compat)
26857 addr = VDSO_HIGH_BASE;
26858 else {
26859 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26860 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26861 if (IS_ERR_VALUE(addr)) {
26862 ret = addr;
26863 goto up_fail;
26864 }
26865 }
26866
26867 - current->mm->context.vdso = (void *)addr;
26868 + current->mm->context.vdso = addr;
26869
26870 if (compat_uses_vma || !compat) {
26871 /*
26872 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26873 }
26874
26875 current_thread_info()->sysenter_return =
26876 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26877 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26878
26879 up_fail:
26880 if (ret)
26881 - current->mm->context.vdso = NULL;
26882 + current->mm->context.vdso = 0;
26883
26884 up_write(&mm->mmap_sem);
26885
26886 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
26887
26888 const char *arch_vma_name(struct vm_area_struct *vma)
26889 {
26890 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26891 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26892 return "[vdso]";
26893 +
26894 +#ifdef CONFIG_PAX_SEGMEXEC
26895 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26896 + return "[vdso]";
26897 +#endif
26898 +
26899 return NULL;
26900 }
26901
26902 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26903 * Check to see if the corresponding task was created in compat vdso
26904 * mode.
26905 */
26906 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26907 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26908 return &gate_vma;
26909 return NULL;
26910 }
26911 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26912 index 00aaf04..4a26505 100644
26913 --- a/arch/x86/vdso/vma.c
26914 +++ b/arch/x86/vdso/vma.c
26915 @@ -16,8 +16,6 @@
26916 #include <asm/vdso.h>
26917 #include <asm/page.h>
26918
26919 -unsigned int __read_mostly vdso_enabled = 1;
26920 -
26921 extern char vdso_start[], vdso_end[];
26922 extern unsigned short vdso_sync_cpuid;
26923
26924 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26925 * unaligned here as a result of stack start randomization.
26926 */
26927 addr = PAGE_ALIGN(addr);
26928 - addr = align_addr(addr, NULL, ALIGN_VDSO);
26929
26930 return addr;
26931 }
26932 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
26933 unsigned size)
26934 {
26935 struct mm_struct *mm = current->mm;
26936 - unsigned long addr;
26937 + unsigned long addr = 0;
26938 int ret;
26939
26940 - if (!vdso_enabled)
26941 - return 0;
26942 -
26943 down_write(&mm->mmap_sem);
26944 +
26945 +#ifdef CONFIG_PAX_RANDMMAP
26946 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26947 +#endif
26948 +
26949 addr = vdso_addr(mm->start_stack, size);
26950 + addr = align_addr(addr, NULL, ALIGN_VDSO);
26951 addr = get_unmapped_area(NULL, addr, size, 0, 0);
26952 if (IS_ERR_VALUE(addr)) {
26953 ret = addr;
26954 goto up_fail;
26955 }
26956
26957 - current->mm->context.vdso = (void *)addr;
26958 + mm->context.vdso = addr;
26959
26960 ret = install_special_mapping(mm, addr, size,
26961 VM_READ|VM_EXEC|
26962 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
26963 pages);
26964 - if (ret) {
26965 - current->mm->context.vdso = NULL;
26966 - goto up_fail;
26967 - }
26968 + if (ret)
26969 + mm->context.vdso = 0;
26970
26971 up_fail:
26972 up_write(&mm->mmap_sem);
26973 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26974 vdsox32_size);
26975 }
26976 #endif
26977 -
26978 -static __init int vdso_setup(char *s)
26979 -{
26980 - vdso_enabled = simple_strtoul(s, NULL, 0);
26981 - return 0;
26982 -}
26983 -__setup("vdso=", vdso_setup);
26984 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26985 index 95dccce..de96944 100644
26986 --- a/arch/x86/xen/enlighten.c
26987 +++ b/arch/x86/xen/enlighten.c
26988 @@ -95,8 +95,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26989
26990 struct shared_info xen_dummy_shared_info;
26991
26992 -void *xen_initial_gdt;
26993 -
26994 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
26995 __read_mostly int xen_have_vector_callback;
26996 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
26997 @@ -1106,7 +1104,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
26998 .wbinvd = native_wbinvd,
26999
27000 .read_msr = native_read_msr_safe,
27001 + .rdmsr_regs = native_rdmsr_safe_regs,
27002 .write_msr = xen_write_msr_safe,
27003 + .wrmsr_regs = native_wrmsr_safe_regs,
27004 +
27005 .read_tsc = native_read_tsc,
27006 .read_pmc = native_read_pmc,
27007
27008 @@ -1154,30 +1155,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27009 #endif
27010 };
27011
27012 -static void xen_reboot(int reason)
27013 +static __noreturn void xen_reboot(int reason)
27014 {
27015 struct sched_shutdown r = { .reason = reason };
27016
27017 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27018 - BUG();
27019 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27020 + BUG();
27021 }
27022
27023 -static void xen_restart(char *msg)
27024 +static __noreturn void xen_restart(char *msg)
27025 {
27026 xen_reboot(SHUTDOWN_reboot);
27027 }
27028
27029 -static void xen_emergency_restart(void)
27030 +static __noreturn void xen_emergency_restart(void)
27031 {
27032 xen_reboot(SHUTDOWN_reboot);
27033 }
27034
27035 -static void xen_machine_halt(void)
27036 +static __noreturn void xen_machine_halt(void)
27037 {
27038 xen_reboot(SHUTDOWN_poweroff);
27039 }
27040
27041 -static void xen_machine_power_off(void)
27042 +static __noreturn void xen_machine_power_off(void)
27043 {
27044 if (pm_power_off)
27045 pm_power_off();
27046 @@ -1280,7 +1281,17 @@ asmlinkage void __init xen_start_kernel(void)
27047 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27048
27049 /* Work out if we support NX */
27050 - x86_configure_nx();
27051 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27052 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27053 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27054 + unsigned l, h;
27055 +
27056 + __supported_pte_mask |= _PAGE_NX;
27057 + rdmsr(MSR_EFER, l, h);
27058 + l |= EFER_NX;
27059 + wrmsr(MSR_EFER, l, h);
27060 + }
27061 +#endif
27062
27063 xen_setup_features();
27064
27065 @@ -1311,13 +1322,6 @@ asmlinkage void __init xen_start_kernel(void)
27066
27067 machine_ops = xen_machine_ops;
27068
27069 - /*
27070 - * The only reliable way to retain the initial address of the
27071 - * percpu gdt_page is to remember it here, so we can go and
27072 - * mark it RW later, when the initial percpu area is freed.
27073 - */
27074 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27075 -
27076 xen_smp_init();
27077
27078 #ifdef CONFIG_ACPI_NUMA
27079 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27080 index 69f5857..0699dc5 100644
27081 --- a/arch/x86/xen/mmu.c
27082 +++ b/arch/x86/xen/mmu.c
27083 @@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27084 convert_pfn_mfn(init_level4_pgt);
27085 convert_pfn_mfn(level3_ident_pgt);
27086 convert_pfn_mfn(level3_kernel_pgt);
27087 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27088 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27089 + convert_pfn_mfn(level3_vmemmap_pgt);
27090
27091 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27092 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27093 @@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27094 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27095 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27096 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27097 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27098 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27099 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27100 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27101 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27102 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27103 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27104
27105 @@ -1964,6 +1971,7 @@ static void __init xen_post_allocator_init(void)
27106 pv_mmu_ops.set_pud = xen_set_pud;
27107 #if PAGETABLE_LEVELS == 4
27108 pv_mmu_ops.set_pgd = xen_set_pgd;
27109 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27110 #endif
27111
27112 /* This will work as long as patching hasn't happened yet
27113 @@ -2045,6 +2053,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27114 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27115 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27116 .set_pgd = xen_set_pgd_hyper,
27117 + .set_pgd_batched = xen_set_pgd_hyper,
27118
27119 .alloc_pud = xen_alloc_pmd_init,
27120 .release_pud = xen_release_pmd_init,
27121 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27122 index 0503c0c..ceb2d16 100644
27123 --- a/arch/x86/xen/smp.c
27124 +++ b/arch/x86/xen/smp.c
27125 @@ -215,11 +215,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27126 {
27127 BUG_ON(smp_processor_id() != 0);
27128 native_smp_prepare_boot_cpu();
27129 -
27130 - /* We've switched to the "real" per-cpu gdt, so make sure the
27131 - old memory can be recycled */
27132 - make_lowmem_page_readwrite(xen_initial_gdt);
27133 -
27134 xen_filter_cpu_maps();
27135 xen_setup_vcpu_info_placement();
27136 }
27137 @@ -296,12 +291,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27138 gdt = get_cpu_gdt_table(cpu);
27139
27140 ctxt->flags = VGCF_IN_KERNEL;
27141 - ctxt->user_regs.ds = __USER_DS;
27142 - ctxt->user_regs.es = __USER_DS;
27143 + ctxt->user_regs.ds = __KERNEL_DS;
27144 + ctxt->user_regs.es = __KERNEL_DS;
27145 ctxt->user_regs.ss = __KERNEL_DS;
27146 #ifdef CONFIG_X86_32
27147 ctxt->user_regs.fs = __KERNEL_PERCPU;
27148 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27149 + savesegment(gs, ctxt->user_regs.gs);
27150 #else
27151 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27152 #endif
27153 @@ -352,13 +347,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27154 int rc;
27155
27156 per_cpu(current_task, cpu) = idle;
27157 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27158 #ifdef CONFIG_X86_32
27159 irq_ctx_init(cpu);
27160 #else
27161 clear_tsk_thread_flag(idle, TIF_FORK);
27162 - per_cpu(kernel_stack, cpu) =
27163 - (unsigned long)task_stack_page(idle) -
27164 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27165 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27166 #endif
27167 xen_setup_runstate_info(cpu);
27168 xen_setup_timer(cpu);
27169 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27170 index b040b0e..8cc4fe0 100644
27171 --- a/arch/x86/xen/xen-asm_32.S
27172 +++ b/arch/x86/xen/xen-asm_32.S
27173 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27174 ESP_OFFSET=4 # bytes pushed onto stack
27175
27176 /*
27177 - * Store vcpu_info pointer for easy access. Do it this way to
27178 - * avoid having to reload %fs
27179 + * Store vcpu_info pointer for easy access.
27180 */
27181 #ifdef CONFIG_SMP
27182 - GET_THREAD_INFO(%eax)
27183 - movl TI_cpu(%eax), %eax
27184 - movl __per_cpu_offset(,%eax,4), %eax
27185 - mov xen_vcpu(%eax), %eax
27186 + push %fs
27187 + mov $(__KERNEL_PERCPU), %eax
27188 + mov %eax, %fs
27189 + mov PER_CPU_VAR(xen_vcpu), %eax
27190 + pop %fs
27191 #else
27192 movl xen_vcpu, %eax
27193 #endif
27194 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27195 index aaa7291..3f77960 100644
27196 --- a/arch/x86/xen/xen-head.S
27197 +++ b/arch/x86/xen/xen-head.S
27198 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27199 #ifdef CONFIG_X86_32
27200 mov %esi,xen_start_info
27201 mov $init_thread_union+THREAD_SIZE,%esp
27202 +#ifdef CONFIG_SMP
27203 + movl $cpu_gdt_table,%edi
27204 + movl $__per_cpu_load,%eax
27205 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27206 + rorl $16,%eax
27207 + movb %al,__KERNEL_PERCPU + 4(%edi)
27208 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27209 + movl $__per_cpu_end - 1,%eax
27210 + subl $__per_cpu_start,%eax
27211 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27212 +#endif
27213 #else
27214 mov %rsi,xen_start_info
27215 mov $init_thread_union+THREAD_SIZE,%rsp
27216 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27217 index b095739..8c17bcd 100644
27218 --- a/arch/x86/xen/xen-ops.h
27219 +++ b/arch/x86/xen/xen-ops.h
27220 @@ -10,8 +10,6 @@
27221 extern const char xen_hypervisor_callback[];
27222 extern const char xen_failsafe_callback[];
27223
27224 -extern void *xen_initial_gdt;
27225 -
27226 struct trap_info;
27227 void xen_copy_trap_info(struct trap_info *traps);
27228
27229 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27230 index 525bd3d..ef888b1 100644
27231 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
27232 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27233 @@ -119,9 +119,9 @@
27234 ----------------------------------------------------------------------*/
27235
27236 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27237 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27238 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27239 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27240 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27241
27242 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27243 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27244 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27245 index 2f33760..835e50a 100644
27246 --- a/arch/xtensa/variants/fsf/include/variant/core.h
27247 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
27248 @@ -11,6 +11,7 @@
27249 #ifndef _XTENSA_CORE_H
27250 #define _XTENSA_CORE_H
27251
27252 +#include <linux/const.h>
27253
27254 /****************************************************************************
27255 Parameters Useful for Any Code, USER or PRIVILEGED
27256 @@ -112,9 +113,9 @@
27257 ----------------------------------------------------------------------*/
27258
27259 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27260 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27261 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27262 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27263 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27264
27265 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27266 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27267 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27268 index af00795..2bb8105 100644
27269 --- a/arch/xtensa/variants/s6000/include/variant/core.h
27270 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
27271 @@ -11,6 +11,7 @@
27272 #ifndef _XTENSA_CORE_CONFIGURATION_H
27273 #define _XTENSA_CORE_CONFIGURATION_H
27274
27275 +#include <linux/const.h>
27276
27277 /****************************************************************************
27278 Parameters Useful for Any Code, USER or PRIVILEGED
27279 @@ -118,9 +119,9 @@
27280 ----------------------------------------------------------------------*/
27281
27282 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27283 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27284 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27285 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27286 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27287
27288 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27289 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27290 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27291 index 58916af..9cb880b 100644
27292 --- a/block/blk-iopoll.c
27293 +++ b/block/blk-iopoll.c
27294 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27295 }
27296 EXPORT_SYMBOL(blk_iopoll_complete);
27297
27298 -static void blk_iopoll_softirq(struct softirq_action *h)
27299 +static void blk_iopoll_softirq(void)
27300 {
27301 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27302 int rearm = 0, budget = blk_iopoll_budget;
27303 diff --git a/block/blk-map.c b/block/blk-map.c
27304 index 623e1cd..ca1e109 100644
27305 --- a/block/blk-map.c
27306 +++ b/block/blk-map.c
27307 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27308 if (!len || !kbuf)
27309 return -EINVAL;
27310
27311 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27312 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27313 if (do_copy)
27314 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27315 else
27316 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27317 index 467c8de..4bddc6d 100644
27318 --- a/block/blk-softirq.c
27319 +++ b/block/blk-softirq.c
27320 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27321 * Softirq action handler - move entries to local list and loop over them
27322 * while passing them to the queue registered handler.
27323 */
27324 -static void blk_done_softirq(struct softirq_action *h)
27325 +static void blk_done_softirq(void)
27326 {
27327 struct list_head *cpu_list, local_list;
27328
27329 diff --git a/block/bsg.c b/block/bsg.c
27330 index ff64ae3..593560c 100644
27331 --- a/block/bsg.c
27332 +++ b/block/bsg.c
27333 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27334 struct sg_io_v4 *hdr, struct bsg_device *bd,
27335 fmode_t has_write_perm)
27336 {
27337 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27338 + unsigned char *cmdptr;
27339 +
27340 if (hdr->request_len > BLK_MAX_CDB) {
27341 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27342 if (!rq->cmd)
27343 return -ENOMEM;
27344 - }
27345 + cmdptr = rq->cmd;
27346 + } else
27347 + cmdptr = tmpcmd;
27348
27349 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27350 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27351 hdr->request_len))
27352 return -EFAULT;
27353
27354 + if (cmdptr != rq->cmd)
27355 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27356 +
27357 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27358 if (blk_verify_command(rq->cmd, has_write_perm))
27359 return -EPERM;
27360 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27361 index 7c668c8..db3521c 100644
27362 --- a/block/compat_ioctl.c
27363 +++ b/block/compat_ioctl.c
27364 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27365 err |= __get_user(f->spec1, &uf->spec1);
27366 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27367 err |= __get_user(name, &uf->name);
27368 - f->name = compat_ptr(name);
27369 + f->name = (void __force_kernel *)compat_ptr(name);
27370 if (err) {
27371 err = -EFAULT;
27372 goto out;
27373 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27374 index 6296b40..417c00f 100644
27375 --- a/block/partitions/efi.c
27376 +++ b/block/partitions/efi.c
27377 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27378 if (!gpt)
27379 return NULL;
27380
27381 + if (!le32_to_cpu(gpt->num_partition_entries))
27382 + return NULL;
27383 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27384 + if (!pte)
27385 + return NULL;
27386 +
27387 count = le32_to_cpu(gpt->num_partition_entries) *
27388 le32_to_cpu(gpt->sizeof_partition_entry);
27389 - if (!count)
27390 - return NULL;
27391 - pte = kzalloc(count, GFP_KERNEL);
27392 - if (!pte)
27393 - return NULL;
27394 -
27395 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27396 (u8 *) pte,
27397 count) < count) {
27398 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27399 index 260fa80..e8f3caf 100644
27400 --- a/block/scsi_ioctl.c
27401 +++ b/block/scsi_ioctl.c
27402 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27403 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27404 struct sg_io_hdr *hdr, fmode_t mode)
27405 {
27406 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27407 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27408 + unsigned char *cmdptr;
27409 +
27410 + if (rq->cmd != rq->__cmd)
27411 + cmdptr = rq->cmd;
27412 + else
27413 + cmdptr = tmpcmd;
27414 +
27415 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27416 return -EFAULT;
27417 +
27418 + if (cmdptr != rq->cmd)
27419 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27420 +
27421 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27422 return -EPERM;
27423
27424 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27425 int err;
27426 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27427 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27428 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27429 + unsigned char *cmdptr;
27430
27431 if (!sic)
27432 return -EINVAL;
27433 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27434 */
27435 err = -EFAULT;
27436 rq->cmd_len = cmdlen;
27437 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27438 +
27439 + if (rq->cmd != rq->__cmd)
27440 + cmdptr = rq->cmd;
27441 + else
27442 + cmdptr = tmpcmd;
27443 +
27444 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27445 goto error;
27446
27447 + if (rq->cmd != cmdptr)
27448 + memcpy(rq->cmd, cmdptr, cmdlen);
27449 +
27450 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27451 goto error;
27452
27453 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27454 index 671d4d6..5f24030 100644
27455 --- a/crypto/cryptd.c
27456 +++ b/crypto/cryptd.c
27457 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27458
27459 struct cryptd_blkcipher_request_ctx {
27460 crypto_completion_t complete;
27461 -};
27462 +} __no_const;
27463
27464 struct cryptd_hash_ctx {
27465 struct crypto_shash *child;
27466 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27467
27468 struct cryptd_aead_request_ctx {
27469 crypto_completion_t complete;
27470 -};
27471 +} __no_const;
27472
27473 static void cryptd_queue_worker(struct work_struct *work);
27474
27475 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27476 index e6defd8..c26a225 100644
27477 --- a/drivers/acpi/apei/cper.c
27478 +++ b/drivers/acpi/apei/cper.c
27479 @@ -38,12 +38,12 @@
27480 */
27481 u64 cper_next_record_id(void)
27482 {
27483 - static atomic64_t seq;
27484 + static atomic64_unchecked_t seq;
27485
27486 - if (!atomic64_read(&seq))
27487 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
27488 + if (!atomic64_read_unchecked(&seq))
27489 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27490
27491 - return atomic64_inc_return(&seq);
27492 + return atomic64_inc_return_unchecked(&seq);
27493 }
27494 EXPORT_SYMBOL_GPL(cper_next_record_id);
27495
27496 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27497 index 7586544..636a2f0 100644
27498 --- a/drivers/acpi/ec_sys.c
27499 +++ b/drivers/acpi/ec_sys.c
27500 @@ -12,6 +12,7 @@
27501 #include <linux/acpi.h>
27502 #include <linux/debugfs.h>
27503 #include <linux/module.h>
27504 +#include <linux/uaccess.h>
27505 #include "internal.h"
27506
27507 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27508 @@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27509 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27510 */
27511 unsigned int size = EC_SPACE_SIZE;
27512 - u8 *data = (u8 *) buf;
27513 + u8 data;
27514 loff_t init_off = *off;
27515 int err = 0;
27516
27517 @@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27518 size = count;
27519
27520 while (size) {
27521 - err = ec_read(*off, &data[*off - init_off]);
27522 + err = ec_read(*off, &data);
27523 if (err)
27524 return err;
27525 + if (put_user(data, &buf[*off - init_off]))
27526 + return -EFAULT;
27527 *off += 1;
27528 size--;
27529 }
27530 @@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27531
27532 unsigned int size = count;
27533 loff_t init_off = *off;
27534 - u8 *data = (u8 *) buf;
27535 int err = 0;
27536
27537 if (*off >= EC_SPACE_SIZE)
27538 @@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27539 }
27540
27541 while (size) {
27542 - u8 byte_write = data[*off - init_off];
27543 + u8 byte_write;
27544 + if (get_user(byte_write, &buf[*off - init_off]))
27545 + return -EFAULT;
27546 err = ec_write(*off, byte_write);
27547 if (err)
27548 return err;
27549 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27550 index 251c7b62..000462d 100644
27551 --- a/drivers/acpi/proc.c
27552 +++ b/drivers/acpi/proc.c
27553 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27554 size_t count, loff_t * ppos)
27555 {
27556 struct list_head *node, *next;
27557 - char strbuf[5];
27558 - char str[5] = "";
27559 - unsigned int len = count;
27560 + char strbuf[5] = {0};
27561
27562 - if (len > 4)
27563 - len = 4;
27564 - if (len < 0)
27565 + if (count > 4)
27566 + count = 4;
27567 + if (copy_from_user(strbuf, buffer, count))
27568 return -EFAULT;
27569 -
27570 - if (copy_from_user(strbuf, buffer, len))
27571 - return -EFAULT;
27572 - strbuf[len] = '\0';
27573 - sscanf(strbuf, "%s", str);
27574 + strbuf[count] = '\0';
27575
27576 mutex_lock(&acpi_device_lock);
27577 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27578 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27579 if (!dev->wakeup.flags.valid)
27580 continue;
27581
27582 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27583 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27584 if (device_can_wakeup(&dev->dev)) {
27585 bool enable = !device_may_wakeup(&dev->dev);
27586 device_set_wakeup_enable(&dev->dev, enable);
27587 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27588 index 0734086..3ad3e4c 100644
27589 --- a/drivers/acpi/processor_driver.c
27590 +++ b/drivers/acpi/processor_driver.c
27591 @@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27592 return 0;
27593 #endif
27594
27595 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27596 + BUG_ON(pr->id >= nr_cpu_ids);
27597
27598 /*
27599 * Buggy BIOS check
27600 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27601 index 23763a1..6375e67 100644
27602 --- a/drivers/ata/libata-core.c
27603 +++ b/drivers/ata/libata-core.c
27604 @@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27605 struct ata_port *ap;
27606 unsigned int tag;
27607
27608 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27609 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27610 ap = qc->ap;
27611
27612 qc->flags = 0;
27613 @@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27614 struct ata_port *ap;
27615 struct ata_link *link;
27616
27617 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27618 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27619 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27620 ap = qc->ap;
27621 link = qc->dev->link;
27622 @@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27623 return;
27624
27625 spin_lock(&lock);
27626 + pax_open_kernel();
27627
27628 for (cur = ops->inherits; cur; cur = cur->inherits) {
27629 void **inherit = (void **)cur;
27630 @@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27631 if (IS_ERR(*pp))
27632 *pp = NULL;
27633
27634 - ops->inherits = NULL;
27635 + *(struct ata_port_operations **)&ops->inherits = NULL;
27636
27637 + pax_close_kernel();
27638 spin_unlock(&lock);
27639 }
27640
27641 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27642 index 3239517..343b5f6 100644
27643 --- a/drivers/ata/pata_arasan_cf.c
27644 +++ b/drivers/ata/pata_arasan_cf.c
27645 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27646 /* Handle platform specific quirks */
27647 if (pdata->quirk) {
27648 if (pdata->quirk & CF_BROKEN_PIO) {
27649 - ap->ops->set_piomode = NULL;
27650 + pax_open_kernel();
27651 + *(void **)&ap->ops->set_piomode = NULL;
27652 + pax_close_kernel();
27653 ap->pio_mask = 0;
27654 }
27655 if (pdata->quirk & CF_BROKEN_MWDMA)
27656 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27657 index f9b983a..887b9d8 100644
27658 --- a/drivers/atm/adummy.c
27659 +++ b/drivers/atm/adummy.c
27660 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27661 vcc->pop(vcc, skb);
27662 else
27663 dev_kfree_skb_any(skb);
27664 - atomic_inc(&vcc->stats->tx);
27665 + atomic_inc_unchecked(&vcc->stats->tx);
27666
27667 return 0;
27668 }
27669 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27670 index f8f41e0..1f987dd 100644
27671 --- a/drivers/atm/ambassador.c
27672 +++ b/drivers/atm/ambassador.c
27673 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27674 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27675
27676 // VC layer stats
27677 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27678 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27679
27680 // free the descriptor
27681 kfree (tx_descr);
27682 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27683 dump_skb ("<<<", vc, skb);
27684
27685 // VC layer stats
27686 - atomic_inc(&atm_vcc->stats->rx);
27687 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27688 __net_timestamp(skb);
27689 // end of our responsibility
27690 atm_vcc->push (atm_vcc, skb);
27691 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27692 } else {
27693 PRINTK (KERN_INFO, "dropped over-size frame");
27694 // should we count this?
27695 - atomic_inc(&atm_vcc->stats->rx_drop);
27696 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27697 }
27698
27699 } else {
27700 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27701 }
27702
27703 if (check_area (skb->data, skb->len)) {
27704 - atomic_inc(&atm_vcc->stats->tx_err);
27705 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27706 return -ENOMEM; // ?
27707 }
27708
27709 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27710 index b22d71c..d6e1049 100644
27711 --- a/drivers/atm/atmtcp.c
27712 +++ b/drivers/atm/atmtcp.c
27713 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27714 if (vcc->pop) vcc->pop(vcc,skb);
27715 else dev_kfree_skb(skb);
27716 if (dev_data) return 0;
27717 - atomic_inc(&vcc->stats->tx_err);
27718 + atomic_inc_unchecked(&vcc->stats->tx_err);
27719 return -ENOLINK;
27720 }
27721 size = skb->len+sizeof(struct atmtcp_hdr);
27722 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27723 if (!new_skb) {
27724 if (vcc->pop) vcc->pop(vcc,skb);
27725 else dev_kfree_skb(skb);
27726 - atomic_inc(&vcc->stats->tx_err);
27727 + atomic_inc_unchecked(&vcc->stats->tx_err);
27728 return -ENOBUFS;
27729 }
27730 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27731 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27732 if (vcc->pop) vcc->pop(vcc,skb);
27733 else dev_kfree_skb(skb);
27734 out_vcc->push(out_vcc,new_skb);
27735 - atomic_inc(&vcc->stats->tx);
27736 - atomic_inc(&out_vcc->stats->rx);
27737 + atomic_inc_unchecked(&vcc->stats->tx);
27738 + atomic_inc_unchecked(&out_vcc->stats->rx);
27739 return 0;
27740 }
27741
27742 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27743 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27744 read_unlock(&vcc_sklist_lock);
27745 if (!out_vcc) {
27746 - atomic_inc(&vcc->stats->tx_err);
27747 + atomic_inc_unchecked(&vcc->stats->tx_err);
27748 goto done;
27749 }
27750 skb_pull(skb,sizeof(struct atmtcp_hdr));
27751 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27752 __net_timestamp(new_skb);
27753 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27754 out_vcc->push(out_vcc,new_skb);
27755 - atomic_inc(&vcc->stats->tx);
27756 - atomic_inc(&out_vcc->stats->rx);
27757 + atomic_inc_unchecked(&vcc->stats->tx);
27758 + atomic_inc_unchecked(&out_vcc->stats->rx);
27759 done:
27760 if (vcc->pop) vcc->pop(vcc,skb);
27761 else dev_kfree_skb(skb);
27762 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27763 index 2059ee4..faf51c7 100644
27764 --- a/drivers/atm/eni.c
27765 +++ b/drivers/atm/eni.c
27766 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27767 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27768 vcc->dev->number);
27769 length = 0;
27770 - atomic_inc(&vcc->stats->rx_err);
27771 + atomic_inc_unchecked(&vcc->stats->rx_err);
27772 }
27773 else {
27774 length = ATM_CELL_SIZE-1; /* no HEC */
27775 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27776 size);
27777 }
27778 eff = length = 0;
27779 - atomic_inc(&vcc->stats->rx_err);
27780 + atomic_inc_unchecked(&vcc->stats->rx_err);
27781 }
27782 else {
27783 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27784 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27785 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27786 vcc->dev->number,vcc->vci,length,size << 2,descr);
27787 length = eff = 0;
27788 - atomic_inc(&vcc->stats->rx_err);
27789 + atomic_inc_unchecked(&vcc->stats->rx_err);
27790 }
27791 }
27792 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27793 @@ -767,7 +767,7 @@ rx_dequeued++;
27794 vcc->push(vcc,skb);
27795 pushed++;
27796 }
27797 - atomic_inc(&vcc->stats->rx);
27798 + atomic_inc_unchecked(&vcc->stats->rx);
27799 }
27800 wake_up(&eni_dev->rx_wait);
27801 }
27802 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
27803 PCI_DMA_TODEVICE);
27804 if (vcc->pop) vcc->pop(vcc,skb);
27805 else dev_kfree_skb_irq(skb);
27806 - atomic_inc(&vcc->stats->tx);
27807 + atomic_inc_unchecked(&vcc->stats->tx);
27808 wake_up(&eni_dev->tx_wait);
27809 dma_complete++;
27810 }
27811 @@ -1567,7 +1567,7 @@ tx_complete++;
27812 /*--------------------------------- entries ---------------------------------*/
27813
27814
27815 -static const char *media_name[] __devinitdata = {
27816 +static const char *media_name[] __devinitconst = {
27817 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27818 "UTP", "05?", "06?", "07?", /* 4- 7 */
27819 "TAXI","09?", "10?", "11?", /* 8-11 */
27820 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27821 index 86fed1b..6dc4721 100644
27822 --- a/drivers/atm/firestream.c
27823 +++ b/drivers/atm/firestream.c
27824 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27825 }
27826 }
27827
27828 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27829 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27830
27831 fs_dprintk (FS_DEBUG_TXMEM, "i");
27832 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27833 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27834 #endif
27835 skb_put (skb, qe->p1 & 0xffff);
27836 ATM_SKB(skb)->vcc = atm_vcc;
27837 - atomic_inc(&atm_vcc->stats->rx);
27838 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27839 __net_timestamp(skb);
27840 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27841 atm_vcc->push (atm_vcc, skb);
27842 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27843 kfree (pe);
27844 }
27845 if (atm_vcc)
27846 - atomic_inc(&atm_vcc->stats->rx_drop);
27847 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27848 break;
27849 case 0x1f: /* Reassembly abort: no buffers. */
27850 /* Silently increment error counter. */
27851 if (atm_vcc)
27852 - atomic_inc(&atm_vcc->stats->rx_drop);
27853 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27854 break;
27855 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27856 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27857 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27858 index 361f5ae..7fc552d 100644
27859 --- a/drivers/atm/fore200e.c
27860 +++ b/drivers/atm/fore200e.c
27861 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27862 #endif
27863 /* check error condition */
27864 if (*entry->status & STATUS_ERROR)
27865 - atomic_inc(&vcc->stats->tx_err);
27866 + atomic_inc_unchecked(&vcc->stats->tx_err);
27867 else
27868 - atomic_inc(&vcc->stats->tx);
27869 + atomic_inc_unchecked(&vcc->stats->tx);
27870 }
27871 }
27872
27873 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27874 if (skb == NULL) {
27875 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27876
27877 - atomic_inc(&vcc->stats->rx_drop);
27878 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27879 return -ENOMEM;
27880 }
27881
27882 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27883
27884 dev_kfree_skb_any(skb);
27885
27886 - atomic_inc(&vcc->stats->rx_drop);
27887 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27888 return -ENOMEM;
27889 }
27890
27891 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27892
27893 vcc->push(vcc, skb);
27894 - atomic_inc(&vcc->stats->rx);
27895 + atomic_inc_unchecked(&vcc->stats->rx);
27896
27897 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27898
27899 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27900 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27901 fore200e->atm_dev->number,
27902 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27903 - atomic_inc(&vcc->stats->rx_err);
27904 + atomic_inc_unchecked(&vcc->stats->rx_err);
27905 }
27906 }
27907
27908 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27909 goto retry_here;
27910 }
27911
27912 - atomic_inc(&vcc->stats->tx_err);
27913 + atomic_inc_unchecked(&vcc->stats->tx_err);
27914
27915 fore200e->tx_sat++;
27916 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27917 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27918 index b182c2f..1c6fa8a 100644
27919 --- a/drivers/atm/he.c
27920 +++ b/drivers/atm/he.c
27921 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27922
27923 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27924 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27925 - atomic_inc(&vcc->stats->rx_drop);
27926 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27927 goto return_host_buffers;
27928 }
27929
27930 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27931 RBRQ_LEN_ERR(he_dev->rbrq_head)
27932 ? "LEN_ERR" : "",
27933 vcc->vpi, vcc->vci);
27934 - atomic_inc(&vcc->stats->rx_err);
27935 + atomic_inc_unchecked(&vcc->stats->rx_err);
27936 goto return_host_buffers;
27937 }
27938
27939 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27940 vcc->push(vcc, skb);
27941 spin_lock(&he_dev->global_lock);
27942
27943 - atomic_inc(&vcc->stats->rx);
27944 + atomic_inc_unchecked(&vcc->stats->rx);
27945
27946 return_host_buffers:
27947 ++pdus_assembled;
27948 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27949 tpd->vcc->pop(tpd->vcc, tpd->skb);
27950 else
27951 dev_kfree_skb_any(tpd->skb);
27952 - atomic_inc(&tpd->vcc->stats->tx_err);
27953 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27954 }
27955 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27956 return;
27957 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27958 vcc->pop(vcc, skb);
27959 else
27960 dev_kfree_skb_any(skb);
27961 - atomic_inc(&vcc->stats->tx_err);
27962 + atomic_inc_unchecked(&vcc->stats->tx_err);
27963 return -EINVAL;
27964 }
27965
27966 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27967 vcc->pop(vcc, skb);
27968 else
27969 dev_kfree_skb_any(skb);
27970 - atomic_inc(&vcc->stats->tx_err);
27971 + atomic_inc_unchecked(&vcc->stats->tx_err);
27972 return -EINVAL;
27973 }
27974 #endif
27975 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27976 vcc->pop(vcc, skb);
27977 else
27978 dev_kfree_skb_any(skb);
27979 - atomic_inc(&vcc->stats->tx_err);
27980 + atomic_inc_unchecked(&vcc->stats->tx_err);
27981 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27982 return -ENOMEM;
27983 }
27984 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27985 vcc->pop(vcc, skb);
27986 else
27987 dev_kfree_skb_any(skb);
27988 - atomic_inc(&vcc->stats->tx_err);
27989 + atomic_inc_unchecked(&vcc->stats->tx_err);
27990 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27991 return -ENOMEM;
27992 }
27993 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27994 __enqueue_tpd(he_dev, tpd, cid);
27995 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27996
27997 - atomic_inc(&vcc->stats->tx);
27998 + atomic_inc_unchecked(&vcc->stats->tx);
27999
28000 return 0;
28001 }
28002 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28003 index 75fd691..2d20b14 100644
28004 --- a/drivers/atm/horizon.c
28005 +++ b/drivers/atm/horizon.c
28006 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28007 {
28008 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28009 // VC layer stats
28010 - atomic_inc(&vcc->stats->rx);
28011 + atomic_inc_unchecked(&vcc->stats->rx);
28012 __net_timestamp(skb);
28013 // end of our responsibility
28014 vcc->push (vcc, skb);
28015 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28016 dev->tx_iovec = NULL;
28017
28018 // VC layer stats
28019 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28020 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28021
28022 // free the skb
28023 hrz_kfree_skb (skb);
28024 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28025 index 1c05212..c28e200 100644
28026 --- a/drivers/atm/idt77252.c
28027 +++ b/drivers/atm/idt77252.c
28028 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28029 else
28030 dev_kfree_skb(skb);
28031
28032 - atomic_inc(&vcc->stats->tx);
28033 + atomic_inc_unchecked(&vcc->stats->tx);
28034 }
28035
28036 atomic_dec(&scq->used);
28037 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28038 if ((sb = dev_alloc_skb(64)) == NULL) {
28039 printk("%s: Can't allocate buffers for aal0.\n",
28040 card->name);
28041 - atomic_add(i, &vcc->stats->rx_drop);
28042 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28043 break;
28044 }
28045 if (!atm_charge(vcc, sb->truesize)) {
28046 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28047 card->name);
28048 - atomic_add(i - 1, &vcc->stats->rx_drop);
28049 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28050 dev_kfree_skb(sb);
28051 break;
28052 }
28053 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28054 ATM_SKB(sb)->vcc = vcc;
28055 __net_timestamp(sb);
28056 vcc->push(vcc, sb);
28057 - atomic_inc(&vcc->stats->rx);
28058 + atomic_inc_unchecked(&vcc->stats->rx);
28059
28060 cell += ATM_CELL_PAYLOAD;
28061 }
28062 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28063 "(CDC: %08x)\n",
28064 card->name, len, rpp->len, readl(SAR_REG_CDC));
28065 recycle_rx_pool_skb(card, rpp);
28066 - atomic_inc(&vcc->stats->rx_err);
28067 + atomic_inc_unchecked(&vcc->stats->rx_err);
28068 return;
28069 }
28070 if (stat & SAR_RSQE_CRC) {
28071 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28072 recycle_rx_pool_skb(card, rpp);
28073 - atomic_inc(&vcc->stats->rx_err);
28074 + atomic_inc_unchecked(&vcc->stats->rx_err);
28075 return;
28076 }
28077 if (skb_queue_len(&rpp->queue) > 1) {
28078 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28079 RXPRINTK("%s: Can't alloc RX skb.\n",
28080 card->name);
28081 recycle_rx_pool_skb(card, rpp);
28082 - atomic_inc(&vcc->stats->rx_err);
28083 + atomic_inc_unchecked(&vcc->stats->rx_err);
28084 return;
28085 }
28086 if (!atm_charge(vcc, skb->truesize)) {
28087 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28088 __net_timestamp(skb);
28089
28090 vcc->push(vcc, skb);
28091 - atomic_inc(&vcc->stats->rx);
28092 + atomic_inc_unchecked(&vcc->stats->rx);
28093
28094 return;
28095 }
28096 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28097 __net_timestamp(skb);
28098
28099 vcc->push(vcc, skb);
28100 - atomic_inc(&vcc->stats->rx);
28101 + atomic_inc_unchecked(&vcc->stats->rx);
28102
28103 if (skb->truesize > SAR_FB_SIZE_3)
28104 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28105 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28106 if (vcc->qos.aal != ATM_AAL0) {
28107 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28108 card->name, vpi, vci);
28109 - atomic_inc(&vcc->stats->rx_drop);
28110 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28111 goto drop;
28112 }
28113
28114 if ((sb = dev_alloc_skb(64)) == NULL) {
28115 printk("%s: Can't allocate buffers for AAL0.\n",
28116 card->name);
28117 - atomic_inc(&vcc->stats->rx_err);
28118 + atomic_inc_unchecked(&vcc->stats->rx_err);
28119 goto drop;
28120 }
28121
28122 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28123 ATM_SKB(sb)->vcc = vcc;
28124 __net_timestamp(sb);
28125 vcc->push(vcc, sb);
28126 - atomic_inc(&vcc->stats->rx);
28127 + atomic_inc_unchecked(&vcc->stats->rx);
28128
28129 drop:
28130 skb_pull(queue, 64);
28131 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28132
28133 if (vc == NULL) {
28134 printk("%s: NULL connection in send().\n", card->name);
28135 - atomic_inc(&vcc->stats->tx_err);
28136 + atomic_inc_unchecked(&vcc->stats->tx_err);
28137 dev_kfree_skb(skb);
28138 return -EINVAL;
28139 }
28140 if (!test_bit(VCF_TX, &vc->flags)) {
28141 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28142 - atomic_inc(&vcc->stats->tx_err);
28143 + atomic_inc_unchecked(&vcc->stats->tx_err);
28144 dev_kfree_skb(skb);
28145 return -EINVAL;
28146 }
28147 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28148 break;
28149 default:
28150 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28151 - atomic_inc(&vcc->stats->tx_err);
28152 + atomic_inc_unchecked(&vcc->stats->tx_err);
28153 dev_kfree_skb(skb);
28154 return -EINVAL;
28155 }
28156
28157 if (skb_shinfo(skb)->nr_frags != 0) {
28158 printk("%s: No scatter-gather yet.\n", card->name);
28159 - atomic_inc(&vcc->stats->tx_err);
28160 + atomic_inc_unchecked(&vcc->stats->tx_err);
28161 dev_kfree_skb(skb);
28162 return -EINVAL;
28163 }
28164 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28165
28166 err = queue_skb(card, vc, skb, oam);
28167 if (err) {
28168 - atomic_inc(&vcc->stats->tx_err);
28169 + atomic_inc_unchecked(&vcc->stats->tx_err);
28170 dev_kfree_skb(skb);
28171 return err;
28172 }
28173 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28174 skb = dev_alloc_skb(64);
28175 if (!skb) {
28176 printk("%s: Out of memory in send_oam().\n", card->name);
28177 - atomic_inc(&vcc->stats->tx_err);
28178 + atomic_inc_unchecked(&vcc->stats->tx_err);
28179 return -ENOMEM;
28180 }
28181 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28182 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28183 index d438601..8b98495 100644
28184 --- a/drivers/atm/iphase.c
28185 +++ b/drivers/atm/iphase.c
28186 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
28187 status = (u_short) (buf_desc_ptr->desc_mode);
28188 if (status & (RX_CER | RX_PTE | RX_OFL))
28189 {
28190 - atomic_inc(&vcc->stats->rx_err);
28191 + atomic_inc_unchecked(&vcc->stats->rx_err);
28192 IF_ERR(printk("IA: bad packet, dropping it");)
28193 if (status & RX_CER) {
28194 IF_ERR(printk(" cause: packet CRC error\n");)
28195 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
28196 len = dma_addr - buf_addr;
28197 if (len > iadev->rx_buf_sz) {
28198 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28199 - atomic_inc(&vcc->stats->rx_err);
28200 + atomic_inc_unchecked(&vcc->stats->rx_err);
28201 goto out_free_desc;
28202 }
28203
28204 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28205 ia_vcc = INPH_IA_VCC(vcc);
28206 if (ia_vcc == NULL)
28207 {
28208 - atomic_inc(&vcc->stats->rx_err);
28209 + atomic_inc_unchecked(&vcc->stats->rx_err);
28210 atm_return(vcc, skb->truesize);
28211 dev_kfree_skb_any(skb);
28212 goto INCR_DLE;
28213 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28214 if ((length > iadev->rx_buf_sz) || (length >
28215 (skb->len - sizeof(struct cpcs_trailer))))
28216 {
28217 - atomic_inc(&vcc->stats->rx_err);
28218 + atomic_inc_unchecked(&vcc->stats->rx_err);
28219 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28220 length, skb->len);)
28221 atm_return(vcc, skb->truesize);
28222 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28223
28224 IF_RX(printk("rx_dle_intr: skb push");)
28225 vcc->push(vcc,skb);
28226 - atomic_inc(&vcc->stats->rx);
28227 + atomic_inc_unchecked(&vcc->stats->rx);
28228 iadev->rx_pkt_cnt++;
28229 }
28230 INCR_DLE:
28231 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28232 {
28233 struct k_sonet_stats *stats;
28234 stats = &PRIV(_ia_dev[board])->sonet_stats;
28235 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28236 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28237 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28238 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28239 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28240 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28241 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28242 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28243 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28244 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28245 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28246 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28247 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28248 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28249 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28250 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28251 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28252 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28253 }
28254 ia_cmds.status = 0;
28255 break;
28256 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28257 if ((desc == 0) || (desc > iadev->num_tx_desc))
28258 {
28259 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28260 - atomic_inc(&vcc->stats->tx);
28261 + atomic_inc_unchecked(&vcc->stats->tx);
28262 if (vcc->pop)
28263 vcc->pop(vcc, skb);
28264 else
28265 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28266 ATM_DESC(skb) = vcc->vci;
28267 skb_queue_tail(&iadev->tx_dma_q, skb);
28268
28269 - atomic_inc(&vcc->stats->tx);
28270 + atomic_inc_unchecked(&vcc->stats->tx);
28271 iadev->tx_pkt_cnt++;
28272 /* Increment transaction counter */
28273 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28274
28275 #if 0
28276 /* add flow control logic */
28277 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28278 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28279 if (iavcc->vc_desc_cnt > 10) {
28280 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28281 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28282 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28283 index 68c7588..7036683 100644
28284 --- a/drivers/atm/lanai.c
28285 +++ b/drivers/atm/lanai.c
28286 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28287 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28288 lanai_endtx(lanai, lvcc);
28289 lanai_free_skb(lvcc->tx.atmvcc, skb);
28290 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28291 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28292 }
28293
28294 /* Try to fill the buffer - don't call unless there is backlog */
28295 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28296 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28297 __net_timestamp(skb);
28298 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28299 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28300 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28301 out:
28302 lvcc->rx.buf.ptr = end;
28303 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28304 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28305 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28306 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28307 lanai->stats.service_rxnotaal5++;
28308 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28309 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28310 return 0;
28311 }
28312 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28313 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28314 int bytes;
28315 read_unlock(&vcc_sklist_lock);
28316 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28317 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28318 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28319 lvcc->stats.x.aal5.service_trash++;
28320 bytes = (SERVICE_GET_END(s) * 16) -
28321 (((unsigned long) lvcc->rx.buf.ptr) -
28322 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28323 }
28324 if (s & SERVICE_STREAM) {
28325 read_unlock(&vcc_sklist_lock);
28326 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28327 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28328 lvcc->stats.x.aal5.service_stream++;
28329 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28330 "PDU on VCI %d!\n", lanai->number, vci);
28331 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28332 return 0;
28333 }
28334 DPRINTK("got rx crc error on vci %d\n", vci);
28335 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28336 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28337 lvcc->stats.x.aal5.service_rxcrc++;
28338 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28339 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28340 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28341 index 1c70c45..300718d 100644
28342 --- a/drivers/atm/nicstar.c
28343 +++ b/drivers/atm/nicstar.c
28344 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28345 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28346 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28347 card->index);
28348 - atomic_inc(&vcc->stats->tx_err);
28349 + atomic_inc_unchecked(&vcc->stats->tx_err);
28350 dev_kfree_skb_any(skb);
28351 return -EINVAL;
28352 }
28353 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28354 if (!vc->tx) {
28355 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28356 card->index);
28357 - atomic_inc(&vcc->stats->tx_err);
28358 + atomic_inc_unchecked(&vcc->stats->tx_err);
28359 dev_kfree_skb_any(skb);
28360 return -EINVAL;
28361 }
28362 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28363 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28364 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28365 card->index);
28366 - atomic_inc(&vcc->stats->tx_err);
28367 + atomic_inc_unchecked(&vcc->stats->tx_err);
28368 dev_kfree_skb_any(skb);
28369 return -EINVAL;
28370 }
28371
28372 if (skb_shinfo(skb)->nr_frags != 0) {
28373 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28374 - atomic_inc(&vcc->stats->tx_err);
28375 + atomic_inc_unchecked(&vcc->stats->tx_err);
28376 dev_kfree_skb_any(skb);
28377 return -EINVAL;
28378 }
28379 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28380 }
28381
28382 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28383 - atomic_inc(&vcc->stats->tx_err);
28384 + atomic_inc_unchecked(&vcc->stats->tx_err);
28385 dev_kfree_skb_any(skb);
28386 return -EIO;
28387 }
28388 - atomic_inc(&vcc->stats->tx);
28389 + atomic_inc_unchecked(&vcc->stats->tx);
28390
28391 return 0;
28392 }
28393 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28394 printk
28395 ("nicstar%d: Can't allocate buffers for aal0.\n",
28396 card->index);
28397 - atomic_add(i, &vcc->stats->rx_drop);
28398 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28399 break;
28400 }
28401 if (!atm_charge(vcc, sb->truesize)) {
28402 RXPRINTK
28403 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28404 card->index);
28405 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28406 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28407 dev_kfree_skb_any(sb);
28408 break;
28409 }
28410 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28411 ATM_SKB(sb)->vcc = vcc;
28412 __net_timestamp(sb);
28413 vcc->push(vcc, sb);
28414 - atomic_inc(&vcc->stats->rx);
28415 + atomic_inc_unchecked(&vcc->stats->rx);
28416 cell += ATM_CELL_PAYLOAD;
28417 }
28418
28419 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28420 if (iovb == NULL) {
28421 printk("nicstar%d: Out of iovec buffers.\n",
28422 card->index);
28423 - atomic_inc(&vcc->stats->rx_drop);
28424 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28425 recycle_rx_buf(card, skb);
28426 return;
28427 }
28428 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28429 small or large buffer itself. */
28430 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28431 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28432 - atomic_inc(&vcc->stats->rx_err);
28433 + atomic_inc_unchecked(&vcc->stats->rx_err);
28434 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28435 NS_MAX_IOVECS);
28436 NS_PRV_IOVCNT(iovb) = 0;
28437 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28438 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28439 card->index);
28440 which_list(card, skb);
28441 - atomic_inc(&vcc->stats->rx_err);
28442 + atomic_inc_unchecked(&vcc->stats->rx_err);
28443 recycle_rx_buf(card, skb);
28444 vc->rx_iov = NULL;
28445 recycle_iov_buf(card, iovb);
28446 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28447 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28448 card->index);
28449 which_list(card, skb);
28450 - atomic_inc(&vcc->stats->rx_err);
28451 + atomic_inc_unchecked(&vcc->stats->rx_err);
28452 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28453 NS_PRV_IOVCNT(iovb));
28454 vc->rx_iov = NULL;
28455 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28456 printk(" - PDU size mismatch.\n");
28457 else
28458 printk(".\n");
28459 - atomic_inc(&vcc->stats->rx_err);
28460 + atomic_inc_unchecked(&vcc->stats->rx_err);
28461 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28462 NS_PRV_IOVCNT(iovb));
28463 vc->rx_iov = NULL;
28464 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28465 /* skb points to a small buffer */
28466 if (!atm_charge(vcc, skb->truesize)) {
28467 push_rxbufs(card, skb);
28468 - atomic_inc(&vcc->stats->rx_drop);
28469 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28470 } else {
28471 skb_put(skb, len);
28472 dequeue_sm_buf(card, skb);
28473 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28474 ATM_SKB(skb)->vcc = vcc;
28475 __net_timestamp(skb);
28476 vcc->push(vcc, skb);
28477 - atomic_inc(&vcc->stats->rx);
28478 + atomic_inc_unchecked(&vcc->stats->rx);
28479 }
28480 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28481 struct sk_buff *sb;
28482 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28483 if (len <= NS_SMBUFSIZE) {
28484 if (!atm_charge(vcc, sb->truesize)) {
28485 push_rxbufs(card, sb);
28486 - atomic_inc(&vcc->stats->rx_drop);
28487 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28488 } else {
28489 skb_put(sb, len);
28490 dequeue_sm_buf(card, sb);
28491 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28492 ATM_SKB(sb)->vcc = vcc;
28493 __net_timestamp(sb);
28494 vcc->push(vcc, sb);
28495 - atomic_inc(&vcc->stats->rx);
28496 + atomic_inc_unchecked(&vcc->stats->rx);
28497 }
28498
28499 push_rxbufs(card, skb);
28500 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28501
28502 if (!atm_charge(vcc, skb->truesize)) {
28503 push_rxbufs(card, skb);
28504 - atomic_inc(&vcc->stats->rx_drop);
28505 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28506 } else {
28507 dequeue_lg_buf(card, skb);
28508 #ifdef NS_USE_DESTRUCTORS
28509 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28510 ATM_SKB(skb)->vcc = vcc;
28511 __net_timestamp(skb);
28512 vcc->push(vcc, skb);
28513 - atomic_inc(&vcc->stats->rx);
28514 + atomic_inc_unchecked(&vcc->stats->rx);
28515 }
28516
28517 push_rxbufs(card, sb);
28518 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28519 printk
28520 ("nicstar%d: Out of huge buffers.\n",
28521 card->index);
28522 - atomic_inc(&vcc->stats->rx_drop);
28523 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28524 recycle_iovec_rx_bufs(card,
28525 (struct iovec *)
28526 iovb->data,
28527 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28528 card->hbpool.count++;
28529 } else
28530 dev_kfree_skb_any(hb);
28531 - atomic_inc(&vcc->stats->rx_drop);
28532 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28533 } else {
28534 /* Copy the small buffer to the huge buffer */
28535 sb = (struct sk_buff *)iov->iov_base;
28536 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28537 #endif /* NS_USE_DESTRUCTORS */
28538 __net_timestamp(hb);
28539 vcc->push(vcc, hb);
28540 - atomic_inc(&vcc->stats->rx);
28541 + atomic_inc_unchecked(&vcc->stats->rx);
28542 }
28543 }
28544
28545 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28546 index e8cd652..bbbd1fc 100644
28547 --- a/drivers/atm/solos-pci.c
28548 +++ b/drivers/atm/solos-pci.c
28549 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28550 }
28551 atm_charge(vcc, skb->truesize);
28552 vcc->push(vcc, skb);
28553 - atomic_inc(&vcc->stats->rx);
28554 + atomic_inc_unchecked(&vcc->stats->rx);
28555 break;
28556
28557 case PKT_STATUS:
28558 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28559 vcc = SKB_CB(oldskb)->vcc;
28560
28561 if (vcc) {
28562 - atomic_inc(&vcc->stats->tx);
28563 + atomic_inc_unchecked(&vcc->stats->tx);
28564 solos_pop(vcc, oldskb);
28565 } else
28566 dev_kfree_skb_irq(oldskb);
28567 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28568 index 0215934..ce9f5b1 100644
28569 --- a/drivers/atm/suni.c
28570 +++ b/drivers/atm/suni.c
28571 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28572
28573
28574 #define ADD_LIMITED(s,v) \
28575 - atomic_add((v),&stats->s); \
28576 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28577 + atomic_add_unchecked((v),&stats->s); \
28578 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28579
28580
28581 static void suni_hz(unsigned long from_timer)
28582 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28583 index 5120a96..e2572bd 100644
28584 --- a/drivers/atm/uPD98402.c
28585 +++ b/drivers/atm/uPD98402.c
28586 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28587 struct sonet_stats tmp;
28588 int error = 0;
28589
28590 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28591 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28592 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28593 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28594 if (zero && !error) {
28595 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28596
28597
28598 #define ADD_LIMITED(s,v) \
28599 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28600 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28601 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28602 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28603 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28604 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28605
28606
28607 static void stat_event(struct atm_dev *dev)
28608 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28609 if (reason & uPD98402_INT_PFM) stat_event(dev);
28610 if (reason & uPD98402_INT_PCO) {
28611 (void) GET(PCOCR); /* clear interrupt cause */
28612 - atomic_add(GET(HECCT),
28613 + atomic_add_unchecked(GET(HECCT),
28614 &PRIV(dev)->sonet_stats.uncorr_hcs);
28615 }
28616 if ((reason & uPD98402_INT_RFO) &&
28617 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28618 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28619 uPD98402_INT_LOS),PIMR); /* enable them */
28620 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28621 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28622 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28623 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28624 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28625 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28626 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28627 return 0;
28628 }
28629
28630 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28631 index abe4e20..83c4727 100644
28632 --- a/drivers/atm/zatm.c
28633 +++ b/drivers/atm/zatm.c
28634 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28635 }
28636 if (!size) {
28637 dev_kfree_skb_irq(skb);
28638 - if (vcc) atomic_inc(&vcc->stats->rx_err);
28639 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28640 continue;
28641 }
28642 if (!atm_charge(vcc,skb->truesize)) {
28643 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28644 skb->len = size;
28645 ATM_SKB(skb)->vcc = vcc;
28646 vcc->push(vcc,skb);
28647 - atomic_inc(&vcc->stats->rx);
28648 + atomic_inc_unchecked(&vcc->stats->rx);
28649 }
28650 zout(pos & 0xffff,MTA(mbx));
28651 #if 0 /* probably a stupid idea */
28652 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28653 skb_queue_head(&zatm_vcc->backlog,skb);
28654 break;
28655 }
28656 - atomic_inc(&vcc->stats->tx);
28657 + atomic_inc_unchecked(&vcc->stats->tx);
28658 wake_up(&zatm_vcc->tx_wait);
28659 }
28660
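The ATM hunks above (nicstar, solos-pci, suni, uPD98402, zatm) all make the same substitution: statistics counters such as vcc->stats->rx and the SONET counters move from the plain atomic helpers to the patch's *_unchecked variants. As far as these hunks show, the point is to separate counters that are allowed to wrap silently from reference counts whose overflow should be treated as a bug. Below is a minimal userspace sketch of that split using C11 atomics rather than the kernel API; checked_t, unchecked_t and the helper functions are names local to this example.

#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } checked_t;    /* models atomic_t           */
typedef struct { atomic_int counter; } unchecked_t;  /* models atomic_unchecked_t */

static void checked_inc(checked_t *c)
{
        int old = atomic_fetch_add(&c->counter, 1);
        assert(old != INT_MAX);          /* wrapping here would be a refcount bug */
}

static void unchecked_inc(unchecked_t *c)
{
        atomic_fetch_add(&c->counter, 1);        /* statistics may silently wrap */
}

int main(void)
{
        checked_t   refs       = { 1 };  /* e.g. an object reference count */
        unchecked_t rx_packets = { 0 };  /* e.g. vcc->stats->rx            */

        checked_inc(&refs);
        unchecked_inc(&rx_packets);
        printf("refs=%d rx=%d\n",
               atomic_load(&refs.counter), atomic_load(&rx_packets.counter));
        return 0;
}

The same conversion recurs throughout the rest of the patch wherever a counter is purely informational.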
28661 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28662 index 8493536..31adee0 100644
28663 --- a/drivers/base/devtmpfs.c
28664 +++ b/drivers/base/devtmpfs.c
28665 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28666 if (!thread)
28667 return 0;
28668
28669 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28670 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28671 if (err)
28672 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28673 else
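The devtmpfs hunk above is the first of many __force_user casts in this section: devtmpfs_mount() hands kernel strings to sys_mount(), whose parameters are declared __user, so the address-space crossing has to be spelled out for the sparse checker. The standalone sketch below shows how such annotations are typically wired up; the macro definitions mirror the usual compiler.h approach in spirit but are this example's own, and fake_mount() is purely illustrative.

#include <stdio.h>

#ifdef __CHECKER__
# define __user        __attribute__((noderef, address_space(1)))
# define __force       __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user   __force __user

/* Stand-in for a syscall that normally takes userspace pointers. */
static int fake_mount(const char __user *dev, const char __user *dir)
{
        printf("mount %s on %s\n",
               (const char __force *)dev, (const char __force *)dir);
        return 0;
}

int main(void)
{
        /* Kernel-side caller: the strings live in "kernel" space, so the
         * crossing is made explicit, as in the devtmpfs hunk above. */
        return fake_mount((const char __force_user *)"devtmpfs",
                          (const char __force_user *)"/dev");
}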
28674 diff --git a/drivers/base/node.c b/drivers/base/node.c
28675 index 90aa2a1..af1a177 100644
28676 --- a/drivers/base/node.c
28677 +++ b/drivers/base/node.c
28678 @@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
28679 {
28680 int n;
28681
28682 - n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
28683 - if (n > 0 && PAGE_SIZE > n + 1) {
28684 - *(buf + n++) = '\n';
28685 - *(buf + n++) = '\0';
28686 - }
28687 + n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
28688 + buf[n++] = '\n';
28689 + buf[n] = '\0';
28690 return n;
28691 }
28692
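The node.c hunk above replaces a conditional newline append with an unconditional one, made safe by formatting into two bytes less than the page. The shape of that idiom in plain userspace C; BUF_SIZE and format_list() stand in for PAGE_SIZE and nodelist_scnprintf(), so this is a sketch of the buffer handling, not the sysfs code.

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 64

/* Stand-in for nodelist_scnprintf(): returns chars written (always < size). */
static int format_list(char *buf, size_t size)
{
        int n = snprintf(buf, size, "0-3,8");
        return (n < 0) ? 0 : (n >= (int)size ? (int)size - 1 : n);
}

int main(void)
{
        char buf[BUF_SIZE];

        /* Hold back two bytes so the newline and NUL always fit. */
        int n = format_list(buf, sizeof(buf) - 2);
        buf[n++] = '\n';
        buf[n] = '\0';

        fputs(buf, stdout);
        return 0;
}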
28693 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28694 index 2a3e581..3d6a73f 100644
28695 --- a/drivers/base/power/wakeup.c
28696 +++ b/drivers/base/power/wakeup.c
28697 @@ -30,14 +30,14 @@ bool events_check_enabled;
28698 * They need to be modified together atomically, so it's better to use one
28699 * atomic variable to hold them both.
28700 */
28701 -static atomic_t combined_event_count = ATOMIC_INIT(0);
28702 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28703
28704 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28705 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28706
28707 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28708 {
28709 - unsigned int comb = atomic_read(&combined_event_count);
28710 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
28711
28712 *cnt = (comb >> IN_PROGRESS_BITS);
28713 *inpr = comb & MAX_IN_PROGRESS;
28714 @@ -379,7 +379,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28715 ws->last_time = ktime_get();
28716
28717 /* Increment the counter of events in progress. */
28718 - atomic_inc(&combined_event_count);
28719 + atomic_inc_unchecked(&combined_event_count);
28720 }
28721
28722 /**
28723 @@ -475,7 +475,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28724 * Increment the counter of registered wakeup events and decrement the
28725 * couter of wakeup events in progress simultaneously.
28726 */
28727 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28728 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28729 }
28730
28731 /**
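Only the atomic type changes in the wakeup.c hunks above; the packed-counter trick behind combined_event_count is untouched and worth seeing on its own: one word holds the in-progress count in its low half and the completed-event count in its high half, so both always change together. A userspace model with C11 atomics follows; the two constants are copied from the hunk, everything else is local to the example.

#include <stdatomic.h>
#include <stdio.h>

#define IN_PROGRESS_BITS  (sizeof(int) * 4)
#define MAX_IN_PROGRESS   ((1 << IN_PROGRESS_BITS) - 1)

static atomic_uint combined_event_count;

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
        unsigned int comb = atomic_load(&combined_event_count);

        *cnt  = comb >> IN_PROGRESS_BITS;    /* events seen to completion */
        *inpr = comb & MAX_IN_PROGRESS;      /* events still in progress  */
}

int main(void)
{
        unsigned int cnt, inpr;

        /* One wakeup event starts... */
        atomic_fetch_add(&combined_event_count, 1);
        /* ...and finishes: adding MAX_IN_PROGRESS carries +1 into the high
         * half and takes 1 off the low half in a single atomic update. */
        atomic_fetch_add(&combined_event_count, MAX_IN_PROGRESS);

        split_counters(&cnt, &inpr);
        printf("registered=%u in_progress=%u\n", cnt, inpr);
        return 0;
}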
28732 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28733 index b0f553b..77b928b 100644
28734 --- a/drivers/block/cciss.c
28735 +++ b/drivers/block/cciss.c
28736 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28737 int err;
28738 u32 cp;
28739
28740 + memset(&arg64, 0, sizeof(arg64));
28741 +
28742 err = 0;
28743 err |=
28744 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28745 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28746 while (!list_empty(&h->reqQ)) {
28747 c = list_entry(h->reqQ.next, CommandList_struct, list);
28748 /* can't do anything if fifo is full */
28749 - if ((h->access.fifo_full(h))) {
28750 + if ((h->access->fifo_full(h))) {
28751 dev_warn(&h->pdev->dev, "fifo full\n");
28752 break;
28753 }
28754 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28755 h->Qdepth--;
28756
28757 /* Tell the controller execute command */
28758 - h->access.submit_command(h, c);
28759 + h->access->submit_command(h, c);
28760
28761 /* Put job onto the completed Q */
28762 addQ(&h->cmpQ, c);
28763 @@ -3443,17 +3445,17 @@ startio:
28764
28765 static inline unsigned long get_next_completion(ctlr_info_t *h)
28766 {
28767 - return h->access.command_completed(h);
28768 + return h->access->command_completed(h);
28769 }
28770
28771 static inline int interrupt_pending(ctlr_info_t *h)
28772 {
28773 - return h->access.intr_pending(h);
28774 + return h->access->intr_pending(h);
28775 }
28776
28777 static inline long interrupt_not_for_us(ctlr_info_t *h)
28778 {
28779 - return ((h->access.intr_pending(h) == 0) ||
28780 + return ((h->access->intr_pending(h) == 0) ||
28781 (h->interrupts_enabled == 0));
28782 }
28783
28784 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28785 u32 a;
28786
28787 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28788 - return h->access.command_completed(h);
28789 + return h->access->command_completed(h);
28790
28791 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28792 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28793 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28794 trans_support & CFGTBL_Trans_use_short_tags);
28795
28796 /* Change the access methods to the performant access methods */
28797 - h->access = SA5_performant_access;
28798 + h->access = &SA5_performant_access;
28799 h->transMethod = CFGTBL_Trans_Performant;
28800
28801 return;
28802 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28803 if (prod_index < 0)
28804 return -ENODEV;
28805 h->product_name = products[prod_index].product_name;
28806 - h->access = *(products[prod_index].access);
28807 + h->access = products[prod_index].access;
28808
28809 if (cciss_board_disabled(h)) {
28810 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28811 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28812 }
28813
28814 /* make sure the board interrupts are off */
28815 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28816 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28817 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28818 if (rc)
28819 goto clean2;
28820 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28821 * fake ones to scoop up any residual completions.
28822 */
28823 spin_lock_irqsave(&h->lock, flags);
28824 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28825 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28826 spin_unlock_irqrestore(&h->lock, flags);
28827 free_irq(h->intr[h->intr_mode], h);
28828 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28829 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28830 dev_info(&h->pdev->dev, "Board READY.\n");
28831 dev_info(&h->pdev->dev,
28832 "Waiting for stale completions to drain.\n");
28833 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28834 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28835 msleep(10000);
28836 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28837 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28838
28839 rc = controller_reset_failed(h->cfgtable);
28840 if (rc)
28841 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28842 cciss_scsi_setup(h);
28843
28844 /* Turn the interrupts on so we can service requests */
28845 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28846 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28847
28848 /* Get the firmware version */
28849 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28850 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28851 kfree(flush_buf);
28852 if (return_code != IO_OK)
28853 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28854 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28855 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28856 free_irq(h->intr[h->intr_mode], h);
28857 }
28858
28859 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28860 index 7fda30e..eb5dfe0 100644
28861 --- a/drivers/block/cciss.h
28862 +++ b/drivers/block/cciss.h
28863 @@ -101,7 +101,7 @@ struct ctlr_info
28864 /* information about each logical volume */
28865 drive_info_struct *drv[CISS_MAX_LUN];
28866
28867 - struct access_method access;
28868 + struct access_method *access;
28869
28870 /* queue and queue Info */
28871 struct list_head reqQ;
28872 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28873 index 9125bbe..eede5c8 100644
28874 --- a/drivers/block/cpqarray.c
28875 +++ b/drivers/block/cpqarray.c
28876 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28877 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
28878 goto Enomem4;
28879 }
28880 - hba[i]->access.set_intr_mask(hba[i], 0);
28881 + hba[i]->access->set_intr_mask(hba[i], 0);
28882 if (request_irq(hba[i]->intr, do_ida_intr,
28883 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28884 {
28885 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28886 add_timer(&hba[i]->timer);
28887
28888 /* Enable IRQ now that spinlock and rate limit timer are set up */
28889 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28890 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28891
28892 for(j=0; j<NWD; j++) {
28893 struct gendisk *disk = ida_gendisk[i][j];
28894 @@ -694,7 +694,7 @@ DBGINFO(
28895 for(i=0; i<NR_PRODUCTS; i++) {
28896 if (board_id == products[i].board_id) {
28897 c->product_name = products[i].product_name;
28898 - c->access = *(products[i].access);
28899 + c->access = products[i].access;
28900 break;
28901 }
28902 }
28903 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28904 hba[ctlr]->intr = intr;
28905 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28906 hba[ctlr]->product_name = products[j].product_name;
28907 - hba[ctlr]->access = *(products[j].access);
28908 + hba[ctlr]->access = products[j].access;
28909 hba[ctlr]->ctlr = ctlr;
28910 hba[ctlr]->board_id = board_id;
28911 hba[ctlr]->pci_dev = NULL; /* not PCI */
28912 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28913
28914 while((c = h->reqQ) != NULL) {
28915 /* Can't do anything if we're busy */
28916 - if (h->access.fifo_full(h) == 0)
28917 + if (h->access->fifo_full(h) == 0)
28918 return;
28919
28920 /* Get the first entry from the request Q */
28921 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28922 h->Qdepth--;
28923
28924 /* Tell the controller to do our bidding */
28925 - h->access.submit_command(h, c);
28926 + h->access->submit_command(h, c);
28927
28928 /* Get onto the completion Q */
28929 addQ(&h->cmpQ, c);
28930 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28931 unsigned long flags;
28932 __u32 a,a1;
28933
28934 - istat = h->access.intr_pending(h);
28935 + istat = h->access->intr_pending(h);
28936 /* Is this interrupt for us? */
28937 if (istat == 0)
28938 return IRQ_NONE;
28939 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28940 */
28941 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28942 if (istat & FIFO_NOT_EMPTY) {
28943 - while((a = h->access.command_completed(h))) {
28944 + while((a = h->access->command_completed(h))) {
28945 a1 = a; a &= ~3;
28946 if ((c = h->cmpQ) == NULL)
28947 {
28948 @@ -1449,11 +1449,11 @@ static int sendcmd(
28949 /*
28950 * Disable interrupt
28951 */
28952 - info_p->access.set_intr_mask(info_p, 0);
28953 + info_p->access->set_intr_mask(info_p, 0);
28954 /* Make sure there is room in the command FIFO */
28955 /* Actually it should be completely empty at this time. */
28956 for (i = 200000; i > 0; i--) {
28957 - temp = info_p->access.fifo_full(info_p);
28958 + temp = info_p->access->fifo_full(info_p);
28959 if (temp != 0) {
28960 break;
28961 }
28962 @@ -1466,7 +1466,7 @@ DBG(
28963 /*
28964 * Send the cmd
28965 */
28966 - info_p->access.submit_command(info_p, c);
28967 + info_p->access->submit_command(info_p, c);
28968 complete = pollcomplete(ctlr);
28969
28970 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28971 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28972 * we check the new geometry. Then turn interrupts back on when
28973 * we're done.
28974 */
28975 - host->access.set_intr_mask(host, 0);
28976 + host->access->set_intr_mask(host, 0);
28977 getgeometry(ctlr);
28978 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28979 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28980
28981 for(i=0; i<NWD; i++) {
28982 struct gendisk *disk = ida_gendisk[ctlr][i];
28983 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
28984 /* Wait (up to 2 seconds) for a command to complete */
28985
28986 for (i = 200000; i > 0; i--) {
28987 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
28988 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
28989 if (done == 0) {
28990 udelay(10); /* a short fixed delay */
28991 } else
28992 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
28993 index be73e9d..7fbf140 100644
28994 --- a/drivers/block/cpqarray.h
28995 +++ b/drivers/block/cpqarray.h
28996 @@ -99,7 +99,7 @@ struct ctlr_info {
28997 drv_info_t drv[NWD];
28998 struct proc_dir_entry *proc;
28999
29000 - struct access_method access;
29001 + struct access_method *access;
29002
29003 cmdlist_t *reqQ;
29004 cmdlist_t *cmpQ;
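Throughout the cciss and cpqarray hunks above, "struct access_method access" becomes "struct access_method *access": instead of copying the board's method table into every controller, the controller keeps a pointer to one shared table, and every call site changes from h->access.foo() to h->access->foo(). A compact sketch of that ops-pointer pattern follows; the const on the shared table and all names here are the example's own simplification, not taken from the drivers.

#include <stdio.h>

struct access_method {
        void (*submit_command)(const char *what);
        int  (*fifo_full)(void);
};

static void sa5_submit(const char *what) { printf("submit: %s\n", what); }
static int  sa5_fifo_full(void)          { return 0; }

/* One shared, never-written table per board family. */
static const struct access_method SA5_access = {
        .submit_command = sa5_submit,
        .fifo_full      = sa5_fifo_full,
};

struct ctlr_info {
        const struct access_method *access;  /* was: struct access_method access */
};

int main(void)
{
        struct ctlr_info h = { .access = &SA5_access };

        if (!h.access->fifo_full())
                h.access->submit_command("read capacity");
        return 0;
}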
29005 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29006 index 8d68056..e67050f 100644
29007 --- a/drivers/block/drbd/drbd_int.h
29008 +++ b/drivers/block/drbd/drbd_int.h
29009 @@ -736,7 +736,7 @@ struct drbd_request;
29010 struct drbd_epoch {
29011 struct list_head list;
29012 unsigned int barrier_nr;
29013 - atomic_t epoch_size; /* increased on every request added. */
29014 + atomic_unchecked_t epoch_size; /* increased on every request added. */
29015 atomic_t active; /* increased on every req. added, and dec on every finished. */
29016 unsigned long flags;
29017 };
29018 @@ -1108,7 +1108,7 @@ struct drbd_conf {
29019 void *int_dig_in;
29020 void *int_dig_vv;
29021 wait_queue_head_t seq_wait;
29022 - atomic_t packet_seq;
29023 + atomic_unchecked_t packet_seq;
29024 unsigned int peer_seq;
29025 spinlock_t peer_seq_lock;
29026 unsigned int minor;
29027 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29028
29029 static inline void drbd_tcp_cork(struct socket *sock)
29030 {
29031 - int __user val = 1;
29032 + int val = 1;
29033 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29034 - (char __user *)&val, sizeof(val));
29035 + (char __force_user *)&val, sizeof(val));
29036 }
29037
29038 static inline void drbd_tcp_uncork(struct socket *sock)
29039 {
29040 - int __user val = 0;
29041 + int val = 0;
29042 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29043 - (char __user *)&val, sizeof(val));
29044 + (char __force_user *)&val, sizeof(val));
29045 }
29046
29047 static inline void drbd_tcp_nodelay(struct socket *sock)
29048 {
29049 - int __user val = 1;
29050 + int val = 1;
29051 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29052 - (char __user *)&val, sizeof(val));
29053 + (char __force_user *)&val, sizeof(val));
29054 }
29055
29056 static inline void drbd_tcp_quickack(struct socket *sock)
29057 {
29058 - int __user val = 2;
29059 + int val = 2;
29060 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29061 - (char __user *)&val, sizeof(val));
29062 + (char __force_user *)&val, sizeof(val));
29063 }
29064
29065 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29066 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29067 index 211fc44..c5116f1 100644
29068 --- a/drivers/block/drbd/drbd_main.c
29069 +++ b/drivers/block/drbd/drbd_main.c
29070 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29071 p.sector = sector;
29072 p.block_id = block_id;
29073 p.blksize = blksize;
29074 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29075 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29076
29077 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29078 return false;
29079 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29080 p.sector = cpu_to_be64(req->sector);
29081 p.block_id = (unsigned long)req;
29082 p.seq_num = cpu_to_be32(req->seq_num =
29083 - atomic_add_return(1, &mdev->packet_seq));
29084 + atomic_add_return_unchecked(1, &mdev->packet_seq));
29085
29086 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29087
29088 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29089 atomic_set(&mdev->unacked_cnt, 0);
29090 atomic_set(&mdev->local_cnt, 0);
29091 atomic_set(&mdev->net_cnt, 0);
29092 - atomic_set(&mdev->packet_seq, 0);
29093 + atomic_set_unchecked(&mdev->packet_seq, 0);
29094 atomic_set(&mdev->pp_in_use, 0);
29095 atomic_set(&mdev->pp_in_use_by_net, 0);
29096 atomic_set(&mdev->rs_sect_in, 0);
29097 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29098 mdev->receiver.t_state);
29099
29100 /* no need to lock it, I'm the only thread alive */
29101 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29102 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29103 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29104 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29105 mdev->al_writ_cnt =
29106 mdev->bm_writ_cnt =
29107 mdev->read_cnt =
29108 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29109 index 946166e..356b39a 100644
29110 --- a/drivers/block/drbd/drbd_nl.c
29111 +++ b/drivers/block/drbd/drbd_nl.c
29112 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29113 module_put(THIS_MODULE);
29114 }
29115
29116 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29117 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29118
29119 static unsigned short *
29120 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29121 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29122 cn_reply->id.idx = CN_IDX_DRBD;
29123 cn_reply->id.val = CN_VAL_DRBD;
29124
29125 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29126 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29127 cn_reply->ack = 0; /* not used here. */
29128 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29129 (int)((char *)tl - (char *)reply->tag_list);
29130 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29131 cn_reply->id.idx = CN_IDX_DRBD;
29132 cn_reply->id.val = CN_VAL_DRBD;
29133
29134 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29135 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29136 cn_reply->ack = 0; /* not used here. */
29137 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29138 (int)((char *)tl - (char *)reply->tag_list);
29139 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29140 cn_reply->id.idx = CN_IDX_DRBD;
29141 cn_reply->id.val = CN_VAL_DRBD;
29142
29143 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29144 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29145 cn_reply->ack = 0; // not used here.
29146 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29147 (int)((char*)tl - (char*)reply->tag_list);
29148 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29149 cn_reply->id.idx = CN_IDX_DRBD;
29150 cn_reply->id.val = CN_VAL_DRBD;
29151
29152 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29153 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29154 cn_reply->ack = 0; /* not used here. */
29155 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29156 (int)((char *)tl - (char *)reply->tag_list);
29157 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29158 index 43beaca..4a5b1dd 100644
29159 --- a/drivers/block/drbd/drbd_receiver.c
29160 +++ b/drivers/block/drbd/drbd_receiver.c
29161 @@ -894,7 +894,7 @@ retry:
29162 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29163 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29164
29165 - atomic_set(&mdev->packet_seq, 0);
29166 + atomic_set_unchecked(&mdev->packet_seq, 0);
29167 mdev->peer_seq = 0;
29168
29169 drbd_thread_start(&mdev->asender);
29170 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29171 do {
29172 next_epoch = NULL;
29173
29174 - epoch_size = atomic_read(&epoch->epoch_size);
29175 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29176
29177 switch (ev & ~EV_CLEANUP) {
29178 case EV_PUT:
29179 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29180 rv = FE_DESTROYED;
29181 } else {
29182 epoch->flags = 0;
29183 - atomic_set(&epoch->epoch_size, 0);
29184 + atomic_set_unchecked(&epoch->epoch_size, 0);
29185 /* atomic_set(&epoch->active, 0); is already zero */
29186 if (rv == FE_STILL_LIVE)
29187 rv = FE_RECYCLED;
29188 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29189 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29190 drbd_flush(mdev);
29191
29192 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29193 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29194 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29195 if (epoch)
29196 break;
29197 }
29198
29199 epoch = mdev->current_epoch;
29200 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29201 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29202
29203 D_ASSERT(atomic_read(&epoch->active) == 0);
29204 D_ASSERT(epoch->flags == 0);
29205 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29206 }
29207
29208 epoch->flags = 0;
29209 - atomic_set(&epoch->epoch_size, 0);
29210 + atomic_set_unchecked(&epoch->epoch_size, 0);
29211 atomic_set(&epoch->active, 0);
29212
29213 spin_lock(&mdev->epoch_lock);
29214 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29215 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29216 list_add(&epoch->list, &mdev->current_epoch->list);
29217 mdev->current_epoch = epoch;
29218 mdev->epochs++;
29219 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29220 spin_unlock(&mdev->peer_seq_lock);
29221
29222 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29223 - atomic_inc(&mdev->current_epoch->epoch_size);
29224 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29225 return drbd_drain_block(mdev, data_size);
29226 }
29227
29228 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29229
29230 spin_lock(&mdev->epoch_lock);
29231 e->epoch = mdev->current_epoch;
29232 - atomic_inc(&e->epoch->epoch_size);
29233 + atomic_inc_unchecked(&e->epoch->epoch_size);
29234 atomic_inc(&e->epoch->active);
29235 spin_unlock(&mdev->epoch_lock);
29236
29237 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29238 D_ASSERT(list_empty(&mdev->done_ee));
29239
29240 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29241 - atomic_set(&mdev->current_epoch->epoch_size, 0);
29242 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29243 D_ASSERT(list_empty(&mdev->current_epoch->list));
29244 }
29245
29246 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29247 index bbca966..65e37dd 100644
29248 --- a/drivers/block/loop.c
29249 +++ b/drivers/block/loop.c
29250 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29251 mm_segment_t old_fs = get_fs();
29252
29253 set_fs(get_ds());
29254 - bw = file->f_op->write(file, buf, len, &pos);
29255 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29256 set_fs(old_fs);
29257 if (likely(bw == len))
29258 return 0;
29259 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29260 index ee94686..3e09ad3 100644
29261 --- a/drivers/char/Kconfig
29262 +++ b/drivers/char/Kconfig
29263 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29264
29265 config DEVKMEM
29266 bool "/dev/kmem virtual device support"
29267 - default y
29268 + default n
29269 + depends on !GRKERNSEC_KMEM
29270 help
29271 Say Y here if you want to support the /dev/kmem device. The
29272 /dev/kmem device is rarely used, but can be used for certain
29273 @@ -581,6 +582,7 @@ config DEVPORT
29274 bool
29275 depends on !M68K
29276 depends on ISA || PCI
29277 + depends on !GRKERNSEC_KMEM
29278 default y
29279
29280 source "drivers/s390/char/Kconfig"
29281 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29282 index 2e04433..22afc64 100644
29283 --- a/drivers/char/agp/frontend.c
29284 +++ b/drivers/char/agp/frontend.c
29285 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29286 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29287 return -EFAULT;
29288
29289 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29290 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29291 return -EFAULT;
29292
29293 client = agp_find_client_by_pid(reserve.pid);
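The agp frontend hunk above tightens a classic multiplication-overflow guard: a user-supplied seg_count is later multiplied by an element size for an allocation, so it is bounded by ~0U divided by the size of the element that actually gets allocated (struct agp_segment_priv rather than the smaller user-visible struct agp_segment). The sketch below shows only the shape of that guard; struct segment_priv and alloc_segments() are invented names, and no attempt is made to reproduce the AGP code paths.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct segment_priv {
        unsigned long pg_start;
        unsigned long pg_count;
        int prot;
        int extra_bookkeeping[8];    /* larger than the user-visible struct */
};

static struct segment_priv *alloc_segments(unsigned int seg_count)
{
        /* Mirror the hunk's bound: seg_count * sizeof() must stay below ~0U. */
        if (seg_count >= ~0U / sizeof(struct segment_priv)) {
                errno = EINVAL;
                return NULL;
        }
        return malloc((size_t)seg_count * sizeof(struct segment_priv));
}

int main(void)
{
        struct segment_priv *segs = alloc_segments(4);

        if (!segs)
                return 1;
        printf("allocated %zu bytes\n", 4 * sizeof(*segs));
        free(segs);
        return 0;
}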
29294 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29295 index 21cb980..f15107c 100644
29296 --- a/drivers/char/genrtc.c
29297 +++ b/drivers/char/genrtc.c
29298 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29299 switch (cmd) {
29300
29301 case RTC_PLL_GET:
29302 + memset(&pll, 0, sizeof(pll));
29303 if (get_rtc_pll(&pll))
29304 return -EINVAL;
29305 else
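The one-line genrtc fix above is the same infoleak pattern addressed earlier by the memset of arg64 in cciss_ioctl32_passthru(): a struct assembled on the stack and later copied out in full must be zeroed first, or padding and never-written fields carry stale stack bytes to userspace. A userspace illustration follows; struct pll_info stands in for the real rtc_pll_info layout and copy_out() for copy_to_user(), neither is the kernel definition.

#include <stdio.h>
#include <string.h>

struct pll_info {
        int pll_ctrl;
        int pll_value;
        int pll_max;     /* suppose the driver never fills this one */
};

/* Stand-in for copy_to_user(): just dumps the raw bytes it would copy. */
static void copy_out(const void *src, size_t len)
{
        const unsigned char *p = src;
        for (size_t i = 0; i < len; i++)
                printf("%02x%s", p[i], (i + 1 == len) ? "\n" : " ");
}

int main(void)
{
        struct pll_info pll;

        memset(&pll, 0, sizeof(pll));   /* the added line: no stale bytes */
        pll.pll_ctrl  = 1;
        pll.pll_value = 32768;

        copy_out(&pll, sizeof(pll));
        return 0;
}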
29306 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29307 index dfd7876..c0b0885 100644
29308 --- a/drivers/char/hpet.c
29309 +++ b/drivers/char/hpet.c
29310 @@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29311 }
29312
29313 static int
29314 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29315 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29316 struct hpet_info *info)
29317 {
29318 struct hpet_timer __iomem *timer;
29319 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29320 index 2c29942..604c5ba 100644
29321 --- a/drivers/char/ipmi/ipmi_msghandler.c
29322 +++ b/drivers/char/ipmi/ipmi_msghandler.c
29323 @@ -420,7 +420,7 @@ struct ipmi_smi {
29324 struct proc_dir_entry *proc_dir;
29325 char proc_dir_name[10];
29326
29327 - atomic_t stats[IPMI_NUM_STATS];
29328 + atomic_unchecked_t stats[IPMI_NUM_STATS];
29329
29330 /*
29331 * run_to_completion duplicate of smb_info, smi_info
29332 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29333
29334
29335 #define ipmi_inc_stat(intf, stat) \
29336 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29337 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29338 #define ipmi_get_stat(intf, stat) \
29339 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29340 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29341
29342 static int is_lan_addr(struct ipmi_addr *addr)
29343 {
29344 @@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29345 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29346 init_waitqueue_head(&intf->waitq);
29347 for (i = 0; i < IPMI_NUM_STATS; i++)
29348 - atomic_set(&intf->stats[i], 0);
29349 + atomic_set_unchecked(&intf->stats[i], 0);
29350
29351 intf->proc_dir = NULL;
29352
29353 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29354 index 1e638ff..a869ef5 100644
29355 --- a/drivers/char/ipmi/ipmi_si_intf.c
29356 +++ b/drivers/char/ipmi/ipmi_si_intf.c
29357 @@ -275,7 +275,7 @@ struct smi_info {
29358 unsigned char slave_addr;
29359
29360 /* Counters and things for the proc filesystem. */
29361 - atomic_t stats[SI_NUM_STATS];
29362 + atomic_unchecked_t stats[SI_NUM_STATS];
29363
29364 struct task_struct *thread;
29365
29366 @@ -284,9 +284,9 @@ struct smi_info {
29367 };
29368
29369 #define smi_inc_stat(smi, stat) \
29370 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29371 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29372 #define smi_get_stat(smi, stat) \
29373 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29374 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29375
29376 #define SI_MAX_PARMS 4
29377
29378 @@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29379 atomic_set(&new_smi->req_events, 0);
29380 new_smi->run_to_completion = 0;
29381 for (i = 0; i < SI_NUM_STATS; i++)
29382 - atomic_set(&new_smi->stats[i], 0);
29383 + atomic_set_unchecked(&new_smi->stats[i], 0);
29384
29385 new_smi->interrupt_disabled = 1;
29386 atomic_set(&new_smi->stop_operation, 0);
29387 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29388 index 47ff7e4..0c7d340 100644
29389 --- a/drivers/char/mbcs.c
29390 +++ b/drivers/char/mbcs.c
29391 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29392 return 0;
29393 }
29394
29395 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29396 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29397 {
29398 .part_num = MBCS_PART_NUM,
29399 .mfg_num = MBCS_MFG_NUM,
29400 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29401 index d6e9d08..4493e89 100644
29402 --- a/drivers/char/mem.c
29403 +++ b/drivers/char/mem.c
29404 @@ -18,6 +18,7 @@
29405 #include <linux/raw.h>
29406 #include <linux/tty.h>
29407 #include <linux/capability.h>
29408 +#include <linux/security.h>
29409 #include <linux/ptrace.h>
29410 #include <linux/device.h>
29411 #include <linux/highmem.h>
29412 @@ -35,6 +36,10 @@
29413 # include <linux/efi.h>
29414 #endif
29415
29416 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29417 +extern const struct file_operations grsec_fops;
29418 +#endif
29419 +
29420 static inline unsigned long size_inside_page(unsigned long start,
29421 unsigned long size)
29422 {
29423 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29424
29425 while (cursor < to) {
29426 if (!devmem_is_allowed(pfn)) {
29427 +#ifdef CONFIG_GRKERNSEC_KMEM
29428 + gr_handle_mem_readwrite(from, to);
29429 +#else
29430 printk(KERN_INFO
29431 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29432 current->comm, from, to);
29433 +#endif
29434 return 0;
29435 }
29436 cursor += PAGE_SIZE;
29437 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29438 }
29439 return 1;
29440 }
29441 +#elif defined(CONFIG_GRKERNSEC_KMEM)
29442 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29443 +{
29444 + return 0;
29445 +}
29446 #else
29447 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29448 {
29449 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29450
29451 while (count > 0) {
29452 unsigned long remaining;
29453 + char *temp;
29454
29455 sz = size_inside_page(p, count);
29456
29457 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29458 if (!ptr)
29459 return -EFAULT;
29460
29461 - remaining = copy_to_user(buf, ptr, sz);
29462 +#ifdef CONFIG_PAX_USERCOPY
29463 + temp = kmalloc(sz, GFP_KERNEL);
29464 + if (!temp) {
29465 + unxlate_dev_mem_ptr(p, ptr);
29466 + return -ENOMEM;
29467 + }
29468 + memcpy(temp, ptr, sz);
29469 +#else
29470 + temp = ptr;
29471 +#endif
29472 +
29473 + remaining = copy_to_user(buf, temp, sz);
29474 +
29475 +#ifdef CONFIG_PAX_USERCOPY
29476 + kfree(temp);
29477 +#endif
29478 +
29479 unxlate_dev_mem_ptr(p, ptr);
29480 if (remaining)
29481 return -EFAULT;
29482 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29483 size_t count, loff_t *ppos)
29484 {
29485 unsigned long p = *ppos;
29486 - ssize_t low_count, read, sz;
29487 + ssize_t low_count, read, sz, err = 0;
29488 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29489 - int err = 0;
29490
29491 read = 0;
29492 if (p < (unsigned long) high_memory) {
29493 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29494 }
29495 #endif
29496 while (low_count > 0) {
29497 + char *temp;
29498 +
29499 sz = size_inside_page(p, low_count);
29500
29501 /*
29502 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29503 */
29504 kbuf = xlate_dev_kmem_ptr((char *)p);
29505
29506 - if (copy_to_user(buf, kbuf, sz))
29507 +#ifdef CONFIG_PAX_USERCOPY
29508 + temp = kmalloc(sz, GFP_KERNEL);
29509 + if (!temp)
29510 + return -ENOMEM;
29511 + memcpy(temp, kbuf, sz);
29512 +#else
29513 + temp = kbuf;
29514 +#endif
29515 +
29516 + err = copy_to_user(buf, temp, sz);
29517 +
29518 +#ifdef CONFIG_PAX_USERCOPY
29519 + kfree(temp);
29520 +#endif
29521 +
29522 + if (err)
29523 return -EFAULT;
29524 buf += sz;
29525 p += sz;
29526 @@ -867,6 +914,9 @@ static const struct memdev {
29527 #ifdef CONFIG_CRASH_DUMP
29528 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29529 #endif
29530 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29531 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29532 +#endif
29533 };
29534
29535 static int memory_open(struct inode *inode, struct file *filp)
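The read_mem()/read_kmem() hunks above add a bounce buffer when PAX_USERCOPY is enabled: data is first duplicated into a freshly allocated object of exactly sz bytes and only then copied to userspace, so a size-checking copy routine can bound the transfer by the source object's own length. The sketch below models that bound in userspace; checked_copy(), device_window and the sizes are all illustrative, not the kernel interface.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Models copy_to_user() with a PAX_USERCOPY-style object-size check. */
static int checked_copy(void *dst, const void *src, size_t len, size_t obj_size)
{
        if (len > obj_size)              /* would read past the source object */
                return -1;
        memcpy(dst, src, len);
        return 0;
}

int main(void)
{
        char device_window[4096] = "data that lives in a hard-to-size mapping";
        char user_buf[64];
        size_t sz = 16;

        /* Bounce through a heap object whose size equals the copy length. */
        char *temp = malloc(sz);
        if (!temp)
                return 1;
        memcpy(temp, device_window, sz);

        if (checked_copy(user_buf, temp, sz, sz) == 0)
                printf("copied %zu bytes: %.16s\n", sz, user_buf);

        free(temp);
        return 0;
}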
29536 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29537 index 9df78e2..01ba9ae 100644
29538 --- a/drivers/char/nvram.c
29539 +++ b/drivers/char/nvram.c
29540 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29541
29542 spin_unlock_irq(&rtc_lock);
29543
29544 - if (copy_to_user(buf, contents, tmp - contents))
29545 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29546 return -EFAULT;
29547
29548 *ppos = i;
29549 diff --git a/drivers/char/random.c b/drivers/char/random.c
29550 index 4ec04a7..4a092ed 100644
29551 --- a/drivers/char/random.c
29552 +++ b/drivers/char/random.c
29553 @@ -261,8 +261,13 @@
29554 /*
29555 * Configuration information
29556 */
29557 +#ifdef CONFIG_GRKERNSEC_RANDNET
29558 +#define INPUT_POOL_WORDS 512
29559 +#define OUTPUT_POOL_WORDS 128
29560 +#else
29561 #define INPUT_POOL_WORDS 128
29562 #define OUTPUT_POOL_WORDS 32
29563 +#endif
29564 #define SEC_XFER_SIZE 512
29565 #define EXTRACT_SIZE 10
29566
29567 @@ -300,10 +305,17 @@ static struct poolinfo {
29568 int poolwords;
29569 int tap1, tap2, tap3, tap4, tap5;
29570 } poolinfo_table[] = {
29571 +#ifdef CONFIG_GRKERNSEC_RANDNET
29572 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29573 + { 512, 411, 308, 208, 104, 1 },
29574 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29575 + { 128, 103, 76, 51, 25, 1 },
29576 +#else
29577 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29578 { 128, 103, 76, 51, 25, 1 },
29579 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29580 { 32, 26, 20, 14, 7, 1 },
29581 +#endif
29582 #if 0
29583 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29584 { 2048, 1638, 1231, 819, 411, 1 },
29585 @@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29586
29587 extract_buf(r, tmp);
29588 i = min_t(int, nbytes, EXTRACT_SIZE);
29589 - if (copy_to_user(buf, tmp, i)) {
29590 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29591 ret = -EFAULT;
29592 break;
29593 }
29594 @@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29595 #include <linux/sysctl.h>
29596
29597 static int min_read_thresh = 8, min_write_thresh;
29598 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
29599 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29600 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29601 static char sysctl_bootid[16];
29602
29603 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29604 index 45713f0..8286d21 100644
29605 --- a/drivers/char/sonypi.c
29606 +++ b/drivers/char/sonypi.c
29607 @@ -54,6 +54,7 @@
29608
29609 #include <asm/uaccess.h>
29610 #include <asm/io.h>
29611 +#include <asm/local.h>
29612
29613 #include <linux/sonypi.h>
29614
29615 @@ -490,7 +491,7 @@ static struct sonypi_device {
29616 spinlock_t fifo_lock;
29617 wait_queue_head_t fifo_proc_list;
29618 struct fasync_struct *fifo_async;
29619 - int open_count;
29620 + local_t open_count;
29621 int model;
29622 struct input_dev *input_jog_dev;
29623 struct input_dev *input_key_dev;
29624 @@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29625 static int sonypi_misc_release(struct inode *inode, struct file *file)
29626 {
29627 mutex_lock(&sonypi_device.lock);
29628 - sonypi_device.open_count--;
29629 + local_dec(&sonypi_device.open_count);
29630 mutex_unlock(&sonypi_device.lock);
29631 return 0;
29632 }
29633 @@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29634 {
29635 mutex_lock(&sonypi_device.lock);
29636 /* Flush input queue on first open */
29637 - if (!sonypi_device.open_count)
29638 + if (!local_read(&sonypi_device.open_count))
29639 kfifo_reset(&sonypi_device.fifo);
29640 - sonypi_device.open_count++;
29641 + local_inc(&sonypi_device.open_count);
29642 mutex_unlock(&sonypi_device.lock);
29643
29644 return 0;
29645 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29646 index ad7c732..5aa8054 100644
29647 --- a/drivers/char/tpm/tpm.c
29648 +++ b/drivers/char/tpm/tpm.c
29649 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29650 chip->vendor.req_complete_val)
29651 goto out_recv;
29652
29653 - if ((status == chip->vendor.req_canceled)) {
29654 + if (status == chip->vendor.req_canceled) {
29655 dev_err(chip->dev, "Operation Canceled\n");
29656 rc = -ECANCELED;
29657 goto out;
29658 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29659 index 0636520..169c1d0 100644
29660 --- a/drivers/char/tpm/tpm_bios.c
29661 +++ b/drivers/char/tpm/tpm_bios.c
29662 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29663 event = addr;
29664
29665 if ((event->event_type == 0 && event->event_size == 0) ||
29666 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29667 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29668 return NULL;
29669
29670 return addr;
29671 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29672 return NULL;
29673
29674 if ((event->event_type == 0 && event->event_size == 0) ||
29675 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29676 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29677 return NULL;
29678
29679 (*pos)++;
29680 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29681 int i;
29682
29683 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29684 - seq_putc(m, data[i]);
29685 + if (!seq_putc(m, data[i]))
29686 + return -EFAULT;
29687
29688 return 0;
29689 }
29690 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29691 log->bios_event_log_end = log->bios_event_log + len;
29692
29693 virt = acpi_os_map_memory(start, len);
29694 + if (!virt) {
29695 + kfree(log->bios_event_log);
29696 + log->bios_event_log = NULL;
29697 + return -EFAULT;
29698 + }
29699
29700 - memcpy(log->bios_event_log, virt, len);
29701 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29702
29703 acpi_os_unmap_memory(virt, len);
29704 return 0;
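Besides the acpi_os_map_memory() failure handling, the tpm_bios.c hunks above rewrite two bounds checks so that an attacker-sized event_size is never added to a pointer, where the sum could wrap past limit; the length is instead compared against the space remaining. The helper below shows the shape of that inverted check, assuming the header itself has already been confirmed to fit; struct event_header and event_payload() are this example's names, not the TPM log structures.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct event_header {
        uint32_t type;
        uint32_t size;   /* untrusted, attacker-controlled length */
};

/* Returns the event payload if it fits entirely inside [cursor, limit). */
static const void *event_payload(const char *cursor, const char *limit)
{
        struct event_header ev;

        if ((size_t)(limit - cursor) < sizeof(ev))
                return NULL;                            /* no room for header */
        memcpy(&ev, cursor, sizeof(ev));
        if (ev.size >= (size_t)(limit - cursor) - sizeof(ev))
                return NULL;                            /* payload would overflow */
        return cursor + sizeof(ev);
}

int main(void)
{
        char log[64];
        struct event_header ev = { .type = 1, .size = 8 };

        memset(log, 0, sizeof(log));
        memcpy(log, &ev, sizeof(ev));

        printf("payload %sfound\n",
               event_payload(log, log + sizeof(log)) ? "" : "not ");
        return 0;
}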
29705 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29706 index cdf2f54..e55c197 100644
29707 --- a/drivers/char/virtio_console.c
29708 +++ b/drivers/char/virtio_console.c
29709 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29710 if (to_user) {
29711 ssize_t ret;
29712
29713 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29714 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29715 if (ret)
29716 return -EFAULT;
29717 } else {
29718 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29719 if (!port_has_data(port) && !port->host_connected)
29720 return 0;
29721
29722 - return fill_readbuf(port, ubuf, count, true);
29723 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29724 }
29725
29726 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29727 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29728 index 97f5064..202b6e6 100644
29729 --- a/drivers/edac/edac_pci_sysfs.c
29730 +++ b/drivers/edac/edac_pci_sysfs.c
29731 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29732 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29733 static int edac_pci_poll_msec = 1000; /* one second workq period */
29734
29735 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
29736 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29737 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29738 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29739
29740 static struct kobject *edac_pci_top_main_kobj;
29741 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29742 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29743 edac_printk(KERN_CRIT, EDAC_PCI,
29744 "Signaled System Error on %s\n",
29745 pci_name(dev));
29746 - atomic_inc(&pci_nonparity_count);
29747 + atomic_inc_unchecked(&pci_nonparity_count);
29748 }
29749
29750 if (status & (PCI_STATUS_PARITY)) {
29751 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29752 "Master Data Parity Error on %s\n",
29753 pci_name(dev));
29754
29755 - atomic_inc(&pci_parity_count);
29756 + atomic_inc_unchecked(&pci_parity_count);
29757 }
29758
29759 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29760 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29761 "Detected Parity Error on %s\n",
29762 pci_name(dev));
29763
29764 - atomic_inc(&pci_parity_count);
29765 + atomic_inc_unchecked(&pci_parity_count);
29766 }
29767 }
29768
29769 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29770 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29771 "Signaled System Error on %s\n",
29772 pci_name(dev));
29773 - atomic_inc(&pci_nonparity_count);
29774 + atomic_inc_unchecked(&pci_nonparity_count);
29775 }
29776
29777 if (status & (PCI_STATUS_PARITY)) {
29778 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29779 "Master Data Parity Error on "
29780 "%s\n", pci_name(dev));
29781
29782 - atomic_inc(&pci_parity_count);
29783 + atomic_inc_unchecked(&pci_parity_count);
29784 }
29785
29786 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29787 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29788 "Detected Parity Error on %s\n",
29789 pci_name(dev));
29790
29791 - atomic_inc(&pci_parity_count);
29792 + atomic_inc_unchecked(&pci_parity_count);
29793 }
29794 }
29795 }
29796 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29797 if (!check_pci_errors)
29798 return;
29799
29800 - before_count = atomic_read(&pci_parity_count);
29801 + before_count = atomic_read_unchecked(&pci_parity_count);
29802
29803 /* scan all PCI devices looking for a Parity Error on devices and
29804 * bridges.
29805 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29806 /* Only if operator has selected panic on PCI Error */
29807 if (edac_pci_get_panic_on_pe()) {
29808 /* If the count is different 'after' from 'before' */
29809 - if (before_count != atomic_read(&pci_parity_count))
29810 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29811 panic("EDAC: PCI Parity Error");
29812 }
29813 }
29814 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29815 index c6074c5..88a9e2e 100644
29816 --- a/drivers/edac/mce_amd.h
29817 +++ b/drivers/edac/mce_amd.h
29818 @@ -82,7 +82,7 @@ extern const char * const ii_msgs[];
29819 struct amd_decoder_ops {
29820 bool (*dc_mce)(u16, u8);
29821 bool (*ic_mce)(u16, u8);
29822 -};
29823 +} __no_const;
29824
29825 void amd_report_gart_errors(bool);
29826 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29827 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29828 index cc595eb..4ec702a 100644
29829 --- a/drivers/firewire/core-card.c
29830 +++ b/drivers/firewire/core-card.c
29831 @@ -679,7 +679,7 @@ void fw_card_release(struct kref *kref)
29832
29833 void fw_core_remove_card(struct fw_card *card)
29834 {
29835 - struct fw_card_driver dummy_driver = dummy_driver_template;
29836 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
29837
29838 card->driver->update_phy_reg(card, 4,
29839 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29840 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29841 index 2e6b245..c3857d9 100644
29842 --- a/drivers/firewire/core-cdev.c
29843 +++ b/drivers/firewire/core-cdev.c
29844 @@ -1341,8 +1341,7 @@ static int init_iso_resource(struct client *client,
29845 int ret;
29846
29847 if ((request->channels == 0 && request->bandwidth == 0) ||
29848 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29849 - request->bandwidth < 0)
29850 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29851 return -EINVAL;
29852
29853 r = kmalloc(sizeof(*r), GFP_KERNEL);
29854 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29855 index dea2dcc..a4fb978 100644
29856 --- a/drivers/firewire/core-transaction.c
29857 +++ b/drivers/firewire/core-transaction.c
29858 @@ -37,6 +37,7 @@
29859 #include <linux/timer.h>
29860 #include <linux/types.h>
29861 #include <linux/workqueue.h>
29862 +#include <linux/sched.h>
29863
29864 #include <asm/byteorder.h>
29865
29866 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29867 index 9047f55..e47c7ff 100644
29868 --- a/drivers/firewire/core.h
29869 +++ b/drivers/firewire/core.h
29870 @@ -110,6 +110,7 @@ struct fw_card_driver {
29871
29872 int (*stop_iso)(struct fw_iso_context *ctx);
29873 };
29874 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29875
29876 void fw_card_initialize(struct fw_card *card,
29877 const struct fw_card_driver *driver, struct device *device);
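The __no_const marking in mce_amd.h earlier and the fw_card_driver_no_const typedef above serve the same purpose: method tables full of function pointers are meant to end up read-only, but code such as fw_core_remove_card() needs an on-stack, writable instance of the same shape, so a non-const flavour of the type is kept around. A plain-C sketch of the idea follows, with the attribute machinery reduced to a typedef and all names invented for the example.

#include <stdio.h>

struct card_driver {
        void (*flush)(void);
        void (*stop)(void);
};
typedef struct card_driver card_driver_no_const;   /* writable flavour */

static void real_flush(void) { puts("flush"); }
static void real_stop(void)  { puts("stop"); }
static void noop(void)       { }

/* The shared template stays read-only. */
static const struct card_driver dummy_driver_template = {
        .flush = real_flush,
        .stop  = real_stop,
};

int main(void)
{
        /* Local, editable copy, as with dummy_driver in fw_core_remove_card(). */
        card_driver_no_const dummy_driver = dummy_driver_template;

        dummy_driver.flush = noop;       /* patch one method for teardown */
        dummy_driver.flush();
        dummy_driver.stop();
        return 0;
}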
29878 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29879 index 153980b..4b4d046 100644
29880 --- a/drivers/firmware/dmi_scan.c
29881 +++ b/drivers/firmware/dmi_scan.c
29882 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29883 }
29884 }
29885 else {
29886 - /*
29887 - * no iounmap() for that ioremap(); it would be a no-op, but
29888 - * it's so early in setup that sucker gets confused into doing
29889 - * what it shouldn't if we actually call it.
29890 - */
29891 p = dmi_ioremap(0xF0000, 0x10000);
29892 if (p == NULL)
29893 goto error;
29894 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29895 if (buf == NULL)
29896 return -1;
29897
29898 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29899 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29900
29901 iounmap(buf);
29902 return 0;
29903 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29904 index 82d5c20..44a7177 100644
29905 --- a/drivers/gpio/gpio-vr41xx.c
29906 +++ b/drivers/gpio/gpio-vr41xx.c
29907 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29908 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29909 maskl, pendl, maskh, pendh);
29910
29911 - atomic_inc(&irq_err_count);
29912 + atomic_inc_unchecked(&irq_err_count);
29913
29914 return -EINVAL;
29915 }
29916 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
29917 index 8111889..367b253 100644
29918 --- a/drivers/gpu/drm/drm_crtc_helper.c
29919 +++ b/drivers/gpu/drm/drm_crtc_helper.c
29920 @@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
29921 struct drm_crtc *tmp;
29922 int crtc_mask = 1;
29923
29924 - WARN(!crtc, "checking null crtc?\n");
29925 + BUG_ON(!crtc);
29926
29927 dev = crtc->dev;
29928
29929 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
29930 index 6116e3b..c29dd16 100644
29931 --- a/drivers/gpu/drm/drm_drv.c
29932 +++ b/drivers/gpu/drm/drm_drv.c
29933 @@ -316,7 +316,7 @@ module_exit(drm_core_exit);
29934 /**
29935 * Copy and IOCTL return string to user space
29936 */
29937 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
29938 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
29939 {
29940 int len;
29941
29942 @@ -399,7 +399,7 @@ long drm_ioctl(struct file *filp,
29943 return -ENODEV;
29944
29945 atomic_inc(&dev->ioctl_count);
29946 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29947 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29948 ++file_priv->ioctl_count;
29949
29950 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29951 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
29952 index 123de28..43a0897 100644
29953 --- a/drivers/gpu/drm/drm_fops.c
29954 +++ b/drivers/gpu/drm/drm_fops.c
29955 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
29956 }
29957
29958 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29959 - atomic_set(&dev->counts[i], 0);
29960 + atomic_set_unchecked(&dev->counts[i], 0);
29961
29962 dev->sigdata.lock = NULL;
29963
29964 @@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
29965
29966 retcode = drm_open_helper(inode, filp, dev);
29967 if (!retcode) {
29968 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29969 - if (!dev->open_count++)
29970 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29971 + if (local_inc_return(&dev->open_count) == 1)
29972 retcode = drm_setup(dev);
29973 }
29974 if (!retcode) {
29975 @@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
29976
29977 mutex_lock(&drm_global_mutex);
29978
29979 - DRM_DEBUG("open_count = %d\n", dev->open_count);
29980 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
29981
29982 if (dev->driver->preclose)
29983 dev->driver->preclose(dev, file_priv);
29984 @@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
29985 * Begin inline drm_release
29986 */
29987
29988 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29989 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
29990 task_pid_nr(current),
29991 (long)old_encode_dev(file_priv->minor->device),
29992 - dev->open_count);
29993 + local_read(&dev->open_count));
29994
29995 /* Release any auth tokens that might point to this file_priv,
29996 (do that under the drm_global_mutex) */
29997 @@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
29998 * End inline drm_release
29999 */
30000
30001 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30002 - if (!--dev->open_count) {
30003 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30004 + if (local_dec_and_test(&dev->open_count)) {
30005 if (atomic_read(&dev->ioctl_count)) {
30006 DRM_ERROR("Device busy: %d\n",
30007 atomic_read(&dev->ioctl_count));
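In the drm_fops hunks above, dev->open_count stops being a plain int: the open path keys drm_setup() off local_inc_return() == 1 and the release path keys the final teardown off local_dec_and_test(), so the "first opener" and "last closer" decisions come from the return value of a single atomic update rather than a separate read-modify-write. A userspace model of that pattern with C11 atomics; the local_t semantics are only approximated and the names are the example's own.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long open_count;

static void device_setup(void)    { puts("drm_setup()"); }
static void device_teardown(void) { puts("final release"); }

static void drm_open(void)
{
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)  /* local_inc_return() == 1 */
                device_setup();
}

static void drm_release(void)
{
        if (atomic_fetch_sub(&open_count, 1) - 1 == 0)  /* local_dec_and_test() */
                device_teardown();
}

int main(void)
{
        drm_open();      /* first open: runs setup    */
        drm_open();      /* second open: no setup     */
        drm_release();
        drm_release();   /* last close: runs teardown */
        return 0;
}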
30008 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30009 index c87dc96..326055d 100644
30010 --- a/drivers/gpu/drm/drm_global.c
30011 +++ b/drivers/gpu/drm/drm_global.c
30012 @@ -36,7 +36,7 @@
30013 struct drm_global_item {
30014 struct mutex mutex;
30015 void *object;
30016 - int refcount;
30017 + atomic_t refcount;
30018 };
30019
30020 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30021 @@ -49,7 +49,7 @@ void drm_global_init(void)
30022 struct drm_global_item *item = &glob[i];
30023 mutex_init(&item->mutex);
30024 item->object = NULL;
30025 - item->refcount = 0;
30026 + atomic_set(&item->refcount, 0);
30027 }
30028 }
30029
30030 @@ -59,7 +59,7 @@ void drm_global_release(void)
30031 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30032 struct drm_global_item *item = &glob[i];
30033 BUG_ON(item->object != NULL);
30034 - BUG_ON(item->refcount != 0);
30035 + BUG_ON(atomic_read(&item->refcount) != 0);
30036 }
30037 }
30038
30039 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30040 void *object;
30041
30042 mutex_lock(&item->mutex);
30043 - if (item->refcount == 0) {
30044 + if (atomic_read(&item->refcount) == 0) {
30045 item->object = kzalloc(ref->size, GFP_KERNEL);
30046 if (unlikely(item->object == NULL)) {
30047 ret = -ENOMEM;
30048 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30049 goto out_err;
30050
30051 }
30052 - ++item->refcount;
30053 + atomic_inc(&item->refcount);
30054 ref->object = item->object;
30055 object = item->object;
30056 mutex_unlock(&item->mutex);
30057 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30058 struct drm_global_item *item = &glob[ref->global_type];
30059
30060 mutex_lock(&item->mutex);
30061 - BUG_ON(item->refcount == 0);
30062 + BUG_ON(atomic_read(&item->refcount) == 0);
30063 BUG_ON(ref->object != item->object);
30064 - if (--item->refcount == 0) {
30065 + if (atomic_dec_and_test(&item->refcount)) {
30066 ref->release(ref);
30067 item->object = NULL;
30068 }
30069 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30070 index ab1162d..42587b2 100644
30071 --- a/drivers/gpu/drm/drm_info.c
30072 +++ b/drivers/gpu/drm/drm_info.c
30073 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30074 struct drm_local_map *map;
30075 struct drm_map_list *r_list;
30076
30077 - /* Hardcoded from _DRM_FRAME_BUFFER,
30078 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30079 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30080 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30081 + static const char * const types[] = {
30082 + [_DRM_FRAME_BUFFER] = "FB",
30083 + [_DRM_REGISTERS] = "REG",
30084 + [_DRM_SHM] = "SHM",
30085 + [_DRM_AGP] = "AGP",
30086 + [_DRM_SCATTER_GATHER] = "SG",
30087 + [_DRM_CONSISTENT] = "PCI",
30088 + [_DRM_GEM] = "GEM" };
30089 const char *type;
30090 int i;
30091
30092 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30093 map = r_list->map;
30094 if (!map)
30095 continue;
30096 - if (map->type < 0 || map->type > 5)
30097 + if (map->type >= ARRAY_SIZE(types))
30098 type = "??";
30099 else
30100 type = types[map->type];
30101 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30102 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30103 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30104 vma->vm_flags & VM_IO ? 'i' : '-',
30105 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30106 + 0);
30107 +#else
30108 vma->vm_pgoff);
30109 +#endif
30110
30111 #if defined(__i386__)
30112 pgprot = pgprot_val(vma->vm_page_prot);
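Aside from the GRKERNSEC_HIDESYM change, the drm_info.c hunk above replaces a positional string array and a hard-coded "type > 5" test with designated initializers and an ARRAY_SIZE bound, which also gives _DRM_GEM a name instead of falling off the end of the table. The same table-plus-bound idiom in standalone form; the enum values and names are invented, and the NULL-entry check is an extra belt-and-braces step not present in the hunk.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type {
        MAP_FRAME_BUFFER, MAP_REGISTERS, MAP_SHM,
        MAP_AGP, MAP_SCATTER_GATHER, MAP_CONSISTENT, MAP_GEM,
};

static const char * const types[] = {
        [MAP_FRAME_BUFFER]   = "FB",
        [MAP_REGISTERS]      = "REG",
        [MAP_SHM]            = "SHM",
        [MAP_AGP]            = "AGP",
        [MAP_SCATTER_GATHER] = "SG",
        [MAP_CONSISTENT]     = "PCI",
        [MAP_GEM]            = "GEM",
};

static const char *type_name(unsigned int type)
{
        /* Out-of-range or unnamed values degrade to "??", never an OOB read. */
        if (type >= ARRAY_SIZE(types) || !types[type])
                return "??";
        return types[type];
}

int main(void)
{
        printf("%s %s %s\n", type_name(MAP_SHM), type_name(MAP_GEM),
               type_name(42));
        return 0;
}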
30113 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30114 index 637fcc3..e890b33 100644
30115 --- a/drivers/gpu/drm/drm_ioc32.c
30116 +++ b/drivers/gpu/drm/drm_ioc32.c
30117 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30118 request = compat_alloc_user_space(nbytes);
30119 if (!access_ok(VERIFY_WRITE, request, nbytes))
30120 return -EFAULT;
30121 - list = (struct drm_buf_desc *) (request + 1);
30122 + list = (struct drm_buf_desc __user *) (request + 1);
30123
30124 if (__put_user(count, &request->count)
30125 || __put_user(list, &request->list))
30126 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30127 request = compat_alloc_user_space(nbytes);
30128 if (!access_ok(VERIFY_WRITE, request, nbytes))
30129 return -EFAULT;
30130 - list = (struct drm_buf_pub *) (request + 1);
30131 + list = (struct drm_buf_pub __user *) (request + 1);
30132
30133 if (__put_user(count, &request->count)
30134 || __put_user(list, &request->list))
30135 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30136 index cf85155..f2665cb 100644
30137 --- a/drivers/gpu/drm/drm_ioctl.c
30138 +++ b/drivers/gpu/drm/drm_ioctl.c
30139 @@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30140 stats->data[i].value =
30141 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30142 else
30143 - stats->data[i].value = atomic_read(&dev->counts[i]);
30144 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30145 stats->data[i].type = dev->types[i];
30146 }
30147
30148 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30149 index c79c713..2048588 100644
30150 --- a/drivers/gpu/drm/drm_lock.c
30151 +++ b/drivers/gpu/drm/drm_lock.c
30152 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30153 if (drm_lock_take(&master->lock, lock->context)) {
30154 master->lock.file_priv = file_priv;
30155 master->lock.lock_time = jiffies;
30156 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30157 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30158 break; /* Got lock */
30159 }
30160
30161 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30162 return -EINVAL;
30163 }
30164
30165 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30166 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30167
30168 if (drm_lock_free(&master->lock, lock->context)) {
30169 /* FIXME: Should really bail out here. */
30170 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30171 index aa454f8..6d38580 100644
30172 --- a/drivers/gpu/drm/drm_stub.c
30173 +++ b/drivers/gpu/drm/drm_stub.c
30174 @@ -512,7 +512,7 @@ void drm_unplug_dev(struct drm_device *dev)
30175
30176 drm_device_set_unplugged(dev);
30177
30178 - if (dev->open_count == 0) {
30179 + if (local_read(&dev->open_count) == 0) {
30180 drm_put_dev(dev);
30181 }
30182 mutex_unlock(&drm_global_mutex);
30183 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30184 index f920fb5..001c52d 100644
30185 --- a/drivers/gpu/drm/i810/i810_dma.c
30186 +++ b/drivers/gpu/drm/i810/i810_dma.c
30187 @@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30188 dma->buflist[vertex->idx],
30189 vertex->discard, vertex->used);
30190
30191 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30192 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30193 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30194 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30195 sarea_priv->last_enqueue = dev_priv->counter - 1;
30196 sarea_priv->last_dispatch = (int)hw_status[5];
30197
30198 @@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30199 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30200 mc->last_render);
30201
30202 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30203 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30204 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30205 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30206 sarea_priv->last_enqueue = dev_priv->counter - 1;
30207 sarea_priv->last_dispatch = (int)hw_status[5];
30208
30209 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30210 index c9339f4..f5e1b9d 100644
30211 --- a/drivers/gpu/drm/i810/i810_drv.h
30212 +++ b/drivers/gpu/drm/i810/i810_drv.h
30213 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30214 int page_flipping;
30215
30216 wait_queue_head_t irq_queue;
30217 - atomic_t irq_received;
30218 - atomic_t irq_emitted;
30219 + atomic_unchecked_t irq_received;
30220 + atomic_unchecked_t irq_emitted;
30221
30222 int front_offset;
30223 } drm_i810_private_t;
30224 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30225 index e6162a1..b2ff486 100644
30226 --- a/drivers/gpu/drm/i915/i915_debugfs.c
30227 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
30228 @@ -500,7 +500,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30229 I915_READ(GTIMR));
30230 }
30231 seq_printf(m, "Interrupts received: %d\n",
30232 - atomic_read(&dev_priv->irq_received));
30233 + atomic_read_unchecked(&dev_priv->irq_received));
30234 for (i = 0; i < I915_NUM_RINGS; i++) {
30235 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30236 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30237 @@ -1313,7 +1313,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30238 return ret;
30239
30240 if (opregion->header)
30241 - seq_write(m, opregion->header, OPREGION_SIZE);
30242 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30243
30244 mutex_unlock(&dev->struct_mutex);
30245
30246 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30247 index ba60f3c..e2dff7f 100644
30248 --- a/drivers/gpu/drm/i915/i915_dma.c
30249 +++ b/drivers/gpu/drm/i915/i915_dma.c
30250 @@ -1178,7 +1178,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30251 bool can_switch;
30252
30253 spin_lock(&dev->count_lock);
30254 - can_switch = (dev->open_count == 0);
30255 + can_switch = (local_read(&dev->open_count) == 0);
30256 spin_unlock(&dev->count_lock);
30257 return can_switch;
30258 }
30259 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30260 index 5fabc6c..0b08aa1 100644
30261 --- a/drivers/gpu/drm/i915/i915_drv.h
30262 +++ b/drivers/gpu/drm/i915/i915_drv.h
30263 @@ -240,7 +240,7 @@ struct drm_i915_display_funcs {
30264 /* render clock increase/decrease */
30265 /* display clock increase/decrease */
30266 /* pll clock increase/decrease */
30267 -};
30268 +} __no_const;
30269
30270 struct intel_device_info {
30271 u8 gen;
30272 @@ -350,7 +350,7 @@ typedef struct drm_i915_private {
30273 int current_page;
30274 int page_flipping;
30275
30276 - atomic_t irq_received;
30277 + atomic_unchecked_t irq_received;
30278
30279 /* protects the irq masks */
30280 spinlock_t irq_lock;
30281 @@ -937,7 +937,7 @@ struct drm_i915_gem_object {
30282 * will be page flipped away on the next vblank. When it
30283 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30284 */
30285 - atomic_t pending_flip;
30286 + atomic_unchecked_t pending_flip;
30287 };
30288
30289 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30290 @@ -1359,7 +1359,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30291 extern void intel_teardown_gmbus(struct drm_device *dev);
30292 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30293 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30294 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30295 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30296 {
30297 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30298 }
30299 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30300 index de43194..a14c4cc 100644
30301 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30302 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30303 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30304 i915_gem_clflush_object(obj);
30305
30306 if (obj->base.pending_write_domain)
30307 - cd->flips |= atomic_read(&obj->pending_flip);
30308 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30309
30310 /* The actual obj->write_domain will be updated with
30311 * pending_write_domain after we emit the accumulated flush for all
30312 @@ -933,9 +933,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30313
30314 static int
30315 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30316 - int count)
30317 + unsigned int count)
30318 {
30319 - int i;
30320 + unsigned int i;
30321
30322 for (i = 0; i < count; i++) {
30323 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30324 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30325 index f57e5cf..c82f79d 100644
30326 --- a/drivers/gpu/drm/i915/i915_irq.c
30327 +++ b/drivers/gpu/drm/i915/i915_irq.c
30328 @@ -472,7 +472,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30329 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30330 struct drm_i915_master_private *master_priv;
30331
30332 - atomic_inc(&dev_priv->irq_received);
30333 + atomic_inc_unchecked(&dev_priv->irq_received);
30334
30335 /* disable master interrupt before clearing iir */
30336 de_ier = I915_READ(DEIER);
30337 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30338 struct drm_i915_master_private *master_priv;
30339 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30340
30341 - atomic_inc(&dev_priv->irq_received);
30342 + atomic_inc_unchecked(&dev_priv->irq_received);
30343
30344 if (IS_GEN6(dev))
30345 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30346 @@ -1292,7 +1292,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30347 int ret = IRQ_NONE, pipe;
30348 bool blc_event = false;
30349
30350 - atomic_inc(&dev_priv->irq_received);
30351 + atomic_inc_unchecked(&dev_priv->irq_received);
30352
30353 iir = I915_READ(IIR);
30354
30355 @@ -1803,7 +1803,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30356 {
30357 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30358
30359 - atomic_set(&dev_priv->irq_received, 0);
30360 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30361
30362 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30363 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30364 @@ -1980,7 +1980,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30365 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30366 int pipe;
30367
30368 - atomic_set(&dev_priv->irq_received, 0);
30369 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30370
30371 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30372 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30373 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30374 index 79a7de1..56f2f3e 100644
30375 --- a/drivers/gpu/drm/i915/intel_display.c
30376 +++ b/drivers/gpu/drm/i915/intel_display.c
30377 @@ -2254,7 +2254,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
30378
30379 wait_event(dev_priv->pending_flip_queue,
30380 atomic_read(&dev_priv->mm.wedged) ||
30381 - atomic_read(&obj->pending_flip) == 0);
30382 + atomic_read_unchecked(&obj->pending_flip) == 0);
30383
30384 /* Big Hammer, we also need to ensure that any pending
30385 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30386 @@ -2919,7 +2919,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30387 obj = to_intel_framebuffer(crtc->fb)->obj;
30388 dev_priv = crtc->dev->dev_private;
30389 wait_event(dev_priv->pending_flip_queue,
30390 - atomic_read(&obj->pending_flip) == 0);
30391 + atomic_read_unchecked(&obj->pending_flip) == 0);
30392 }
30393
30394 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30395 @@ -7286,7 +7286,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30396
30397 atomic_clear_mask(1 << intel_crtc->plane,
30398 &obj->pending_flip.counter);
30399 - if (atomic_read(&obj->pending_flip) == 0)
30400 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
30401 wake_up(&dev_priv->pending_flip_queue);
30402
30403 schedule_work(&work->work);
30404 @@ -7582,7 +7582,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30405 /* Block clients from rendering to the new back buffer until
30406 * the flip occurs and the object is no longer visible.
30407 */
30408 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30409 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30410
30411 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30412 if (ret)
30413 @@ -7596,7 +7596,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30414 return 0;
30415
30416 cleanup_pending:
30417 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30418 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30419 drm_gem_object_unreference(&work->old_fb_obj->base);
30420 drm_gem_object_unreference(&obj->base);
30421 mutex_unlock(&dev->struct_mutex);
30422 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30423 index 54558a0..2d97005 100644
30424 --- a/drivers/gpu/drm/mga/mga_drv.h
30425 +++ b/drivers/gpu/drm/mga/mga_drv.h
30426 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30427 u32 clear_cmd;
30428 u32 maccess;
30429
30430 - atomic_t vbl_received; /**< Number of vblanks received. */
30431 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30432 wait_queue_head_t fence_queue;
30433 - atomic_t last_fence_retired;
30434 + atomic_unchecked_t last_fence_retired;
30435 u32 next_fence_to_post;
30436
30437 unsigned int fb_cpp;
30438 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30439 index 2581202..f230a8d9 100644
30440 --- a/drivers/gpu/drm/mga/mga_irq.c
30441 +++ b/drivers/gpu/drm/mga/mga_irq.c
30442 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30443 if (crtc != 0)
30444 return 0;
30445
30446 - return atomic_read(&dev_priv->vbl_received);
30447 + return atomic_read_unchecked(&dev_priv->vbl_received);
30448 }
30449
30450
30451 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30452 /* VBLANK interrupt */
30453 if (status & MGA_VLINEPEN) {
30454 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30455 - atomic_inc(&dev_priv->vbl_received);
30456 + atomic_inc_unchecked(&dev_priv->vbl_received);
30457 drm_handle_vblank(dev, 0);
30458 handled = 1;
30459 }
30460 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30461 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30462 MGA_WRITE(MGA_PRIMEND, prim_end);
30463
30464 - atomic_inc(&dev_priv->last_fence_retired);
30465 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
30466 DRM_WAKEUP(&dev_priv->fence_queue);
30467 handled = 1;
30468 }
30469 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30470 * using fences.
30471 */
30472 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30473 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30474 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30475 - *sequence) <= (1 << 23)));
30476
30477 *sequence = cur_fence;
30478 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30479 index 0be4a81..7464804 100644
30480 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30481 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30482 @@ -5329,7 +5329,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30483 struct bit_table {
30484 const char id;
30485 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30486 -};
30487 +} __no_const;
30488
30489 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30490
30491 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30492 index 3aef353..0ad1322 100644
30493 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30494 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30495 @@ -240,7 +240,7 @@ struct nouveau_channel {
30496 struct list_head pending;
30497 uint32_t sequence;
30498 uint32_t sequence_ack;
30499 - atomic_t last_sequence_irq;
30500 + atomic_unchecked_t last_sequence_irq;
30501 struct nouveau_vma vma;
30502 } fence;
30503
30504 @@ -321,7 +321,7 @@ struct nouveau_exec_engine {
30505 u32 handle, u16 class);
30506 void (*set_tile_region)(struct drm_device *dev, int i);
30507 void (*tlb_flush)(struct drm_device *, int engine);
30508 -};
30509 +} __no_const;
30510
30511 struct nouveau_instmem_engine {
30512 void *priv;
30513 @@ -343,13 +343,13 @@ struct nouveau_instmem_engine {
30514 struct nouveau_mc_engine {
30515 int (*init)(struct drm_device *dev);
30516 void (*takedown)(struct drm_device *dev);
30517 -};
30518 +} __no_const;
30519
30520 struct nouveau_timer_engine {
30521 int (*init)(struct drm_device *dev);
30522 void (*takedown)(struct drm_device *dev);
30523 uint64_t (*read)(struct drm_device *dev);
30524 -};
30525 +} __no_const;
30526
30527 struct nouveau_fb_engine {
30528 int num_tiles;
30529 @@ -590,7 +590,7 @@ struct nouveau_vram_engine {
30530 void (*put)(struct drm_device *, struct nouveau_mem **);
30531
30532 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30533 -};
30534 +} __no_const;
30535
30536 struct nouveau_engine {
30537 struct nouveau_instmem_engine instmem;
30538 @@ -739,7 +739,7 @@ struct drm_nouveau_private {
30539 struct drm_global_reference mem_global_ref;
30540 struct ttm_bo_global_ref bo_global_ref;
30541 struct ttm_bo_device bdev;
30542 - atomic_t validate_sequence;
30543 + atomic_unchecked_t validate_sequence;
30544 } ttm;
30545
30546 struct {
30547 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30548 index c1dc20f..4df673c 100644
30549 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30550 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30551 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30552 if (USE_REFCNT(dev))
30553 sequence = nvchan_rd32(chan, 0x48);
30554 else
30555 - sequence = atomic_read(&chan->fence.last_sequence_irq);
30556 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30557
30558 if (chan->fence.sequence_ack == sequence)
30559 goto out;
30560 @@ -538,7 +538,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30561 return ret;
30562 }
30563
30564 - atomic_set(&chan->fence.last_sequence_irq, 0);
30565 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30566 return 0;
30567 }
30568
30569 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30570 index ed52a6f..484acdc 100644
30571 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30572 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30573 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30574 int trycnt = 0;
30575 int ret, i;
30576
30577 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30578 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30579 retry:
30580 if (++trycnt > 100000) {
30581 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30582 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30583 index c2a8511..4b996f9 100644
30584 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30585 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30586 @@ -588,7 +588,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30587 bool can_switch;
30588
30589 spin_lock(&dev->count_lock);
30590 - can_switch = (dev->open_count == 0);
30591 + can_switch = (local_read(&dev->open_count) == 0);
30592 spin_unlock(&dev->count_lock);
30593 return can_switch;
30594 }
30595 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30596 index dbdea8e..cd6eeeb 100644
30597 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
30598 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30599 @@ -554,7 +554,7 @@ static int
30600 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30601 u32 class, u32 mthd, u32 data)
30602 {
30603 - atomic_set(&chan->fence.last_sequence_irq, data);
30604 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30605 return 0;
30606 }
30607
30608 diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30609 index 2746402..c8dc4a4 100644
30610 --- a/drivers/gpu/drm/nouveau/nv50_sor.c
30611 +++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30612 @@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30613 }
30614
30615 if (nv_encoder->dcb->type == OUTPUT_DP) {
30616 - struct dp_train_func func = {
30617 + static struct dp_train_func func = {
30618 .link_set = nv50_sor_dp_link_set,
30619 .train_set = nv50_sor_dp_train_set,
30620 .train_adj = nv50_sor_dp_train_adj
30621 diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30622 index 0247250..d2f6aaf 100644
30623 --- a/drivers/gpu/drm/nouveau/nvd0_display.c
30624 +++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30625 @@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30626 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30627
30628 if (nv_encoder->dcb->type == OUTPUT_DP) {
30629 - struct dp_train_func func = {
30630 + static struct dp_train_func func = {
30631 .link_set = nvd0_sor_dp_link_set,
30632 .train_set = nvd0_sor_dp_train_set,
30633 .train_adj = nvd0_sor_dp_train_adj
30634 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30635 index bcac90b..53bfc76 100644
30636 --- a/drivers/gpu/drm/r128/r128_cce.c
30637 +++ b/drivers/gpu/drm/r128/r128_cce.c
30638 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30639
30640 /* GH: Simple idle check.
30641 */
30642 - atomic_set(&dev_priv->idle_count, 0);
30643 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30644
30645 /* We don't support anything other than bus-mastering ring mode,
30646 * but the ring can be in either AGP or PCI space for the ring
30647 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30648 index 930c71b..499aded 100644
30649 --- a/drivers/gpu/drm/r128/r128_drv.h
30650 +++ b/drivers/gpu/drm/r128/r128_drv.h
30651 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30652 int is_pci;
30653 unsigned long cce_buffers_offset;
30654
30655 - atomic_t idle_count;
30656 + atomic_unchecked_t idle_count;
30657
30658 int page_flipping;
30659 int current_page;
30660 u32 crtc_offset;
30661 u32 crtc_offset_cntl;
30662
30663 - atomic_t vbl_received;
30664 + atomic_unchecked_t vbl_received;
30665
30666 u32 color_fmt;
30667 unsigned int front_offset;
30668 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30669 index 429d5a0..7e899ed 100644
30670 --- a/drivers/gpu/drm/r128/r128_irq.c
30671 +++ b/drivers/gpu/drm/r128/r128_irq.c
30672 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30673 if (crtc != 0)
30674 return 0;
30675
30676 - return atomic_read(&dev_priv->vbl_received);
30677 + return atomic_read_unchecked(&dev_priv->vbl_received);
30678 }
30679
30680 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30681 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30682 /* VBLANK interrupt */
30683 if (status & R128_CRTC_VBLANK_INT) {
30684 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30685 - atomic_inc(&dev_priv->vbl_received);
30686 + atomic_inc_unchecked(&dev_priv->vbl_received);
30687 drm_handle_vblank(dev, 0);
30688 return IRQ_HANDLED;
30689 }
30690 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30691 index a9e33ce..09edd4b 100644
30692 --- a/drivers/gpu/drm/r128/r128_state.c
30693 +++ b/drivers/gpu/drm/r128/r128_state.c
30694 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30695
30696 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30697 {
30698 - if (atomic_read(&dev_priv->idle_count) == 0)
30699 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30700 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30701 else
30702 - atomic_set(&dev_priv->idle_count, 0);
30703 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30704 }
30705
30706 #endif
30707 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30708 index 5a82b6b..9e69c73 100644
30709 --- a/drivers/gpu/drm/radeon/mkregtable.c
30710 +++ b/drivers/gpu/drm/radeon/mkregtable.c
30711 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30712 regex_t mask_rex;
30713 regmatch_t match[4];
30714 char buf[1024];
30715 - size_t end;
30716 + long end;
30717 int len;
30718 int done = 0;
30719 int r;
30720 unsigned o;
30721 struct offset *offset;
30722 char last_reg_s[10];
30723 - int last_reg;
30724 + unsigned long last_reg;
30725
30726 if (regcomp
30727 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30728 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30729 index 138b952..d74f9cb 100644
30730 --- a/drivers/gpu/drm/radeon/radeon.h
30731 +++ b/drivers/gpu/drm/radeon/radeon.h
30732 @@ -253,7 +253,7 @@ struct radeon_fence_driver {
30733 uint32_t scratch_reg;
30734 uint64_t gpu_addr;
30735 volatile uint32_t *cpu_addr;
30736 - atomic_t seq;
30737 + atomic_unchecked_t seq;
30738 uint32_t last_seq;
30739 unsigned long last_jiffies;
30740 unsigned long last_timeout;
30741 @@ -753,7 +753,7 @@ struct r600_blit_cp_primitives {
30742 int x2, int y2);
30743 void (*draw_auto)(struct radeon_device *rdev);
30744 void (*set_default_state)(struct radeon_device *rdev);
30745 -};
30746 +} __no_const;
30747
30748 struct r600_blit {
30749 struct mutex mutex;
30750 @@ -1246,7 +1246,7 @@ struct radeon_asic {
30751 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30752 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30753 } pflip;
30754 -};
30755 +} __no_const;
30756
30757 /*
30758 * Asic structures
30759 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30760 index 5992502..c19c633 100644
30761 --- a/drivers/gpu/drm/radeon/radeon_device.c
30762 +++ b/drivers/gpu/drm/radeon/radeon_device.c
30763 @@ -691,7 +691,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30764 bool can_switch;
30765
30766 spin_lock(&dev->count_lock);
30767 - can_switch = (dev->open_count == 0);
30768 + can_switch = (local_read(&dev->open_count) == 0);
30769 spin_unlock(&dev->count_lock);
30770 return can_switch;
30771 }
30772 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30773 index a1b59ca..86f2d44 100644
30774 --- a/drivers/gpu/drm/radeon/radeon_drv.h
30775 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
30776 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30777
30778 /* SW interrupt */
30779 wait_queue_head_t swi_queue;
30780 - atomic_t swi_emitted;
30781 + atomic_unchecked_t swi_emitted;
30782 int vblank_crtc;
30783 uint32_t irq_enable_reg;
30784 uint32_t r500_disp_irq_reg;
30785 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30786 index 4bd36a3..e66fe9c 100644
30787 --- a/drivers/gpu/drm/radeon/radeon_fence.c
30788 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
30789 @@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30790 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30791 return 0;
30792 }
30793 - fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30794 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30795 if (!rdev->ring[fence->ring].ready)
30796 /* FIXME: cp is not running assume everythings is done right
30797 * away
30798 @@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30799 }
30800 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30801 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30802 - radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30803 + radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30804 rdev->fence_drv[ring].initialized = true;
30805 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30806 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30807 @@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30808 rdev->fence_drv[ring].scratch_reg = -1;
30809 rdev->fence_drv[ring].cpu_addr = NULL;
30810 rdev->fence_drv[ring].gpu_addr = 0;
30811 - atomic_set(&rdev->fence_drv[ring].seq, 0);
30812 + atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30813 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30814 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30815 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30816 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30817 index 48b7cea..342236f 100644
30818 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30819 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30820 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30821 request = compat_alloc_user_space(sizeof(*request));
30822 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30823 || __put_user(req32.param, &request->param)
30824 - || __put_user((void __user *)(unsigned long)req32.value,
30825 + || __put_user((unsigned long)req32.value,
30826 &request->value))
30827 return -EFAULT;
30828
30829 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30830 index 00da384..32f972d 100644
30831 --- a/drivers/gpu/drm/radeon/radeon_irq.c
30832 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
30833 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30834 unsigned int ret;
30835 RING_LOCALS;
30836
30837 - atomic_inc(&dev_priv->swi_emitted);
30838 - ret = atomic_read(&dev_priv->swi_emitted);
30839 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30840 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30841
30842 BEGIN_RING(4);
30843 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30844 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30845 drm_radeon_private_t *dev_priv =
30846 (drm_radeon_private_t *) dev->dev_private;
30847
30848 - atomic_set(&dev_priv->swi_emitted, 0);
30849 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30850 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30851
30852 dev->max_vblank_count = 0x001fffff;
30853 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30854 index e8422ae..d22d4a8 100644
30855 --- a/drivers/gpu/drm/radeon/radeon_state.c
30856 +++ b/drivers/gpu/drm/radeon/radeon_state.c
30857 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30858 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30859 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30860
30861 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30862 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30863 sarea_priv->nbox * sizeof(depth_boxes[0])))
30864 return -EFAULT;
30865
30866 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30867 {
30868 drm_radeon_private_t *dev_priv = dev->dev_private;
30869 drm_radeon_getparam_t *param = data;
30870 - int value;
30871 + int value = 0;
30872
30873 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30874
30875 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30876 index f493c64..524ab6b 100644
30877 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
30878 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30879 @@ -843,8 +843,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30880 }
30881 if (unlikely(ttm_vm_ops == NULL)) {
30882 ttm_vm_ops = vma->vm_ops;
30883 - radeon_ttm_vm_ops = *ttm_vm_ops;
30884 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30885 + pax_open_kernel();
30886 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30887 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30888 + pax_close_kernel();
30889 }
30890 vma->vm_ops = &radeon_ttm_vm_ops;
30891 return 0;
30892 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30893 index f2c3b9d..d5a376b 100644
30894 --- a/drivers/gpu/drm/radeon/rs690.c
30895 +++ b/drivers/gpu/drm/radeon/rs690.c
30896 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30897 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30898 rdev->pm.sideport_bandwidth.full)
30899 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30900 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30901 + read_delay_latency.full = dfixed_const(800 * 1000);
30902 read_delay_latency.full = dfixed_div(read_delay_latency,
30903 rdev->pm.igp_sideport_mclk);
30904 + a.full = dfixed_const(370);
30905 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30906 } else {
30907 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30908 rdev->pm.k8_bandwidth.full)
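(Aside, not part of the patch: the rs690.c change above splits the constant because 370 * 800 * 1000 = 296,000,000 no longer fits in a 32-bit 20.12 fixed-point value once shifted; assuming the usual drm fixed-point constant macro amounts to a left shift by 12 into a 32-bit word, the shifted value would be roughly 1.2e12. Shifting only 800 * 1000, dividing by the sideport clock, and multiplying by 370 afterwards stays in range. A small standalone check of that arithmetic, with the shift-by-12 assumption made explicit:)

/*
 * Arithmetic check for the rs690.c hunk above, illustration only.
 * Assumption: dfixed_const(A) behaves like (u32)((A) << 12).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t before = (uint64_t)370 * 800 * 1000;   /* 296,000,000 */
        uint64_t after  = (uint64_t)800 * 1000;         /* 800,000 */

        printf("before: %llu << 12 = %llu (fits in u32: %s)\n",
               (unsigned long long)before,
               (unsigned long long)(before << 12),
               (before << 12) <= UINT32_MAX ? "yes" : "no");
        printf("after:  %llu << 12 = %llu (fits in u32: %s)\n",
               (unsigned long long)after,
               (unsigned long long)(after << 12),
               (after << 12) <= UINT32_MAX ? "yes" : "no");
        return 0;
}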
30909 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30910 index ebc6fac..a8313ed 100644
30911 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
30912 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30913 @@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
30914 static int ttm_pool_mm_shrink(struct shrinker *shrink,
30915 struct shrink_control *sc)
30916 {
30917 - static atomic_t start_pool = ATOMIC_INIT(0);
30918 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
30919 unsigned i;
30920 - unsigned pool_offset = atomic_add_return(1, &start_pool);
30921 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
30922 struct ttm_page_pool *pool;
30923 int shrink_pages = sc->nr_to_scan;
30924
30925 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
30926 index 88edacc..1e5412b 100644
30927 --- a/drivers/gpu/drm/via/via_drv.h
30928 +++ b/drivers/gpu/drm/via/via_drv.h
30929 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30930 typedef uint32_t maskarray_t[5];
30931
30932 typedef struct drm_via_irq {
30933 - atomic_t irq_received;
30934 + atomic_unchecked_t irq_received;
30935 uint32_t pending_mask;
30936 uint32_t enable_mask;
30937 wait_queue_head_t irq_queue;
30938 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
30939 struct timeval last_vblank;
30940 int last_vblank_valid;
30941 unsigned usec_per_vblank;
30942 - atomic_t vbl_received;
30943 + atomic_unchecked_t vbl_received;
30944 drm_via_state_t hc_state;
30945 char pci_buf[VIA_PCI_BUF_SIZE];
30946 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30947 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
30948 index d391f48..10c8ca3 100644
30949 --- a/drivers/gpu/drm/via/via_irq.c
30950 +++ b/drivers/gpu/drm/via/via_irq.c
30951 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
30952 if (crtc != 0)
30953 return 0;
30954
30955 - return atomic_read(&dev_priv->vbl_received);
30956 + return atomic_read_unchecked(&dev_priv->vbl_received);
30957 }
30958
30959 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30960 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30961
30962 status = VIA_READ(VIA_REG_INTERRUPT);
30963 if (status & VIA_IRQ_VBLANK_PENDING) {
30964 - atomic_inc(&dev_priv->vbl_received);
30965 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30966 + atomic_inc_unchecked(&dev_priv->vbl_received);
30967 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30968 do_gettimeofday(&cur_vblank);
30969 if (dev_priv->last_vblank_valid) {
30970 dev_priv->usec_per_vblank =
30971 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30972 dev_priv->last_vblank = cur_vblank;
30973 dev_priv->last_vblank_valid = 1;
30974 }
30975 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30976 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30977 DRM_DEBUG("US per vblank is: %u\n",
30978 dev_priv->usec_per_vblank);
30979 }
30980 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30981
30982 for (i = 0; i < dev_priv->num_irqs; ++i) {
30983 if (status & cur_irq->pending_mask) {
30984 - atomic_inc(&cur_irq->irq_received);
30985 + atomic_inc_unchecked(&cur_irq->irq_received);
30986 DRM_WAKEUP(&cur_irq->irq_queue);
30987 handled = 1;
30988 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
30989 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
30990 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30991 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30992 masks[irq][4]));
30993 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30994 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30995 } else {
30996 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30997 (((cur_irq_sequence =
30998 - atomic_read(&cur_irq->irq_received)) -
30999 + atomic_read_unchecked(&cur_irq->irq_received)) -
31000 *sequence) <= (1 << 23)));
31001 }
31002 *sequence = cur_irq_sequence;
31003 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31004 }
31005
31006 for (i = 0; i < dev_priv->num_irqs; ++i) {
31007 - atomic_set(&cur_irq->irq_received, 0);
31008 + atomic_set_unchecked(&cur_irq->irq_received, 0);
31009 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31010 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31011 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31012 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31013 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31014 case VIA_IRQ_RELATIVE:
31015 irqwait->request.sequence +=
31016 - atomic_read(&cur_irq->irq_received);
31017 + atomic_read_unchecked(&cur_irq->irq_received);
31018 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31019 case VIA_IRQ_ABSOLUTE:
31020 break;
31021 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31022 index d0f2c07..9ebd9c3 100644
31023 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31024 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31025 @@ -263,7 +263,7 @@ struct vmw_private {
31026 * Fencing and IRQs.
31027 */
31028
31029 - atomic_t marker_seq;
31030 + atomic_unchecked_t marker_seq;
31031 wait_queue_head_t fence_queue;
31032 wait_queue_head_t fifo_queue;
31033 int fence_queue_waiters; /* Protected by hw_mutex */
31034 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31035 index a0c2f12..68ae6cb 100644
31036 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31037 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31038 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31039 (unsigned int) min,
31040 (unsigned int) fifo->capabilities);
31041
31042 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31043 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31044 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31045 vmw_marker_queue_init(&fifo->marker_queue);
31046 return vmw_fifo_send_fence(dev_priv, &dummy);
31047 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31048 if (reserveable)
31049 iowrite32(bytes, fifo_mem +
31050 SVGA_FIFO_RESERVED);
31051 - return fifo_mem + (next_cmd >> 2);
31052 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31053 } else {
31054 need_bounce = true;
31055 }
31056 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31057
31058 fm = vmw_fifo_reserve(dev_priv, bytes);
31059 if (unlikely(fm == NULL)) {
31060 - *seqno = atomic_read(&dev_priv->marker_seq);
31061 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31062 ret = -ENOMEM;
31063 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31064 false, 3*HZ);
31065 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31066 }
31067
31068 do {
31069 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31070 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31071 } while (*seqno == 0);
31072
31073 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31074 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31075 index cabc95f..14b3d77 100644
31076 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31077 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31078 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31079 * emitted. Then the fence is stale and signaled.
31080 */
31081
31082 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31083 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31084 > VMW_FENCE_WRAP);
31085
31086 return ret;
31087 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31088
31089 if (fifo_idle)
31090 down_read(&fifo_state->rwsem);
31091 - signal_seq = atomic_read(&dev_priv->marker_seq);
31092 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31093 ret = 0;
31094
31095 for (;;) {
31096 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31097 index 8a8725c..afed796 100644
31098 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31099 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31100 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31101 while (!vmw_lag_lt(queue, us)) {
31102 spin_lock(&queue->lock);
31103 if (list_empty(&queue->head))
31104 - seqno = atomic_read(&dev_priv->marker_seq);
31105 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31106 else {
31107 marker = list_first_entry(&queue->head,
31108 struct vmw_marker, head);
31109 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31110 index 4da66b4..e948655 100644
31111 --- a/drivers/hid/hid-core.c
31112 +++ b/drivers/hid/hid-core.c
31113 @@ -2063,7 +2063,7 @@ static bool hid_ignore(struct hid_device *hdev)
31114
31115 int hid_add_device(struct hid_device *hdev)
31116 {
31117 - static atomic_t id = ATOMIC_INIT(0);
31118 + static atomic_unchecked_t id = ATOMIC_INIT(0);
31119 int ret;
31120
31121 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31122 @@ -2078,7 +2078,7 @@ int hid_add_device(struct hid_device *hdev)
31123 /* XXX hack, any other cleaner solution after the driver core
31124 * is converted to allow more than 20 bytes as the device name? */
31125 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31126 - hdev->vendor, hdev->product, atomic_inc_return(&id));
31127 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31128
31129 hid_debug_register(hdev, dev_name(&hdev->dev));
31130 ret = device_add(&hdev->dev);
31131 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31132 index eec3291..8ed706b 100644
31133 --- a/drivers/hid/hid-wiimote-debug.c
31134 +++ b/drivers/hid/hid-wiimote-debug.c
31135 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31136 else if (size == 0)
31137 return -EIO;
31138
31139 - if (copy_to_user(u, buf, size))
31140 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
31141 return -EFAULT;
31142
31143 *off += size;
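(Aside, not part of the patch: the hid-wiimote-debug.c change above refuses reads larger than the on-stack buffer before copying to userspace. The sketch below shows the same reject-before-copy idea in plain userspace C; the names are made up and memcpy() stands in for copy_to_user().)

/*
 * Illustrative sketch only: bound a caller-supplied length by the size
 * of the local buffer before copying anything out.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int demo_read(char *dst, size_t want)
{
        char buf[16];                     /* fixed-size scratch buffer */
        size_t have = sizeof(buf);

        memset(buf, 'A', sizeof(buf));    /* pretend this came from the device */

        /* Reject requests larger than the buffer instead of reading past it. */
        if (want > have)
                return -EFAULT;

        memcpy(dst, buf, want);
        return (int)want;
}

int main(void)
{
        char out[64];

        printf("read 8  -> %d\n", demo_read(out, 8));
        printf("read 64 -> %d\n", demo_read(out, 64));
        return 0;
}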
31144 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31145 index b1ec0e2..c295a61 100644
31146 --- a/drivers/hid/usbhid/hiddev.c
31147 +++ b/drivers/hid/usbhid/hiddev.c
31148 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31149 break;
31150
31151 case HIDIOCAPPLICATION:
31152 - if (arg < 0 || arg >= hid->maxapplication)
31153 + if (arg >= hid->maxapplication)
31154 break;
31155
31156 for (i = 0; i < hid->maxcollection; i++)
31157 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31158 index 4065374..10ed7dc 100644
31159 --- a/drivers/hv/channel.c
31160 +++ b/drivers/hv/channel.c
31161 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31162 int ret = 0;
31163 int t;
31164
31165 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31166 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31167 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31168 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31169
31170 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31171 if (ret)
31172 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31173 index 15956bd..ea34398 100644
31174 --- a/drivers/hv/hv.c
31175 +++ b/drivers/hv/hv.c
31176 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31177 u64 output_address = (output) ? virt_to_phys(output) : 0;
31178 u32 output_address_hi = output_address >> 32;
31179 u32 output_address_lo = output_address & 0xFFFFFFFF;
31180 - void *hypercall_page = hv_context.hypercall_page;
31181 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31182
31183 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31184 "=a"(hv_status_lo) : "d" (control_hi),
31185 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31186 index 699f0d8..f4f19250 100644
31187 --- a/drivers/hv/hyperv_vmbus.h
31188 +++ b/drivers/hv/hyperv_vmbus.h
31189 @@ -555,7 +555,7 @@ enum vmbus_connect_state {
31190 struct vmbus_connection {
31191 enum vmbus_connect_state conn_state;
31192
31193 - atomic_t next_gpadl_handle;
31194 + atomic_unchecked_t next_gpadl_handle;
31195
31196 /*
31197 * Represents channel interrupts. Each bit position represents a
31198 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31199 index a220e57..428f54d 100644
31200 --- a/drivers/hv/vmbus_drv.c
31201 +++ b/drivers/hv/vmbus_drv.c
31202 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31203 {
31204 int ret = 0;
31205
31206 - static atomic_t device_num = ATOMIC_INIT(0);
31207 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31208
31209 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31210 - atomic_inc_return(&device_num));
31211 + atomic_inc_return_unchecked(&device_num));
31212
31213 child_device_obj->device.bus = &hv_bus;
31214 child_device_obj->device.parent = &hv_acpi_dev->dev;
31215 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31216 index 9140236..ceaef4e 100644
31217 --- a/drivers/hwmon/acpi_power_meter.c
31218 +++ b/drivers/hwmon/acpi_power_meter.c
31219 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31220 return res;
31221
31222 temp /= 1000;
31223 - if (temp < 0)
31224 - return -EINVAL;
31225
31226 mutex_lock(&resource->lock);
31227 resource->trip[attr->index - 7] = temp;
31228 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31229 index 8b011d0..3de24a1 100644
31230 --- a/drivers/hwmon/sht15.c
31231 +++ b/drivers/hwmon/sht15.c
31232 @@ -166,7 +166,7 @@ struct sht15_data {
31233 int supply_uV;
31234 bool supply_uV_valid;
31235 struct work_struct update_supply_work;
31236 - atomic_t interrupt_handled;
31237 + atomic_unchecked_t interrupt_handled;
31238 };
31239
31240 /**
31241 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31242 return ret;
31243
31244 gpio_direction_input(data->pdata->gpio_data);
31245 - atomic_set(&data->interrupt_handled, 0);
31246 + atomic_set_unchecked(&data->interrupt_handled, 0);
31247
31248 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31249 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31250 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31251 /* Only relevant if the interrupt hasn't occurred. */
31252 - if (!atomic_read(&data->interrupt_handled))
31253 + if (!atomic_read_unchecked(&data->interrupt_handled))
31254 schedule_work(&data->read_work);
31255 }
31256 ret = wait_event_timeout(data->wait_queue,
31257 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31258
31259 /* First disable the interrupt */
31260 disable_irq_nosync(irq);
31261 - atomic_inc(&data->interrupt_handled);
31262 + atomic_inc_unchecked(&data->interrupt_handled);
31263 /* Then schedule a reading work struct */
31264 if (data->state != SHT15_READING_NOTHING)
31265 schedule_work(&data->read_work);
31266 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31267 * If not, then start the interrupt again - care here as could
31268 * have gone low in meantime so verify it hasn't!
31269 */
31270 - atomic_set(&data->interrupt_handled, 0);
31271 + atomic_set_unchecked(&data->interrupt_handled, 0);
31272 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31273 /* If still not occurred or another handler was scheduled */
31274 if (gpio_get_value(data->pdata->gpio_data)
31275 - || atomic_read(&data->interrupt_handled))
31276 + || atomic_read_unchecked(&data->interrupt_handled))
31277 return;
31278 }
31279
31280 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31281 index 378fcb5..5e91fa8 100644
31282 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
31283 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31284 @@ -43,7 +43,7 @@
31285 extern struct i2c_adapter amd756_smbus;
31286
31287 static struct i2c_adapter *s4882_adapter;
31288 -static struct i2c_algorithm *s4882_algo;
31289 +static i2c_algorithm_no_const *s4882_algo;
31290
31291 /* Wrapper access functions for multiplexed SMBus */
31292 static DEFINE_MUTEX(amd756_lock);
31293 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31294 index 29015eb..af2d8e9 100644
31295 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31296 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31297 @@ -41,7 +41,7 @@
31298 extern struct i2c_adapter *nforce2_smbus;
31299
31300 static struct i2c_adapter *s4985_adapter;
31301 -static struct i2c_algorithm *s4985_algo;
31302 +static i2c_algorithm_no_const *s4985_algo;
31303
31304 /* Wrapper access functions for multiplexed SMBus */
31305 static DEFINE_MUTEX(nforce2_lock);
31306 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31307 index d7a4833..7fae376 100644
31308 --- a/drivers/i2c/i2c-mux.c
31309 +++ b/drivers/i2c/i2c-mux.c
31310 @@ -28,7 +28,7 @@
31311 /* multiplexer per channel data */
31312 struct i2c_mux_priv {
31313 struct i2c_adapter adap;
31314 - struct i2c_algorithm algo;
31315 + i2c_algorithm_no_const algo;
31316
31317 struct i2c_adapter *parent;
31318 void *mux_dev; /* the mux chip/device */
31319 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31320 index 57d00ca..0145194 100644
31321 --- a/drivers/ide/aec62xx.c
31322 +++ b/drivers/ide/aec62xx.c
31323 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31324 .cable_detect = atp86x_cable_detect,
31325 };
31326
31327 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31328 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31329 { /* 0: AEC6210 */
31330 .name = DRV_NAME,
31331 .init_chipset = init_chipset_aec62xx,
31332 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31333 index 2c8016a..911a27c 100644
31334 --- a/drivers/ide/alim15x3.c
31335 +++ b/drivers/ide/alim15x3.c
31336 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31337 .dma_sff_read_status = ide_dma_sff_read_status,
31338 };
31339
31340 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
31341 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
31342 .name = DRV_NAME,
31343 .init_chipset = init_chipset_ali15x3,
31344 .init_hwif = init_hwif_ali15x3,
31345 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31346 index 3747b25..56fc995 100644
31347 --- a/drivers/ide/amd74xx.c
31348 +++ b/drivers/ide/amd74xx.c
31349 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31350 .udma_mask = udma, \
31351 }
31352
31353 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31354 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31355 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31356 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31357 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31358 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31359 index 15f0ead..cb43480 100644
31360 --- a/drivers/ide/atiixp.c
31361 +++ b/drivers/ide/atiixp.c
31362 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31363 .cable_detect = atiixp_cable_detect,
31364 };
31365
31366 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31367 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31368 { /* 0: IXP200/300/400/700 */
31369 .name = DRV_NAME,
31370 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31371 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31372 index 5f80312..d1fc438 100644
31373 --- a/drivers/ide/cmd64x.c
31374 +++ b/drivers/ide/cmd64x.c
31375 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31376 .dma_sff_read_status = ide_dma_sff_read_status,
31377 };
31378
31379 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31380 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31381 { /* 0: CMD643 */
31382 .name = DRV_NAME,
31383 .init_chipset = init_chipset_cmd64x,
31384 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31385 index 2c1e5f7..1444762 100644
31386 --- a/drivers/ide/cs5520.c
31387 +++ b/drivers/ide/cs5520.c
31388 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31389 .set_dma_mode = cs5520_set_dma_mode,
31390 };
31391
31392 -static const struct ide_port_info cyrix_chipset __devinitdata = {
31393 +static const struct ide_port_info cyrix_chipset __devinitconst = {
31394 .name = DRV_NAME,
31395 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31396 .port_ops = &cs5520_port_ops,
31397 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31398 index 4dc4eb9..49b40ad 100644
31399 --- a/drivers/ide/cs5530.c
31400 +++ b/drivers/ide/cs5530.c
31401 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31402 .udma_filter = cs5530_udma_filter,
31403 };
31404
31405 -static const struct ide_port_info cs5530_chipset __devinitdata = {
31406 +static const struct ide_port_info cs5530_chipset __devinitconst = {
31407 .name = DRV_NAME,
31408 .init_chipset = init_chipset_cs5530,
31409 .init_hwif = init_hwif_cs5530,
31410 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31411 index 5059faf..18d4c85 100644
31412 --- a/drivers/ide/cs5535.c
31413 +++ b/drivers/ide/cs5535.c
31414 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31415 .cable_detect = cs5535_cable_detect,
31416 };
31417
31418 -static const struct ide_port_info cs5535_chipset __devinitdata = {
31419 +static const struct ide_port_info cs5535_chipset __devinitconst = {
31420 .name = DRV_NAME,
31421 .port_ops = &cs5535_port_ops,
31422 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31423 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31424 index 847553f..3ffb49d 100644
31425 --- a/drivers/ide/cy82c693.c
31426 +++ b/drivers/ide/cy82c693.c
31427 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31428 .set_dma_mode = cy82c693_set_dma_mode,
31429 };
31430
31431 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
31432 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
31433 .name = DRV_NAME,
31434 .init_iops = init_iops_cy82c693,
31435 .port_ops = &cy82c693_port_ops,
31436 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31437 index 58c51cd..4aec3b8 100644
31438 --- a/drivers/ide/hpt366.c
31439 +++ b/drivers/ide/hpt366.c
31440 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31441 }
31442 };
31443
31444 -static const struct hpt_info hpt36x __devinitdata = {
31445 +static const struct hpt_info hpt36x __devinitconst = {
31446 .chip_name = "HPT36x",
31447 .chip_type = HPT36x,
31448 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31449 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31450 .timings = &hpt36x_timings
31451 };
31452
31453 -static const struct hpt_info hpt370 __devinitdata = {
31454 +static const struct hpt_info hpt370 __devinitconst = {
31455 .chip_name = "HPT370",
31456 .chip_type = HPT370,
31457 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31458 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31459 .timings = &hpt37x_timings
31460 };
31461
31462 -static const struct hpt_info hpt370a __devinitdata = {
31463 +static const struct hpt_info hpt370a __devinitconst = {
31464 .chip_name = "HPT370A",
31465 .chip_type = HPT370A,
31466 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31467 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31468 .timings = &hpt37x_timings
31469 };
31470
31471 -static const struct hpt_info hpt374 __devinitdata = {
31472 +static const struct hpt_info hpt374 __devinitconst = {
31473 .chip_name = "HPT374",
31474 .chip_type = HPT374,
31475 .udma_mask = ATA_UDMA5,
31476 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31477 .timings = &hpt37x_timings
31478 };
31479
31480 -static const struct hpt_info hpt372 __devinitdata = {
31481 +static const struct hpt_info hpt372 __devinitconst = {
31482 .chip_name = "HPT372",
31483 .chip_type = HPT372,
31484 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31485 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31486 .timings = &hpt37x_timings
31487 };
31488
31489 -static const struct hpt_info hpt372a __devinitdata = {
31490 +static const struct hpt_info hpt372a __devinitconst = {
31491 .chip_name = "HPT372A",
31492 .chip_type = HPT372A,
31493 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31494 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31495 .timings = &hpt37x_timings
31496 };
31497
31498 -static const struct hpt_info hpt302 __devinitdata = {
31499 +static const struct hpt_info hpt302 __devinitconst = {
31500 .chip_name = "HPT302",
31501 .chip_type = HPT302,
31502 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31503 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31504 .timings = &hpt37x_timings
31505 };
31506
31507 -static const struct hpt_info hpt371 __devinitdata = {
31508 +static const struct hpt_info hpt371 __devinitconst = {
31509 .chip_name = "HPT371",
31510 .chip_type = HPT371,
31511 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31512 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31513 .timings = &hpt37x_timings
31514 };
31515
31516 -static const struct hpt_info hpt372n __devinitdata = {
31517 +static const struct hpt_info hpt372n __devinitconst = {
31518 .chip_name = "HPT372N",
31519 .chip_type = HPT372N,
31520 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31521 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31522 .timings = &hpt37x_timings
31523 };
31524
31525 -static const struct hpt_info hpt302n __devinitdata = {
31526 +static const struct hpt_info hpt302n __devinitconst = {
31527 .chip_name = "HPT302N",
31528 .chip_type = HPT302N,
31529 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31530 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31531 .timings = &hpt37x_timings
31532 };
31533
31534 -static const struct hpt_info hpt371n __devinitdata = {
31535 +static const struct hpt_info hpt371n __devinitconst = {
31536 .chip_name = "HPT371N",
31537 .chip_type = HPT371N,
31538 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31539 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31540 .dma_sff_read_status = ide_dma_sff_read_status,
31541 };
31542
31543 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31544 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31545 { /* 0: HPT36x */
31546 .name = DRV_NAME,
31547 .init_chipset = init_chipset_hpt366,
31548 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31549 index 8126824..55a2798 100644
31550 --- a/drivers/ide/ide-cd.c
31551 +++ b/drivers/ide/ide-cd.c
31552 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31553 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31554 if ((unsigned long)buf & alignment
31555 || blk_rq_bytes(rq) & q->dma_pad_mask
31556 - || object_is_on_stack(buf))
31557 + || object_starts_on_stack(buf))
31558 drive->dma = 0;
31559 }
31560 }
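The ide-cd hunk swaps the mainline object_is_on_stack() test for grsecurity's object_starts_on_stack(), which checks the buffer's starting address against the current task's stack before allowing DMA to it. A rough sketch of that kind of check, assuming the usual task_stack_page()/THREAD_SIZE layout rather than quoting the patch's own definition:

	/* sketch: does the object's first byte lie inside current's stack? */
	static inline int object_starts_on_stack(const void *obj)
	{
		const void *stack = task_stack_page(current);

		return obj >= stack && obj < stack + THREAD_SIZE;
	}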
31561 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31562 index 7f56b73..dab5b67 100644
31563 --- a/drivers/ide/ide-pci-generic.c
31564 +++ b/drivers/ide/ide-pci-generic.c
31565 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31566 .udma_mask = ATA_UDMA6, \
31567 }
31568
31569 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
31570 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
31571 /* 0: Unknown */
31572 DECLARE_GENERIC_PCI_DEV(0),
31573
31574 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31575 index 560e66d..d5dd180 100644
31576 --- a/drivers/ide/it8172.c
31577 +++ b/drivers/ide/it8172.c
31578 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31579 .set_dma_mode = it8172_set_dma_mode,
31580 };
31581
31582 -static const struct ide_port_info it8172_port_info __devinitdata = {
31583 +static const struct ide_port_info it8172_port_info __devinitconst = {
31584 .name = DRV_NAME,
31585 .port_ops = &it8172_port_ops,
31586 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31587 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31588 index 46816ba..1847aeb 100644
31589 --- a/drivers/ide/it8213.c
31590 +++ b/drivers/ide/it8213.c
31591 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31592 .cable_detect = it8213_cable_detect,
31593 };
31594
31595 -static const struct ide_port_info it8213_chipset __devinitdata = {
31596 +static const struct ide_port_info it8213_chipset __devinitconst = {
31597 .name = DRV_NAME,
31598 .enablebits = { {0x41, 0x80, 0x80} },
31599 .port_ops = &it8213_port_ops,
31600 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31601 index 2e3169f..c5611db 100644
31602 --- a/drivers/ide/it821x.c
31603 +++ b/drivers/ide/it821x.c
31604 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31605 .cable_detect = it821x_cable_detect,
31606 };
31607
31608 -static const struct ide_port_info it821x_chipset __devinitdata = {
31609 +static const struct ide_port_info it821x_chipset __devinitconst = {
31610 .name = DRV_NAME,
31611 .init_chipset = init_chipset_it821x,
31612 .init_hwif = init_hwif_it821x,
31613 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31614 index 74c2c4a..efddd7d 100644
31615 --- a/drivers/ide/jmicron.c
31616 +++ b/drivers/ide/jmicron.c
31617 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31618 .cable_detect = jmicron_cable_detect,
31619 };
31620
31621 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31622 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31623 .name = DRV_NAME,
31624 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31625 .port_ops = &jmicron_port_ops,
31626 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31627 index 95327a2..73f78d8 100644
31628 --- a/drivers/ide/ns87415.c
31629 +++ b/drivers/ide/ns87415.c
31630 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31631 .dma_sff_read_status = superio_dma_sff_read_status,
31632 };
31633
31634 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31635 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31636 .name = DRV_NAME,
31637 .init_hwif = init_hwif_ns87415,
31638 .tp_ops = &ns87415_tp_ops,
31639 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31640 index 1a53a4c..39edc66 100644
31641 --- a/drivers/ide/opti621.c
31642 +++ b/drivers/ide/opti621.c
31643 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31644 .set_pio_mode = opti621_set_pio_mode,
31645 };
31646
31647 -static const struct ide_port_info opti621_chipset __devinitdata = {
31648 +static const struct ide_port_info opti621_chipset __devinitconst = {
31649 .name = DRV_NAME,
31650 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31651 .port_ops = &opti621_port_ops,
31652 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31653 index 9546fe2..2e5ceb6 100644
31654 --- a/drivers/ide/pdc202xx_new.c
31655 +++ b/drivers/ide/pdc202xx_new.c
31656 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31657 .udma_mask = udma, \
31658 }
31659
31660 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31661 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31662 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31663 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31664 };
31665 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31666 index 3a35ec6..5634510 100644
31667 --- a/drivers/ide/pdc202xx_old.c
31668 +++ b/drivers/ide/pdc202xx_old.c
31669 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31670 .max_sectors = sectors, \
31671 }
31672
31673 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31674 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31675 { /* 0: PDC20246 */
31676 .name = DRV_NAME,
31677 .init_chipset = init_chipset_pdc202xx,
31678 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31679 index 1892e81..fe0fd60 100644
31680 --- a/drivers/ide/piix.c
31681 +++ b/drivers/ide/piix.c
31682 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31683 .udma_mask = udma, \
31684 }
31685
31686 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31687 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31688 /* 0: MPIIX */
31689 { /*
31690 * MPIIX actually has only a single IDE channel mapped to
31691 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31692 index a6414a8..c04173e 100644
31693 --- a/drivers/ide/rz1000.c
31694 +++ b/drivers/ide/rz1000.c
31695 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31696 }
31697 }
31698
31699 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31700 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31701 .name = DRV_NAME,
31702 .host_flags = IDE_HFLAG_NO_DMA,
31703 };
31704 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31705 index 356b9b5..d4758eb 100644
31706 --- a/drivers/ide/sc1200.c
31707 +++ b/drivers/ide/sc1200.c
31708 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31709 .dma_sff_read_status = ide_dma_sff_read_status,
31710 };
31711
31712 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31713 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31714 .name = DRV_NAME,
31715 .port_ops = &sc1200_port_ops,
31716 .dma_ops = &sc1200_dma_ops,
31717 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31718 index b7f5b0c..9701038 100644
31719 --- a/drivers/ide/scc_pata.c
31720 +++ b/drivers/ide/scc_pata.c
31721 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31722 .dma_sff_read_status = scc_dma_sff_read_status,
31723 };
31724
31725 -static const struct ide_port_info scc_chipset __devinitdata = {
31726 +static const struct ide_port_info scc_chipset __devinitconst = {
31727 .name = "sccIDE",
31728 .init_iops = init_iops_scc,
31729 .init_dma = scc_init_dma,
31730 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31731 index 35fb8da..24d72ef 100644
31732 --- a/drivers/ide/serverworks.c
31733 +++ b/drivers/ide/serverworks.c
31734 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31735 .cable_detect = svwks_cable_detect,
31736 };
31737
31738 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31739 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31740 { /* 0: OSB4 */
31741 .name = DRV_NAME,
31742 .init_chipset = init_chipset_svwks,
31743 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31744 index ddeda44..46f7e30 100644
31745 --- a/drivers/ide/siimage.c
31746 +++ b/drivers/ide/siimage.c
31747 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31748 .udma_mask = ATA_UDMA6, \
31749 }
31750
31751 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31752 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31753 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31754 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31755 };
31756 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31757 index 4a00225..09e61b4 100644
31758 --- a/drivers/ide/sis5513.c
31759 +++ b/drivers/ide/sis5513.c
31760 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31761 .cable_detect = sis_cable_detect,
31762 };
31763
31764 -static const struct ide_port_info sis5513_chipset __devinitdata = {
31765 +static const struct ide_port_info sis5513_chipset __devinitconst = {
31766 .name = DRV_NAME,
31767 .init_chipset = init_chipset_sis5513,
31768 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31769 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31770 index f21dc2a..d051cd2 100644
31771 --- a/drivers/ide/sl82c105.c
31772 +++ b/drivers/ide/sl82c105.c
31773 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31774 .dma_sff_read_status = ide_dma_sff_read_status,
31775 };
31776
31777 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
31778 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
31779 .name = DRV_NAME,
31780 .init_chipset = init_chipset_sl82c105,
31781 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31782 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31783 index 864ffe0..863a5e9 100644
31784 --- a/drivers/ide/slc90e66.c
31785 +++ b/drivers/ide/slc90e66.c
31786 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31787 .cable_detect = slc90e66_cable_detect,
31788 };
31789
31790 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
31791 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
31792 .name = DRV_NAME,
31793 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31794 .port_ops = &slc90e66_port_ops,
31795 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31796 index 4799d5c..1794678 100644
31797 --- a/drivers/ide/tc86c001.c
31798 +++ b/drivers/ide/tc86c001.c
31799 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31800 .dma_sff_read_status = ide_dma_sff_read_status,
31801 };
31802
31803 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
31804 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
31805 .name = DRV_NAME,
31806 .init_hwif = init_hwif_tc86c001,
31807 .port_ops = &tc86c001_port_ops,
31808 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31809 index 281c914..55ce1b8 100644
31810 --- a/drivers/ide/triflex.c
31811 +++ b/drivers/ide/triflex.c
31812 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31813 .set_dma_mode = triflex_set_mode,
31814 };
31815
31816 -static const struct ide_port_info triflex_device __devinitdata = {
31817 +static const struct ide_port_info triflex_device __devinitconst = {
31818 .name = DRV_NAME,
31819 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31820 .port_ops = &triflex_port_ops,
31821 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31822 index 4b42ca0..e494a98 100644
31823 --- a/drivers/ide/trm290.c
31824 +++ b/drivers/ide/trm290.c
31825 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31826 .dma_check = trm290_dma_check,
31827 };
31828
31829 -static const struct ide_port_info trm290_chipset __devinitdata = {
31830 +static const struct ide_port_info trm290_chipset __devinitconst = {
31831 .name = DRV_NAME,
31832 .init_hwif = init_hwif_trm290,
31833 .tp_ops = &trm290_tp_ops,
31834 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31835 index f46f49c..eb77678 100644
31836 --- a/drivers/ide/via82cxxx.c
31837 +++ b/drivers/ide/via82cxxx.c
31838 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31839 .cable_detect = via82cxxx_cable_detect,
31840 };
31841
31842 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31843 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31844 .name = DRV_NAME,
31845 .init_chipset = init_chipset_via82cxxx,
31846 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31847 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31848 index 73d4531..c90cd2d 100644
31849 --- a/drivers/ieee802154/fakehard.c
31850 +++ b/drivers/ieee802154/fakehard.c
31851 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31852 phy->transmit_power = 0xbf;
31853
31854 dev->netdev_ops = &fake_ops;
31855 - dev->ml_priv = &fake_mlme;
31856 + dev->ml_priv = (void *)&fake_mlme;
31857
31858 priv = netdev_priv(dev);
31859 priv->phy = phy;
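The (void *) cast added in fakehard.c is presumably needed because the structure behind fake_mlme consists of function pointers and so ends up const under this patch's constification; its address then needs an explicit cast before it can be stored in the non-const ml_priv slot. A minimal sketch with hypothetical names:

	/* ops table turned const by constification; the cast drops the qualifier */
	static const struct example_ops example_mlme = { /* handlers */ };

	dev->ml_priv = (void *)&example_mlme;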
31860 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31861 index c889aae..6cf5aa7 100644
31862 --- a/drivers/infiniband/core/cm.c
31863 +++ b/drivers/infiniband/core/cm.c
31864 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31865
31866 struct cm_counter_group {
31867 struct kobject obj;
31868 - atomic_long_t counter[CM_ATTR_COUNT];
31869 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31870 };
31871
31872 struct cm_counter_attribute {
31873 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31874 struct ib_mad_send_buf *msg = NULL;
31875 int ret;
31876
31877 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31878 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31879 counter[CM_REQ_COUNTER]);
31880
31881 /* Quick state check to discard duplicate REQs. */
31882 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31883 if (!cm_id_priv)
31884 return;
31885
31886 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31887 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31888 counter[CM_REP_COUNTER]);
31889 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31890 if (ret)
31891 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31892 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31893 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31894 spin_unlock_irq(&cm_id_priv->lock);
31895 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31896 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31897 counter[CM_RTU_COUNTER]);
31898 goto out;
31899 }
31900 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31901 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31902 dreq_msg->local_comm_id);
31903 if (!cm_id_priv) {
31904 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31905 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31906 counter[CM_DREQ_COUNTER]);
31907 cm_issue_drep(work->port, work->mad_recv_wc);
31908 return -EINVAL;
31909 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
31910 case IB_CM_MRA_REP_RCVD:
31911 break;
31912 case IB_CM_TIMEWAIT:
31913 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31914 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31915 counter[CM_DREQ_COUNTER]);
31916 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31917 goto unlock;
31918 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
31919 cm_free_msg(msg);
31920 goto deref;
31921 case IB_CM_DREQ_RCVD:
31922 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31923 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31924 counter[CM_DREQ_COUNTER]);
31925 goto unlock;
31926 default:
31927 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
31928 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31929 cm_id_priv->msg, timeout)) {
31930 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31931 - atomic_long_inc(&work->port->
31932 + atomic_long_inc_unchecked(&work->port->
31933 counter_group[CM_RECV_DUPLICATES].
31934 counter[CM_MRA_COUNTER]);
31935 goto out;
31936 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
31937 break;
31938 case IB_CM_MRA_REQ_RCVD:
31939 case IB_CM_MRA_REP_RCVD:
31940 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31941 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31942 counter[CM_MRA_COUNTER]);
31943 /* fall through */
31944 default:
31945 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
31946 case IB_CM_LAP_IDLE:
31947 break;
31948 case IB_CM_MRA_LAP_SENT:
31949 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31950 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31951 counter[CM_LAP_COUNTER]);
31952 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31953 goto unlock;
31954 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
31955 cm_free_msg(msg);
31956 goto deref;
31957 case IB_CM_LAP_RCVD:
31958 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31959 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31960 counter[CM_LAP_COUNTER]);
31961 goto unlock;
31962 default:
31963 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
31964 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
31965 if (cur_cm_id_priv) {
31966 spin_unlock_irq(&cm.lock);
31967 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31968 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31969 counter[CM_SIDR_REQ_COUNTER]);
31970 goto out; /* Duplicate message. */
31971 }
31972 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
31973 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
31974 msg->retries = 1;
31975
31976 - atomic_long_add(1 + msg->retries,
31977 + atomic_long_add_unchecked(1 + msg->retries,
31978 &port->counter_group[CM_XMIT].counter[attr_index]);
31979 if (msg->retries)
31980 - atomic_long_add(msg->retries,
31981 + atomic_long_add_unchecked(msg->retries,
31982 &port->counter_group[CM_XMIT_RETRIES].
31983 counter[attr_index]);
31984
31985 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
31986 }
31987
31988 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
31989 - atomic_long_inc(&port->counter_group[CM_RECV].
31990 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
31991 counter[attr_id - CM_ATTR_ID_OFFSET]);
31992
31993 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
31994 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
31995 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
31996
31997 return sprintf(buf, "%ld\n",
31998 - atomic_long_read(&group->counter[cm_attr->index]));
31999 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32000 }
32001
32002 static const struct sysfs_ops cm_counter_ops = {
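The *_unchecked atomic types and helpers introduced throughout the InfiniBand hunks are grsecurity's opt-out from PAX_REFCOUNT overflow checking: statistics counters that may legitimately wrap are moved to the unchecked variants so the overflow trap is reserved for real reference counts. A hedged sketch of the declaration and usage pattern, reusing only helpers this patch itself defines (the counter name is made up):

	/* event counter that is allowed to wrap; not a reference count */
	static atomic_unchecked_t pkts_seen = ATOMIC_INIT(0);

	static void count_packet(void)
	{
		atomic_inc_unchecked(&pkts_seen);
	}

	static int packets_seen(void)
	{
		return atomic_read_unchecked(&pkts_seen);
	}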
32003 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32004 index 176c8f9..2627b62 100644
32005 --- a/drivers/infiniband/core/fmr_pool.c
32006 +++ b/drivers/infiniband/core/fmr_pool.c
32007 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
32008
32009 struct task_struct *thread;
32010
32011 - atomic_t req_ser;
32012 - atomic_t flush_ser;
32013 + atomic_unchecked_t req_ser;
32014 + atomic_unchecked_t flush_ser;
32015
32016 wait_queue_head_t force_wait;
32017 };
32018 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32019 struct ib_fmr_pool *pool = pool_ptr;
32020
32021 do {
32022 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32023 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32024 ib_fmr_batch_release(pool);
32025
32026 - atomic_inc(&pool->flush_ser);
32027 + atomic_inc_unchecked(&pool->flush_ser);
32028 wake_up_interruptible(&pool->force_wait);
32029
32030 if (pool->flush_function)
32031 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32032 }
32033
32034 set_current_state(TASK_INTERRUPTIBLE);
32035 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32036 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32037 !kthread_should_stop())
32038 schedule();
32039 __set_current_state(TASK_RUNNING);
32040 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32041 pool->dirty_watermark = params->dirty_watermark;
32042 pool->dirty_len = 0;
32043 spin_lock_init(&pool->pool_lock);
32044 - atomic_set(&pool->req_ser, 0);
32045 - atomic_set(&pool->flush_ser, 0);
32046 + atomic_set_unchecked(&pool->req_ser, 0);
32047 + atomic_set_unchecked(&pool->flush_ser, 0);
32048 init_waitqueue_head(&pool->force_wait);
32049
32050 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32051 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32052 }
32053 spin_unlock_irq(&pool->pool_lock);
32054
32055 - serial = atomic_inc_return(&pool->req_ser);
32056 + serial = atomic_inc_return_unchecked(&pool->req_ser);
32057 wake_up_process(pool->thread);
32058
32059 if (wait_event_interruptible(pool->force_wait,
32060 - atomic_read(&pool->flush_ser) - serial >= 0))
32061 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32062 return -EINTR;
32063
32064 return 0;
32065 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32066 } else {
32067 list_add_tail(&fmr->list, &pool->dirty_list);
32068 if (++pool->dirty_len >= pool->dirty_watermark) {
32069 - atomic_inc(&pool->req_ser);
32070 + atomic_inc_unchecked(&pool->req_ser);
32071 wake_up_process(pool->thread);
32072 }
32073 }
32074 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32075 index 40c8353..946b0e4 100644
32076 --- a/drivers/infiniband/hw/cxgb4/mem.c
32077 +++ b/drivers/infiniband/hw/cxgb4/mem.c
32078 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32079 int err;
32080 struct fw_ri_tpte tpt;
32081 u32 stag_idx;
32082 - static atomic_t key;
32083 + static atomic_unchecked_t key;
32084
32085 if (c4iw_fatal_error(rdev))
32086 return -EIO;
32087 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32088 &rdev->resource.tpt_fifo_lock);
32089 if (!stag_idx)
32090 return -ENOMEM;
32091 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32092 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32093 }
32094 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32095 __func__, stag_state, type, pdid, stag_idx);
32096 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32097 index 79b3dbc..96e5fcc 100644
32098 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
32099 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32100 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32101 struct ib_atomic_eth *ateth;
32102 struct ipath_ack_entry *e;
32103 u64 vaddr;
32104 - atomic64_t *maddr;
32105 + atomic64_unchecked_t *maddr;
32106 u64 sdata;
32107 u32 rkey;
32108 u8 next;
32109 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32110 IB_ACCESS_REMOTE_ATOMIC)))
32111 goto nack_acc_unlck;
32112 /* Perform atomic OP and save result. */
32113 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32114 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32115 sdata = be64_to_cpu(ateth->swap_data);
32116 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32117 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32118 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32119 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32120 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32121 be64_to_cpu(ateth->compare_data),
32122 sdata);
32123 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32124 index 1f95bba..9530f87 100644
32125 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32126 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32127 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32128 unsigned long flags;
32129 struct ib_wc wc;
32130 u64 sdata;
32131 - atomic64_t *maddr;
32132 + atomic64_unchecked_t *maddr;
32133 enum ib_wc_status send_status;
32134
32135 /*
32136 @@ -382,11 +382,11 @@ again:
32137 IB_ACCESS_REMOTE_ATOMIC)))
32138 goto acc_err;
32139 /* Perform atomic OP and save result. */
32140 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32141 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32142 sdata = wqe->wr.wr.atomic.compare_add;
32143 *(u64 *) sqp->s_sge.sge.vaddr =
32144 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32145 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32146 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32147 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32148 sdata, wqe->wr.wr.atomic.swap);
32149 goto send_comp;
32150 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32151 index 7140199..da60063 100644
32152 --- a/drivers/infiniband/hw/nes/nes.c
32153 +++ b/drivers/infiniband/hw/nes/nes.c
32154 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32155 LIST_HEAD(nes_adapter_list);
32156 static LIST_HEAD(nes_dev_list);
32157
32158 -atomic_t qps_destroyed;
32159 +atomic_unchecked_t qps_destroyed;
32160
32161 static unsigned int ee_flsh_adapter;
32162 static unsigned int sysfs_nonidx_addr;
32163 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32164 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32165 struct nes_adapter *nesadapter = nesdev->nesadapter;
32166
32167 - atomic_inc(&qps_destroyed);
32168 + atomic_inc_unchecked(&qps_destroyed);
32169
32170 /* Free the control structures */
32171
32172 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32173 index c438e46..ca30356 100644
32174 --- a/drivers/infiniband/hw/nes/nes.h
32175 +++ b/drivers/infiniband/hw/nes/nes.h
32176 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32177 extern unsigned int wqm_quanta;
32178 extern struct list_head nes_adapter_list;
32179
32180 -extern atomic_t cm_connects;
32181 -extern atomic_t cm_accepts;
32182 -extern atomic_t cm_disconnects;
32183 -extern atomic_t cm_closes;
32184 -extern atomic_t cm_connecteds;
32185 -extern atomic_t cm_connect_reqs;
32186 -extern atomic_t cm_rejects;
32187 -extern atomic_t mod_qp_timouts;
32188 -extern atomic_t qps_created;
32189 -extern atomic_t qps_destroyed;
32190 -extern atomic_t sw_qps_destroyed;
32191 +extern atomic_unchecked_t cm_connects;
32192 +extern atomic_unchecked_t cm_accepts;
32193 +extern atomic_unchecked_t cm_disconnects;
32194 +extern atomic_unchecked_t cm_closes;
32195 +extern atomic_unchecked_t cm_connecteds;
32196 +extern atomic_unchecked_t cm_connect_reqs;
32197 +extern atomic_unchecked_t cm_rejects;
32198 +extern atomic_unchecked_t mod_qp_timouts;
32199 +extern atomic_unchecked_t qps_created;
32200 +extern atomic_unchecked_t qps_destroyed;
32201 +extern atomic_unchecked_t sw_qps_destroyed;
32202 extern u32 mh_detected;
32203 extern u32 mh_pauses_sent;
32204 extern u32 cm_packets_sent;
32205 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32206 extern u32 cm_packets_received;
32207 extern u32 cm_packets_dropped;
32208 extern u32 cm_packets_retrans;
32209 -extern atomic_t cm_listens_created;
32210 -extern atomic_t cm_listens_destroyed;
32211 +extern atomic_unchecked_t cm_listens_created;
32212 +extern atomic_unchecked_t cm_listens_destroyed;
32213 extern u32 cm_backlog_drops;
32214 -extern atomic_t cm_loopbacks;
32215 -extern atomic_t cm_nodes_created;
32216 -extern atomic_t cm_nodes_destroyed;
32217 -extern atomic_t cm_accel_dropped_pkts;
32218 -extern atomic_t cm_resets_recvd;
32219 -extern atomic_t pau_qps_created;
32220 -extern atomic_t pau_qps_destroyed;
32221 +extern atomic_unchecked_t cm_loopbacks;
32222 +extern atomic_unchecked_t cm_nodes_created;
32223 +extern atomic_unchecked_t cm_nodes_destroyed;
32224 +extern atomic_unchecked_t cm_accel_dropped_pkts;
32225 +extern atomic_unchecked_t cm_resets_recvd;
32226 +extern atomic_unchecked_t pau_qps_created;
32227 +extern atomic_unchecked_t pau_qps_destroyed;
32228
32229 extern u32 int_mod_timer_init;
32230 extern u32 int_mod_cq_depth_256;
32231 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32232 index 71edfbb..15b62ae 100644
32233 --- a/drivers/infiniband/hw/nes/nes_cm.c
32234 +++ b/drivers/infiniband/hw/nes/nes_cm.c
32235 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32236 u32 cm_packets_retrans;
32237 u32 cm_packets_created;
32238 u32 cm_packets_received;
32239 -atomic_t cm_listens_created;
32240 -atomic_t cm_listens_destroyed;
32241 +atomic_unchecked_t cm_listens_created;
32242 +atomic_unchecked_t cm_listens_destroyed;
32243 u32 cm_backlog_drops;
32244 -atomic_t cm_loopbacks;
32245 -atomic_t cm_nodes_created;
32246 -atomic_t cm_nodes_destroyed;
32247 -atomic_t cm_accel_dropped_pkts;
32248 -atomic_t cm_resets_recvd;
32249 +atomic_unchecked_t cm_loopbacks;
32250 +atomic_unchecked_t cm_nodes_created;
32251 +atomic_unchecked_t cm_nodes_destroyed;
32252 +atomic_unchecked_t cm_accel_dropped_pkts;
32253 +atomic_unchecked_t cm_resets_recvd;
32254
32255 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32256 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32257 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32258
32259 static struct nes_cm_core *g_cm_core;
32260
32261 -atomic_t cm_connects;
32262 -atomic_t cm_accepts;
32263 -atomic_t cm_disconnects;
32264 -atomic_t cm_closes;
32265 -atomic_t cm_connecteds;
32266 -atomic_t cm_connect_reqs;
32267 -atomic_t cm_rejects;
32268 +atomic_unchecked_t cm_connects;
32269 +atomic_unchecked_t cm_accepts;
32270 +atomic_unchecked_t cm_disconnects;
32271 +atomic_unchecked_t cm_closes;
32272 +atomic_unchecked_t cm_connecteds;
32273 +atomic_unchecked_t cm_connect_reqs;
32274 +atomic_unchecked_t cm_rejects;
32275
32276 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32277 {
32278 @@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32279 kfree(listener);
32280 listener = NULL;
32281 ret = 0;
32282 - atomic_inc(&cm_listens_destroyed);
32283 + atomic_inc_unchecked(&cm_listens_destroyed);
32284 } else {
32285 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32286 }
32287 @@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32288 cm_node->rem_mac);
32289
32290 add_hte_node(cm_core, cm_node);
32291 - atomic_inc(&cm_nodes_created);
32292 + atomic_inc_unchecked(&cm_nodes_created);
32293
32294 return cm_node;
32295 }
32296 @@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32297 }
32298
32299 atomic_dec(&cm_core->node_cnt);
32300 - atomic_inc(&cm_nodes_destroyed);
32301 + atomic_inc_unchecked(&cm_nodes_destroyed);
32302 nesqp = cm_node->nesqp;
32303 if (nesqp) {
32304 nesqp->cm_node = NULL;
32305 @@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32306
32307 static void drop_packet(struct sk_buff *skb)
32308 {
32309 - atomic_inc(&cm_accel_dropped_pkts);
32310 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32311 dev_kfree_skb_any(skb);
32312 }
32313
32314 @@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32315 {
32316
32317 int reset = 0; /* whether to send reset in case of err.. */
32318 - atomic_inc(&cm_resets_recvd);
32319 + atomic_inc_unchecked(&cm_resets_recvd);
32320 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32321 " refcnt=%d\n", cm_node, cm_node->state,
32322 atomic_read(&cm_node->ref_count));
32323 @@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32324 rem_ref_cm_node(cm_node->cm_core, cm_node);
32325 return NULL;
32326 }
32327 - atomic_inc(&cm_loopbacks);
32328 + atomic_inc_unchecked(&cm_loopbacks);
32329 loopbackremotenode->loopbackpartner = cm_node;
32330 loopbackremotenode->tcp_cntxt.rcv_wscale =
32331 NES_CM_DEFAULT_RCV_WND_SCALE;
32332 @@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32333 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32334 else {
32335 rem_ref_cm_node(cm_core, cm_node);
32336 - atomic_inc(&cm_accel_dropped_pkts);
32337 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32338 dev_kfree_skb_any(skb);
32339 }
32340 break;
32341 @@ -2890,7 +2890,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32342
32343 if ((cm_id) && (cm_id->event_handler)) {
32344 if (issue_disconn) {
32345 - atomic_inc(&cm_disconnects);
32346 + atomic_inc_unchecked(&cm_disconnects);
32347 cm_event.event = IW_CM_EVENT_DISCONNECT;
32348 cm_event.status = disconn_status;
32349 cm_event.local_addr = cm_id->local_addr;
32350 @@ -2912,7 +2912,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32351 }
32352
32353 if (issue_close) {
32354 - atomic_inc(&cm_closes);
32355 + atomic_inc_unchecked(&cm_closes);
32356 nes_disconnect(nesqp, 1);
32357
32358 cm_id->provider_data = nesqp;
32359 @@ -3048,7 +3048,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32360
32361 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32362 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32363 - atomic_inc(&cm_accepts);
32364 + atomic_inc_unchecked(&cm_accepts);
32365
32366 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32367 netdev_refcnt_read(nesvnic->netdev));
32368 @@ -3250,7 +3250,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32369 struct nes_cm_core *cm_core;
32370 u8 *start_buff;
32371
32372 - atomic_inc(&cm_rejects);
32373 + atomic_inc_unchecked(&cm_rejects);
32374 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32375 loopback = cm_node->loopbackpartner;
32376 cm_core = cm_node->cm_core;
32377 @@ -3310,7 +3310,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32378 ntohl(cm_id->local_addr.sin_addr.s_addr),
32379 ntohs(cm_id->local_addr.sin_port));
32380
32381 - atomic_inc(&cm_connects);
32382 + atomic_inc_unchecked(&cm_connects);
32383 nesqp->active_conn = 1;
32384
32385 /* cache the cm_id in the qp */
32386 @@ -3416,7 +3416,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32387 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32388 return err;
32389 }
32390 - atomic_inc(&cm_listens_created);
32391 + atomic_inc_unchecked(&cm_listens_created);
32392 }
32393
32394 cm_id->add_ref(cm_id);
32395 @@ -3517,7 +3517,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32396
32397 if (nesqp->destroyed)
32398 return;
32399 - atomic_inc(&cm_connecteds);
32400 + atomic_inc_unchecked(&cm_connecteds);
32401 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32402 " local port 0x%04X. jiffies = %lu.\n",
32403 nesqp->hwqp.qp_id,
32404 @@ -3704,7 +3704,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32405
32406 cm_id->add_ref(cm_id);
32407 ret = cm_id->event_handler(cm_id, &cm_event);
32408 - atomic_inc(&cm_closes);
32409 + atomic_inc_unchecked(&cm_closes);
32410 cm_event.event = IW_CM_EVENT_CLOSE;
32411 cm_event.status = 0;
32412 cm_event.provider_data = cm_id->provider_data;
32413 @@ -3740,7 +3740,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32414 return;
32415 cm_id = cm_node->cm_id;
32416
32417 - atomic_inc(&cm_connect_reqs);
32418 + atomic_inc_unchecked(&cm_connect_reqs);
32419 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32420 cm_node, cm_id, jiffies);
32421
32422 @@ -3780,7 +3780,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32423 return;
32424 cm_id = cm_node->cm_id;
32425
32426 - atomic_inc(&cm_connect_reqs);
32427 + atomic_inc_unchecked(&cm_connect_reqs);
32428 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32429 cm_node, cm_id, jiffies);
32430
32431 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32432 index 3ba7be3..c81f6ff 100644
32433 --- a/drivers/infiniband/hw/nes/nes_mgt.c
32434 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
32435 @@ -40,8 +40,8 @@
32436 #include "nes.h"
32437 #include "nes_mgt.h"
32438
32439 -atomic_t pau_qps_created;
32440 -atomic_t pau_qps_destroyed;
32441 +atomic_unchecked_t pau_qps_created;
32442 +atomic_unchecked_t pau_qps_destroyed;
32443
32444 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32445 {
32446 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32447 {
32448 struct sk_buff *skb;
32449 unsigned long flags;
32450 - atomic_inc(&pau_qps_destroyed);
32451 + atomic_inc_unchecked(&pau_qps_destroyed);
32452
32453 /* Free packets that have not yet been forwarded */
32454 /* Lock is acquired by skb_dequeue when removing the skb */
32455 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32456 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32457 skb_queue_head_init(&nesqp->pau_list);
32458 spin_lock_init(&nesqp->pau_lock);
32459 - atomic_inc(&pau_qps_created);
32460 + atomic_inc_unchecked(&pau_qps_created);
32461 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32462 }
32463
32464 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32465 index f3a3ecf..57d311d 100644
32466 --- a/drivers/infiniband/hw/nes/nes_nic.c
32467 +++ b/drivers/infiniband/hw/nes/nes_nic.c
32468 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32469 target_stat_values[++index] = mh_detected;
32470 target_stat_values[++index] = mh_pauses_sent;
32471 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32472 - target_stat_values[++index] = atomic_read(&cm_connects);
32473 - target_stat_values[++index] = atomic_read(&cm_accepts);
32474 - target_stat_values[++index] = atomic_read(&cm_disconnects);
32475 - target_stat_values[++index] = atomic_read(&cm_connecteds);
32476 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32477 - target_stat_values[++index] = atomic_read(&cm_rejects);
32478 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32479 - target_stat_values[++index] = atomic_read(&qps_created);
32480 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32481 - target_stat_values[++index] = atomic_read(&qps_destroyed);
32482 - target_stat_values[++index] = atomic_read(&cm_closes);
32483 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32484 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32485 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32486 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32487 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32488 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32489 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32490 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32491 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32492 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32493 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32494 target_stat_values[++index] = cm_packets_sent;
32495 target_stat_values[++index] = cm_packets_bounced;
32496 target_stat_values[++index] = cm_packets_created;
32497 target_stat_values[++index] = cm_packets_received;
32498 target_stat_values[++index] = cm_packets_dropped;
32499 target_stat_values[++index] = cm_packets_retrans;
32500 - target_stat_values[++index] = atomic_read(&cm_listens_created);
32501 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32502 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32503 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32504 target_stat_values[++index] = cm_backlog_drops;
32505 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
32506 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
32507 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32508 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32509 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32510 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32511 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32512 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32513 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32514 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32515 target_stat_values[++index] = nesadapter->free_4kpbl;
32516 target_stat_values[++index] = nesadapter->free_256pbl;
32517 target_stat_values[++index] = int_mod_timer_init;
32518 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32519 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32520 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32521 - target_stat_values[++index] = atomic_read(&pau_qps_created);
32522 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32523 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32524 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32525 }
32526
32527 /**
32528 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32529 index 8b8812d..a5e1133 100644
32530 --- a/drivers/infiniband/hw/nes/nes_verbs.c
32531 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
32532 @@ -46,9 +46,9 @@
32533
32534 #include <rdma/ib_umem.h>
32535
32536 -atomic_t mod_qp_timouts;
32537 -atomic_t qps_created;
32538 -atomic_t sw_qps_destroyed;
32539 +atomic_unchecked_t mod_qp_timouts;
32540 +atomic_unchecked_t qps_created;
32541 +atomic_unchecked_t sw_qps_destroyed;
32542
32543 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32544
32545 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32546 if (init_attr->create_flags)
32547 return ERR_PTR(-EINVAL);
32548
32549 - atomic_inc(&qps_created);
32550 + atomic_inc_unchecked(&qps_created);
32551 switch (init_attr->qp_type) {
32552 case IB_QPT_RC:
32553 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32554 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32555 struct iw_cm_event cm_event;
32556 int ret = 0;
32557
32558 - atomic_inc(&sw_qps_destroyed);
32559 + atomic_inc_unchecked(&sw_qps_destroyed);
32560 nesqp->destroyed = 1;
32561
32562 /* Blow away the connection if it exists. */
32563 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32564 index 6b811e3..f8acf88 100644
32565 --- a/drivers/infiniband/hw/qib/qib.h
32566 +++ b/drivers/infiniband/hw/qib/qib.h
32567 @@ -51,6 +51,7 @@
32568 #include <linux/completion.h>
32569 #include <linux/kref.h>
32570 #include <linux/sched.h>
32571 +#include <linux/slab.h>
32572
32573 #include "qib_common.h"
32574 #include "qib_verbs.h"
32575 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32576 index da739d9..da1c7f4 100644
32577 --- a/drivers/input/gameport/gameport.c
32578 +++ b/drivers/input/gameport/gameport.c
32579 @@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32580 */
32581 static void gameport_init_port(struct gameport *gameport)
32582 {
32583 - static atomic_t gameport_no = ATOMIC_INIT(0);
32584 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32585
32586 __module_get(THIS_MODULE);
32587
32588 mutex_init(&gameport->drv_mutex);
32589 device_initialize(&gameport->dev);
32590 dev_set_name(&gameport->dev, "gameport%lu",
32591 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32592 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32593 gameport->dev.bus = &gameport_bus;
32594 gameport->dev.release = gameport_release_port;
32595 if (gameport->parent)
32596 diff --git a/drivers/input/input.c b/drivers/input/input.c
32597 index 8921c61..f5cd63d 100644
32598 --- a/drivers/input/input.c
32599 +++ b/drivers/input/input.c
32600 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32601 */
32602 int input_register_device(struct input_dev *dev)
32603 {
32604 - static atomic_t input_no = ATOMIC_INIT(0);
32605 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32606 struct input_handler *handler;
32607 const char *path;
32608 int error;
32609 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32610 dev->setkeycode = input_default_setkeycode;
32611
32612 dev_set_name(&dev->dev, "input%ld",
32613 - (unsigned long) atomic_inc_return(&input_no) - 1);
32614 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32615
32616 error = device_add(&dev->dev);
32617 if (error)
32618 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32619 index b8d8611..7a4a04b 100644
32620 --- a/drivers/input/joystick/sidewinder.c
32621 +++ b/drivers/input/joystick/sidewinder.c
32622 @@ -30,6 +30,7 @@
32623 #include <linux/kernel.h>
32624 #include <linux/module.h>
32625 #include <linux/slab.h>
32626 +#include <linux/sched.h>
32627 #include <linux/init.h>
32628 #include <linux/input.h>
32629 #include <linux/gameport.h>
32630 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32631 index fd7a0d5..a4af10c 100644
32632 --- a/drivers/input/joystick/xpad.c
32633 +++ b/drivers/input/joystick/xpad.c
32634 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32635
32636 static int xpad_led_probe(struct usb_xpad *xpad)
32637 {
32638 - static atomic_t led_seq = ATOMIC_INIT(0);
32639 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32640 long led_no;
32641 struct xpad_led *led;
32642 struct led_classdev *led_cdev;
32643 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32644 if (!led)
32645 return -ENOMEM;
32646
32647 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32648 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32649
32650 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32651 led->xpad = xpad;
32652 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32653 index 0110b5a..d3ad144 100644
32654 --- a/drivers/input/mousedev.c
32655 +++ b/drivers/input/mousedev.c
32656 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32657
32658 spin_unlock_irq(&client->packet_lock);
32659
32660 - if (copy_to_user(buffer, data, count))
32661 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32662 return -EFAULT;
32663
32664 return count;
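The mousedev change (and the matching b1.c and icn.c hunks further down) adds the same guard: the caller-supplied length is compared against the fixed kernel buffer before copy_to_user()/copy_from_user() runs, so an oversized count fails with -EFAULT instead of reading or writing past the buffer. A minimal sketch of the idiom, with hypothetical names:

	char kbuf[PACKET_SIZE];	/* PACKET_SIZE, kbuf, ubuf are illustrative */

	/* reject counts larger than the kernel-side buffer before copying */
	if (count > sizeof(kbuf) || copy_to_user(ubuf, kbuf, count))
		return -EFAULT;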
32665 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32666 index d0f7533..fb8215b 100644
32667 --- a/drivers/input/serio/serio.c
32668 +++ b/drivers/input/serio/serio.c
32669 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
32670 */
32671 static void serio_init_port(struct serio *serio)
32672 {
32673 - static atomic_t serio_no = ATOMIC_INIT(0);
32674 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32675
32676 __module_get(THIS_MODULE);
32677
32678 @@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
32679 mutex_init(&serio->drv_mutex);
32680 device_initialize(&serio->dev);
32681 dev_set_name(&serio->dev, "serio%ld",
32682 - (long)atomic_inc_return(&serio_no) - 1);
32683 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32684 serio->dev.bus = &serio_bus;
32685 serio->dev.release = serio_release_port;
32686 serio->dev.groups = serio_device_attr_groups;
32687 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32688 index b902794..fc7b85b 100644
32689 --- a/drivers/isdn/capi/capi.c
32690 +++ b/drivers/isdn/capi/capi.c
32691 @@ -83,8 +83,8 @@ struct capiminor {
32692
32693 struct capi20_appl *ap;
32694 u32 ncci;
32695 - atomic_t datahandle;
32696 - atomic_t msgid;
32697 + atomic_unchecked_t datahandle;
32698 + atomic_unchecked_t msgid;
32699
32700 struct tty_port port;
32701 int ttyinstop;
32702 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32703 capimsg_setu16(s, 2, mp->ap->applid);
32704 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32705 capimsg_setu8 (s, 5, CAPI_RESP);
32706 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32707 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32708 capimsg_setu32(s, 8, mp->ncci);
32709 capimsg_setu16(s, 12, datahandle);
32710 }
32711 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32712 mp->outbytes -= len;
32713 spin_unlock_bh(&mp->outlock);
32714
32715 - datahandle = atomic_inc_return(&mp->datahandle);
32716 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32717 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32718 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32719 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32720 capimsg_setu16(skb->data, 2, mp->ap->applid);
32721 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32722 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32723 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32724 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32725 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32726 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32727 capimsg_setu16(skb->data, 16, len); /* Data length */
32728 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32729 index 821f7ac..28d4030 100644
32730 --- a/drivers/isdn/hardware/avm/b1.c
32731 +++ b/drivers/isdn/hardware/avm/b1.c
32732 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
32733 }
32734 if (left) {
32735 if (t4file->user) {
32736 - if (copy_from_user(buf, dp, left))
32737 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32738 return -EFAULT;
32739 } else {
32740 memcpy(buf, dp, left);
32741 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
32742 }
32743 if (left) {
32744 if (config->user) {
32745 - if (copy_from_user(buf, dp, left))
32746 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32747 return -EFAULT;
32748 } else {
32749 memcpy(buf, dp, left);
32750 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32751 index dd6b53a..19d9ee6 100644
32752 --- a/drivers/isdn/hardware/eicon/divasync.h
32753 +++ b/drivers/isdn/hardware/eicon/divasync.h
32754 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32755 } diva_didd_add_adapter_t;
32756 typedef struct _diva_didd_remove_adapter {
32757 IDI_CALL p_request;
32758 -} diva_didd_remove_adapter_t;
32759 +} __no_const diva_didd_remove_adapter_t;
32760 typedef struct _diva_didd_read_adapter_array {
32761 void *buffer;
32762 dword length;
32763 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32764 index d303e65..28bcb7b 100644
32765 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32766 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32767 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32768 typedef struct _diva_os_idi_adapter_interface {
32769 diva_init_card_proc_t cleanup_adapter_proc;
32770 diva_cmd_card_proc_t cmd_proc;
32771 -} diva_os_idi_adapter_interface_t;
32772 +} __no_const diva_os_idi_adapter_interface_t;
32773
32774 typedef struct _diva_os_xdi_adapter {
32775 struct list_head link;
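The __no_const annotations on these typedef'd interface structs relate to the constify gcc plugin this patch carries, which makes structures consisting only of function pointers const by default; __no_const exempts the few that must stay writable at runtime. A sketch of the pattern, assuming the __no_const definition added elsewhere in the patch:

	/* ops table that must remain writable, so it opts out of
	 * the constify plugin's automatic const */
	typedef struct _example_iface {
		int  (*init)(void *ctx);
		void (*cleanup)(void *ctx);
	} __no_const example_iface_t;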
32776 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32777 index e74df7c..03a03ba 100644
32778 --- a/drivers/isdn/icn/icn.c
32779 +++ b/drivers/isdn/icn/icn.c
32780 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
32781 if (count > len)
32782 count = len;
32783 if (user) {
32784 - if (copy_from_user(msg, buf, count))
32785 + if (count > sizeof msg || copy_from_user(msg, buf, count))
32786 return -EFAULT;
32787 } else
32788 memcpy(msg, buf, count);
32789 diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
32790 index 8bc4915..4cc6a2e 100644
32791 --- a/drivers/leds/leds-mc13783.c
32792 +++ b/drivers/leds/leds-mc13783.c
32793 @@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
32794 return -EINVAL;
32795 }
32796
32797 - led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
32798 + led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
32799 if (led == NULL) {
32800 dev_err(&pdev->dev, "failed to alloc memory\n");
32801 return -ENOMEM;
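The kzalloc() call multiplied a platform-supplied LED count by the element size in the caller, where a wrapped multiplication would silently yield an undersized buffer. kcalloc() performs the same zeroed allocation but fails on multiplication overflow. Conceptually (simplified, not the verbatim slab code):

	#include <linux/kernel.h>
	#include <linux/slab.h>

	static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
	{
		if (size != 0 && n > ULONG_MAX / size)
			return NULL;		/* n * size would wrap */
		return kzalloc(n * size, flags);
	}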
32802 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32803 index b5fdcb7..5b6c59f 100644
32804 --- a/drivers/lguest/core.c
32805 +++ b/drivers/lguest/core.c
32806 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
32807 * it's worked so far. The end address needs +1 because __get_vm_area
32808 * allocates an extra guard page, so we need space for that.
32809 */
32810 +
32811 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32812 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32813 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32814 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32815 +#else
32816 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32817 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32818 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32819 +#endif
32820 +
32821 if (!switcher_vma) {
32822 err = -ENOMEM;
32823 printk("lguest: could not map switcher pages high\n");
32824 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
32825 * Now the Switcher is mapped at the right address, we can't fail!
32826 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32827 */
32828 - memcpy(switcher_vma->addr, start_switcher_text,
32829 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32830 end_switcher_text - start_switcher_text);
32831
32832 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32833 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32834 index 3980903..ce25c5e 100644
32835 --- a/drivers/lguest/x86/core.c
32836 +++ b/drivers/lguest/x86/core.c
32837 @@ -59,7 +59,7 @@ static struct {
32838 /* Offset from where switcher.S was compiled to where we've copied it */
32839 static unsigned long switcher_offset(void)
32840 {
32841 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32842 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32843 }
32844
32845 /* This cpu's struct lguest_pages. */
32846 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32847 * These copies are pretty cheap, so we do them unconditionally: */
32848 /* Save the current Host top-level page directory.
32849 */
32850 +
32851 +#ifdef CONFIG_PAX_PER_CPU_PGD
32852 + pages->state.host_cr3 = read_cr3();
32853 +#else
32854 pages->state.host_cr3 = __pa(current->mm->pgd);
32855 +#endif
32856 +
32857 /*
32858 * Set up the Guest's page tables to see this CPU's pages (and no
32859 * other CPU's pages).
32860 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32861 * compiled-in switcher code and the high-mapped copy we just made.
32862 */
32863 for (i = 0; i < IDT_ENTRIES; i++)
32864 - default_idt_entries[i] += switcher_offset();
32865 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32866
32867 /*
32868 * Set up the Switcher's per-cpu areas.
32869 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32870 * it will be undisturbed when we switch. To change %cs and jump we
32871 * need this structure to feed to Intel's "lcall" instruction.
32872 */
32873 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32874 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32875 lguest_entry.segment = LGUEST_CS;
32876
32877 /*
32878 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
32879 index 40634b0..4f5855e 100644
32880 --- a/drivers/lguest/x86/switcher_32.S
32881 +++ b/drivers/lguest/x86/switcher_32.S
32882 @@ -87,6 +87,7 @@
32883 #include <asm/page.h>
32884 #include <asm/segment.h>
32885 #include <asm/lguest.h>
32886 +#include <asm/processor-flags.h>
32887
32888 // We mark the start of the code to copy
32889 // It's placed in .text tho it's never run here
32890 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32891 // Changes type when we load it: damn Intel!
32892 // For after we switch over our page tables
32893 // That entry will be read-only: we'd crash.
32894 +
32895 +#ifdef CONFIG_PAX_KERNEXEC
32896 + mov %cr0, %edx
32897 + xor $X86_CR0_WP, %edx
32898 + mov %edx, %cr0
32899 +#endif
32900 +
32901 movl $(GDT_ENTRY_TSS*8), %edx
32902 ltr %dx
32903
32904 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
32905 // Let's clear it again for our return.
32906 // The GDT descriptor of the Host
32907 // Points to the table after two "size" bytes
32908 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
32909 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
32910 // Clear "used" from type field (byte 5, bit 2)
32911 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
32912 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
32913 +
32914 +#ifdef CONFIG_PAX_KERNEXEC
32915 + mov %cr0, %eax
32916 + xor $X86_CR0_WP, %eax
32917 + mov %eax, %cr0
32918 +#endif
32919
32920 // Once our page table's switched, the Guest is live!
32921 // The Host fades as we run this final step.
32922 @@ -295,13 +309,12 @@ deliver_to_host:
32923 // I consulted gcc, and it gave
32924 // These instructions, which I gladly credit:
32925 leal (%edx,%ebx,8), %eax
32926 - movzwl (%eax),%edx
32927 - movl 4(%eax), %eax
32928 - xorw %ax, %ax
32929 - orl %eax, %edx
32930 + movl 4(%eax), %edx
32931 + movw (%eax), %dx
32932 // Now the address of the handler's in %edx
32933 // We call it now: its "iret" drops us home.
32934 - jmp *%edx
32935 + ljmp $__KERNEL_CS, $1f
32936 +1: jmp *%edx
32937
32938 // Every interrupt can come to us here
32939 // But we must truly tell each apart.
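The switcher changes above briefly clear the WP bit in CR0 around the TSS/GDT descriptor update, which would otherwise fault once KERNEXEC makes that memory read-only. The same open-coded CR0 dance expressed in C, as a hedged illustration rather than the patch's code (read_cr0()/write_cr0() are the usual x86 arch helpers; a real implementation also has to keep interrupts and NMIs from running while WP is clear):

	#include <linux/preempt.h>
	#include <asm/processor-flags.h>

	static void write_readonly_word(unsigned long *p, unsigned long val)
	{
		unsigned long cr0;

		preempt_disable();		/* stay on this CPU between the CR0 writes */
		cr0 = read_cr0();
		write_cr0(cr0 & ~X86_CR0_WP);	/* allow stores to read-only pages */
		*p = val;
		write_cr0(cr0);			/* restore write protection */
		preempt_enable();
	}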
32940 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
32941 index 20e5c2c..9e849a9 100644
32942 --- a/drivers/macintosh/macio_asic.c
32943 +++ b/drivers/macintosh/macio_asic.c
32944 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
32945 * MacIO is matched against any Apple ID, it's probe() function
32946 * will then decide wether it applies or not
32947 */
32948 -static const struct pci_device_id __devinitdata pci_ids [] = { {
32949 +static const struct pci_device_id __devinitconst pci_ids [] = { {
32950 .vendor = PCI_VENDOR_ID_APPLE,
32951 .device = PCI_ANY_ID,
32952 .subvendor = PCI_ANY_ID,
32953 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
32954 index 17e2b47..bcbeec4 100644
32955 --- a/drivers/md/bitmap.c
32956 +++ b/drivers/md/bitmap.c
32957 @@ -1823,7 +1823,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
32958 chunk_kb ? "KB" : "B");
32959 if (bitmap->file) {
32960 seq_printf(seq, ", file: ");
32961 - seq_path(seq, &bitmap->file->f_path, " \t\n");
32962 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32963 }
32964
32965 seq_printf(seq, "\n");
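seq_path() escapes any byte found in its third argument as a backslash followed by three octal digits; adding the backslash itself to the escape set means a hostile file name cannot emit raw backslashes and thereby forge what looks like an already-escaped character to whatever parses this proc output. Illustrative values:

	/*  name on disk                    emitted by seq_path(..., " \t\n\\")
	 *  "a b"                      ->   "a\040b"            (space, as before)
	 *  "data" + literal '\','0','1','2' + "fake"
	 *                             ->   "data\134012fake"   (backslash now escaped)
	 */
	seq_path(seq, &bitmap->file->f_path, " \t\n\\");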
32966 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
32967 index a1a3e6d..1918bfc 100644
32968 --- a/drivers/md/dm-ioctl.c
32969 +++ b/drivers/md/dm-ioctl.c
32970 @@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
32971 cmd == DM_LIST_VERSIONS_CMD)
32972 return 0;
32973
32974 - if ((cmd == DM_DEV_CREATE_CMD)) {
32975 + if (cmd == DM_DEV_CREATE_CMD) {
32976 if (!*param->name) {
32977 DMWARN("name not supplied when creating device");
32978 return -EINVAL;
32979 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
32980 index d039de8..0cf5b87 100644
32981 --- a/drivers/md/dm-raid1.c
32982 +++ b/drivers/md/dm-raid1.c
32983 @@ -40,7 +40,7 @@ enum dm_raid1_error {
32984
32985 struct mirror {
32986 struct mirror_set *ms;
32987 - atomic_t error_count;
32988 + atomic_unchecked_t error_count;
32989 unsigned long error_type;
32990 struct dm_dev *dev;
32991 sector_t offset;
32992 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
32993 struct mirror *m;
32994
32995 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
32996 - if (!atomic_read(&m->error_count))
32997 + if (!atomic_read_unchecked(&m->error_count))
32998 return m;
32999
33000 return NULL;
33001 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33002 * simple way to tell if a device has encountered
33003 * errors.
33004 */
33005 - atomic_inc(&m->error_count);
33006 + atomic_inc_unchecked(&m->error_count);
33007
33008 if (test_and_set_bit(error_type, &m->error_type))
33009 return;
33010 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33011 struct mirror *m = get_default_mirror(ms);
33012
33013 do {
33014 - if (likely(!atomic_read(&m->error_count)))
33015 + if (likely(!atomic_read_unchecked(&m->error_count)))
33016 return m;
33017
33018 if (m-- == ms->mirror)
33019 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33020 {
33021 struct mirror *default_mirror = get_default_mirror(m->ms);
33022
33023 - return !atomic_read(&default_mirror->error_count);
33024 + return !atomic_read_unchecked(&default_mirror->error_count);
33025 }
33026
33027 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33028 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33029 */
33030 if (likely(region_in_sync(ms, region, 1)))
33031 m = choose_mirror(ms, bio->bi_sector);
33032 - else if (m && atomic_read(&m->error_count))
33033 + else if (m && atomic_read_unchecked(&m->error_count))
33034 m = NULL;
33035
33036 if (likely(m))
33037 @@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33038 }
33039
33040 ms->mirror[mirror].ms = ms;
33041 - atomic_set(&(ms->mirror[mirror].error_count), 0);
33042 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33043 ms->mirror[mirror].error_type = 0;
33044 ms->mirror[mirror].offset = offset;
33045
33046 @@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33047 */
33048 static char device_status_char(struct mirror *m)
33049 {
33050 - if (!atomic_read(&(m->error_count)))
33051 + if (!atomic_read_unchecked(&(m->error_count)))
33052 return 'A';
33053
33054 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33055 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33056 index 35c94ff..20d4c17 100644
33057 --- a/drivers/md/dm-stripe.c
33058 +++ b/drivers/md/dm-stripe.c
33059 @@ -20,7 +20,7 @@ struct stripe {
33060 struct dm_dev *dev;
33061 sector_t physical_start;
33062
33063 - atomic_t error_count;
33064 + atomic_unchecked_t error_count;
33065 };
33066
33067 struct stripe_c {
33068 @@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33069 kfree(sc);
33070 return r;
33071 }
33072 - atomic_set(&(sc->stripe[i].error_count), 0);
33073 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33074 }
33075
33076 ti->private = sc;
33077 @@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33078 DMEMIT("%d ", sc->stripes);
33079 for (i = 0; i < sc->stripes; i++) {
33080 DMEMIT("%s ", sc->stripe[i].dev->name);
33081 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33082 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33083 'D' : 'A';
33084 }
33085 buffer[i] = '\0';
33086 @@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33087 */
33088 for (i = 0; i < sc->stripes; i++)
33089 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33090 - atomic_inc(&(sc->stripe[i].error_count));
33091 - if (atomic_read(&(sc->stripe[i].error_count)) <
33092 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
33093 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33094 DM_IO_ERROR_THRESHOLD)
33095 schedule_work(&sc->trigger_event);
33096 }
33097 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33098 index 2e227fb..44ead1f 100644
33099 --- a/drivers/md/dm-table.c
33100 +++ b/drivers/md/dm-table.c
33101 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33102 if (!dev_size)
33103 return 0;
33104
33105 - if ((start >= dev_size) || (start + len > dev_size)) {
33106 + if ((start >= dev_size) || (len > dev_size - start)) {
33107 DMWARN("%s: %s too small for target: "
33108 "start=%llu, len=%llu, dev_size=%llu",
33109 dm_device_name(ti->table->md), bdevname(bdev, b),
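Checking `len > dev_size - start` instead of `start + len > dev_size` avoids the case where the addition wraps around sector_t and an out-of-range target slips past the test; the subtraction cannot underflow because `start >= dev_size` has already been rejected. The idiom in general form (types and name chosen for illustration):

	#include <linux/types.h>

	/* Overflow-safe "does [start, start + len) fit inside size?" */
	static inline bool range_fits(u64 start, u64 len, u64 size)
	{
		return start < size && len <= size - start;
	}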
33110 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33111 index 737d388..811ad5a 100644
33112 --- a/drivers/md/dm-thin-metadata.c
33113 +++ b/drivers/md/dm-thin-metadata.c
33114 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33115
33116 pmd->info.tm = tm;
33117 pmd->info.levels = 2;
33118 - pmd->info.value_type.context = pmd->data_sm;
33119 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33120 pmd->info.value_type.size = sizeof(__le64);
33121 pmd->info.value_type.inc = data_block_inc;
33122 pmd->info.value_type.dec = data_block_dec;
33123 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33124
33125 pmd->bl_info.tm = tm;
33126 pmd->bl_info.levels = 1;
33127 - pmd->bl_info.value_type.context = pmd->data_sm;
33128 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33129 pmd->bl_info.value_type.size = sizeof(__le64);
33130 pmd->bl_info.value_type.inc = data_block_inc;
33131 pmd->bl_info.value_type.dec = data_block_dec;
33132 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33133 index e24143c..ce2f21a1 100644
33134 --- a/drivers/md/dm.c
33135 +++ b/drivers/md/dm.c
33136 @@ -176,9 +176,9 @@ struct mapped_device {
33137 /*
33138 * Event handling.
33139 */
33140 - atomic_t event_nr;
33141 + atomic_unchecked_t event_nr;
33142 wait_queue_head_t eventq;
33143 - atomic_t uevent_seq;
33144 + atomic_unchecked_t uevent_seq;
33145 struct list_head uevent_list;
33146 spinlock_t uevent_lock; /* Protect access to uevent_list */
33147
33148 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
33149 rwlock_init(&md->map_lock);
33150 atomic_set(&md->holders, 1);
33151 atomic_set(&md->open_count, 0);
33152 - atomic_set(&md->event_nr, 0);
33153 - atomic_set(&md->uevent_seq, 0);
33154 + atomic_set_unchecked(&md->event_nr, 0);
33155 + atomic_set_unchecked(&md->uevent_seq, 0);
33156 INIT_LIST_HEAD(&md->uevent_list);
33157 spin_lock_init(&md->uevent_lock);
33158
33159 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
33160
33161 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33162
33163 - atomic_inc(&md->event_nr);
33164 + atomic_inc_unchecked(&md->event_nr);
33165 wake_up(&md->eventq);
33166 }
33167
33168 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33169
33170 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33171 {
33172 - return atomic_add_return(1, &md->uevent_seq);
33173 + return atomic_add_return_unchecked(1, &md->uevent_seq);
33174 }
33175
33176 uint32_t dm_get_event_nr(struct mapped_device *md)
33177 {
33178 - return atomic_read(&md->event_nr);
33179 + return atomic_read_unchecked(&md->event_nr);
33180 }
33181
33182 int dm_wait_event(struct mapped_device *md, int event_nr)
33183 {
33184 return wait_event_interruptible(md->eventq,
33185 - (event_nr != atomic_read(&md->event_nr)));
33186 + (event_nr != atomic_read_unchecked(&md->event_nr)));
33187 }
33188
33189 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33190 diff --git a/drivers/md/md.c b/drivers/md/md.c
33191 index 2b30ffd..bf789ce 100644
33192 --- a/drivers/md/md.c
33193 +++ b/drivers/md/md.c
33194 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33195 * start build, activate spare
33196 */
33197 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33198 -static atomic_t md_event_count;
33199 +static atomic_unchecked_t md_event_count;
33200 void md_new_event(struct mddev *mddev)
33201 {
33202 - atomic_inc(&md_event_count);
33203 + atomic_inc_unchecked(&md_event_count);
33204 wake_up(&md_event_waiters);
33205 }
33206 EXPORT_SYMBOL_GPL(md_new_event);
33207 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33208 */
33209 static void md_new_event_inintr(struct mddev *mddev)
33210 {
33211 - atomic_inc(&md_event_count);
33212 + atomic_inc_unchecked(&md_event_count);
33213 wake_up(&md_event_waiters);
33214 }
33215
33216 @@ -1526,7 +1526,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33217
33218 rdev->preferred_minor = 0xffff;
33219 rdev->data_offset = le64_to_cpu(sb->data_offset);
33220 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33221 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33222
33223 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33224 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33225 @@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33226 else
33227 sb->resync_offset = cpu_to_le64(0);
33228
33229 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33230 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33231
33232 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33233 sb->size = cpu_to_le64(mddev->dev_sectors);
33234 @@ -2691,7 +2691,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33235 static ssize_t
33236 errors_show(struct md_rdev *rdev, char *page)
33237 {
33238 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33239 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33240 }
33241
33242 static ssize_t
33243 @@ -2700,7 +2700,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33244 char *e;
33245 unsigned long n = simple_strtoul(buf, &e, 10);
33246 if (*buf && (*e == 0 || *e == '\n')) {
33247 - atomic_set(&rdev->corrected_errors, n);
33248 + atomic_set_unchecked(&rdev->corrected_errors, n);
33249 return len;
33250 }
33251 return -EINVAL;
33252 @@ -3086,8 +3086,8 @@ int md_rdev_init(struct md_rdev *rdev)
33253 rdev->sb_loaded = 0;
33254 rdev->bb_page = NULL;
33255 atomic_set(&rdev->nr_pending, 0);
33256 - atomic_set(&rdev->read_errors, 0);
33257 - atomic_set(&rdev->corrected_errors, 0);
33258 + atomic_set_unchecked(&rdev->read_errors, 0);
33259 + atomic_set_unchecked(&rdev->corrected_errors, 0);
33260
33261 INIT_LIST_HEAD(&rdev->same_set);
33262 init_waitqueue_head(&rdev->blocked_wait);
33263 @@ -6738,7 +6738,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33264
33265 spin_unlock(&pers_lock);
33266 seq_printf(seq, "\n");
33267 - seq->poll_event = atomic_read(&md_event_count);
33268 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33269 return 0;
33270 }
33271 if (v == (void*)2) {
33272 @@ -6841,7 +6841,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33273 return error;
33274
33275 seq = file->private_data;
33276 - seq->poll_event = atomic_read(&md_event_count);
33277 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33278 return error;
33279 }
33280
33281 @@ -6855,7 +6855,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33282 /* always allow read */
33283 mask = POLLIN | POLLRDNORM;
33284
33285 - if (seq->poll_event != atomic_read(&md_event_count))
33286 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33287 mask |= POLLERR | POLLPRI;
33288 return mask;
33289 }
33290 @@ -6899,7 +6899,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33291 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33292 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33293 (int)part_stat_read(&disk->part0, sectors[1]) -
33294 - atomic_read(&disk->sync_io);
33295 + atomic_read_unchecked(&disk->sync_io);
33296 /* sync IO will cause sync_io to increase before the disk_stats
33297 * as sync_io is counted when a request starts, and
33298 * disk_stats is counted when it completes.
33299 diff --git a/drivers/md/md.h b/drivers/md/md.h
33300 index 1c2063c..9639970 100644
33301 --- a/drivers/md/md.h
33302 +++ b/drivers/md/md.h
33303 @@ -93,13 +93,13 @@ struct md_rdev {
33304 * only maintained for arrays that
33305 * support hot removal
33306 */
33307 - atomic_t read_errors; /* number of consecutive read errors that
33308 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
33309 * we have tried to ignore.
33310 */
33311 struct timespec last_read_error; /* monotonic time since our
33312 * last read error
33313 */
33314 - atomic_t corrected_errors; /* number of corrected read errors,
33315 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33316 * for reporting to userspace and storing
33317 * in superblock.
33318 */
33319 @@ -429,7 +429,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33320
33321 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33322 {
33323 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33324 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33325 }
33326
33327 struct md_personality
33328 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33329 index 50ed53b..4f29d7d 100644
33330 --- a/drivers/md/persistent-data/dm-space-map-checker.c
33331 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
33332 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
33333 /*----------------------------------------------------------------*/
33334
33335 struct sm_checker {
33336 - struct dm_space_map sm;
33337 + dm_space_map_no_const sm;
33338
33339 struct count_array old_counts;
33340 struct count_array counts;
33341 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33342 index fc469ba..2d91555 100644
33343 --- a/drivers/md/persistent-data/dm-space-map-disk.c
33344 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
33345 @@ -23,7 +23,7 @@
33346 * Space map interface.
33347 */
33348 struct sm_disk {
33349 - struct dm_space_map sm;
33350 + dm_space_map_no_const sm;
33351
33352 struct ll_disk ll;
33353 struct ll_disk old_ll;
33354 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33355 index e89ae5e..062e4c2 100644
33356 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
33357 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33358 @@ -43,7 +43,7 @@ struct block_op {
33359 };
33360
33361 struct sm_metadata {
33362 - struct dm_space_map sm;
33363 + dm_space_map_no_const sm;
33364
33365 struct ll_disk ll;
33366 struct ll_disk old_ll;
33367 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33368 index 1cbfc6b..56e1dbb 100644
33369 --- a/drivers/md/persistent-data/dm-space-map.h
33370 +++ b/drivers/md/persistent-data/dm-space-map.h
33371 @@ -60,6 +60,7 @@ struct dm_space_map {
33372 int (*root_size)(struct dm_space_map *sm, size_t *result);
33373 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33374 };
33375 +typedef struct dm_space_map __no_const dm_space_map_no_const;
33376
33377 /*----------------------------------------------------------------*/
33378
33379 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33380 index 15dd59b..662bb39 100644
33381 --- a/drivers/md/raid1.c
33382 +++ b/drivers/md/raid1.c
33383 @@ -1688,7 +1688,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33384 if (r1_sync_page_io(rdev, sect, s,
33385 bio->bi_io_vec[idx].bv_page,
33386 READ) != 0)
33387 - atomic_add(s, &rdev->corrected_errors);
33388 + atomic_add_unchecked(s, &rdev->corrected_errors);
33389 }
33390 sectors -= s;
33391 sect += s;
33392 @@ -1902,7 +1902,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33393 test_bit(In_sync, &rdev->flags)) {
33394 if (r1_sync_page_io(rdev, sect, s,
33395 conf->tmppage, READ)) {
33396 - atomic_add(s, &rdev->corrected_errors);
33397 + atomic_add_unchecked(s, &rdev->corrected_errors);
33398 printk(KERN_INFO
33399 "md/raid1:%s: read error corrected "
33400 "(%d sectors at %llu on %s)\n",
33401 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33402 index 3f91c2e..e1f37bd 100644
33403 --- a/drivers/md/raid10.c
33404 +++ b/drivers/md/raid10.c
33405 @@ -1684,7 +1684,7 @@ static void end_sync_read(struct bio *bio, int error)
33406 /* The write handler will notice the lack of
33407 * R10BIO_Uptodate and record any errors etc
33408 */
33409 - atomic_add(r10_bio->sectors,
33410 + atomic_add_unchecked(r10_bio->sectors,
33411 &conf->mirrors[d].rdev->corrected_errors);
33412
33413 /* for reconstruct, we always reschedule after a read.
33414 @@ -2033,7 +2033,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33415 {
33416 struct timespec cur_time_mon;
33417 unsigned long hours_since_last;
33418 - unsigned int read_errors = atomic_read(&rdev->read_errors);
33419 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33420
33421 ktime_get_ts(&cur_time_mon);
33422
33423 @@ -2055,9 +2055,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33424 * overflowing the shift of read_errors by hours_since_last.
33425 */
33426 if (hours_since_last >= 8 * sizeof(read_errors))
33427 - atomic_set(&rdev->read_errors, 0);
33428 + atomic_set_unchecked(&rdev->read_errors, 0);
33429 else
33430 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33431 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33432 }
33433
33434 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33435 @@ -2111,8 +2111,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33436 return;
33437
33438 check_decay_read_errors(mddev, rdev);
33439 - atomic_inc(&rdev->read_errors);
33440 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
33441 + atomic_inc_unchecked(&rdev->read_errors);
33442 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33443 char b[BDEVNAME_SIZE];
33444 bdevname(rdev->bdev, b);
33445
33446 @@ -2120,7 +2120,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33447 "md/raid10:%s: %s: Raid device exceeded "
33448 "read_error threshold [cur %d:max %d]\n",
33449 mdname(mddev), b,
33450 - atomic_read(&rdev->read_errors), max_read_errors);
33451 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33452 printk(KERN_NOTICE
33453 "md/raid10:%s: %s: Failing raid device\n",
33454 mdname(mddev), b);
33455 @@ -2271,7 +2271,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33456 (unsigned long long)(
33457 sect + rdev->data_offset),
33458 bdevname(rdev->bdev, b));
33459 - atomic_add(s, &rdev->corrected_errors);
33460 + atomic_add_unchecked(s, &rdev->corrected_errors);
33461 }
33462
33463 rdev_dec_pending(rdev, mddev);
33464 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33465 index f351422..85c01bb 100644
33466 --- a/drivers/md/raid5.c
33467 +++ b/drivers/md/raid5.c
33468 @@ -1686,18 +1686,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33469 (unsigned long long)(sh->sector
33470 + rdev->data_offset),
33471 bdevname(rdev->bdev, b));
33472 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33473 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33474 clear_bit(R5_ReadError, &sh->dev[i].flags);
33475 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33476 }
33477 - if (atomic_read(&rdev->read_errors))
33478 - atomic_set(&rdev->read_errors, 0);
33479 + if (atomic_read_unchecked(&rdev->read_errors))
33480 + atomic_set_unchecked(&rdev->read_errors, 0);
33481 } else {
33482 const char *bdn = bdevname(rdev->bdev, b);
33483 int retry = 0;
33484
33485 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33486 - atomic_inc(&rdev->read_errors);
33487 + atomic_inc_unchecked(&rdev->read_errors);
33488 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33489 printk_ratelimited(
33490 KERN_WARNING
33491 @@ -1726,7 +1726,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33492 (unsigned long long)(sh->sector
33493 + rdev->data_offset),
33494 bdn);
33495 - else if (atomic_read(&rdev->read_errors)
33496 + else if (atomic_read_unchecked(&rdev->read_errors)
33497 > conf->max_nr_stripes)
33498 printk(KERN_WARNING
33499 "md/raid:%s: Too many read errors, failing device %s.\n",
33500 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33501 index d88c4aa..17c80b1 100644
33502 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33503 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33504 @@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
33505 .subvendor = _subvend, .subdevice = _subdev, \
33506 .driver_data = (unsigned long)&_driverdata }
33507
33508 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33509 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33510 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33511 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33512 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33513 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33514 index a7d876f..8c21b61 100644
33515 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33516 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33517 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33518 union {
33519 dmx_ts_cb ts;
33520 dmx_section_cb sec;
33521 - } cb;
33522 + } __no_const cb;
33523
33524 struct dvb_demux *demux;
33525 void *priv;
33526 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33527 index 00a6732..70a682e 100644
33528 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33529 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33530 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33531 const struct dvb_device *template, void *priv, int type)
33532 {
33533 struct dvb_device *dvbdev;
33534 - struct file_operations *dvbdevfops;
33535 + file_operations_no_const *dvbdevfops;
33536 struct device *clsdev;
33537 int minor;
33538 int id;
33539 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33540 index 3940bb0..fb3952a 100644
33541 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33542 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33543 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33544
33545 struct dib0700_adapter_state {
33546 int (*set_param_save) (struct dvb_frontend *);
33547 -};
33548 +} __no_const;
33549
33550 static int dib7070_set_param_override(struct dvb_frontend *fe)
33551 {
33552 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33553 index 451c5a7..649f711 100644
33554 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33555 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33556 @@ -95,7 +95,7 @@ struct su3000_state {
33557
33558 struct s6x0_state {
33559 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33560 -};
33561 +} __no_const;
33562
33563 /* debug */
33564 static int dvb_usb_dw2102_debug;
33565 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33566 index 404f63a..4796533 100644
33567 --- a/drivers/media/dvb/frontends/dib3000.h
33568 +++ b/drivers/media/dvb/frontends/dib3000.h
33569 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33570 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33571 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33572 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33573 -};
33574 +} __no_const;
33575
33576 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33577 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33578 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33579 index 7539a5d..06531a6 100644
33580 --- a/drivers/media/dvb/ngene/ngene-cards.c
33581 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33582 @@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
33583
33584 /****************************************************************************/
33585
33586 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33587 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33588 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33589 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33590 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33591 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33592 index 16a089f..1661b11 100644
33593 --- a/drivers/media/radio/radio-cadet.c
33594 +++ b/drivers/media/radio/radio-cadet.c
33595 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33596 unsigned char readbuf[RDS_BUFFER];
33597 int i = 0;
33598
33599 + if (count > RDS_BUFFER)
33600 + return -EFAULT;
33601 mutex_lock(&dev->lock);
33602 if (dev->rdsstat == 0) {
33603 dev->rdsstat = 1;
33604 @@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33605 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33606 mutex_unlock(&dev->lock);
33607
33608 - if (copy_to_user(data, readbuf, i))
33609 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
33610 return -EFAULT;
33611 return i;
33612 }
33613 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33614 index 9cde353..8c6a1c3 100644
33615 --- a/drivers/media/video/au0828/au0828.h
33616 +++ b/drivers/media/video/au0828/au0828.h
33617 @@ -191,7 +191,7 @@ struct au0828_dev {
33618
33619 /* I2C */
33620 struct i2c_adapter i2c_adap;
33621 - struct i2c_algorithm i2c_algo;
33622 + i2c_algorithm_no_const i2c_algo;
33623 struct i2c_client i2c_client;
33624 u32 i2c_rc;
33625
33626 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33627 index 04bf662..e0ac026 100644
33628 --- a/drivers/media/video/cx88/cx88-alsa.c
33629 +++ b/drivers/media/video/cx88/cx88-alsa.c
33630 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33631 * Only boards with eeprom and byte 1 at eeprom=1 have it
33632 */
33633
33634 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33635 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33636 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33637 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33638 {0, }
33639 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33640 index 88cf9d9..bbc4b2c 100644
33641 --- a/drivers/media/video/omap/omap_vout.c
33642 +++ b/drivers/media/video/omap/omap_vout.c
33643 @@ -64,7 +64,6 @@ enum omap_vout_channels {
33644 OMAP_VIDEO2,
33645 };
33646
33647 -static struct videobuf_queue_ops video_vbq_ops;
33648 /* Variables configurable through module params*/
33649 static u32 video1_numbuffers = 3;
33650 static u32 video2_numbuffers = 3;
33651 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33652 {
33653 struct videobuf_queue *q;
33654 struct omap_vout_device *vout = NULL;
33655 + static struct videobuf_queue_ops video_vbq_ops = {
33656 + .buf_setup = omap_vout_buffer_setup,
33657 + .buf_prepare = omap_vout_buffer_prepare,
33658 + .buf_release = omap_vout_buffer_release,
33659 + .buf_queue = omap_vout_buffer_queue,
33660 + };
33661
33662 vout = video_drvdata(file);
33663 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33664 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33665 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33666
33667 q = &vout->vbq;
33668 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33669 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33670 - video_vbq_ops.buf_release = omap_vout_buffer_release;
33671 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33672 spin_lock_init(&vout->vbq_lock);
33673
33674 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
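Instead of a file-scope videobuf_queue_ops whose members were assigned every time the device was opened, the ops table becomes a function-local static built with designated initializers, so it is written only by its initializer and can be treated as read-only by the constify plugin. The same pattern in miniature, with an explicit const where the patch relies on the plugin to supply the qualifier (all names hypothetical):

	struct frob_ops {
		int (*prepare)(void);
		int (*queue)(void);
	};

	static int my_prepare(void) { return 0; }
	static int my_queue(void)   { return 1; }

	static int frob_open(void)
	{
		/* Initialized once at build time, never assigned at run time. */
		static const struct frob_ops ops = {
			.prepare = my_prepare,
			.queue   = my_queue,
		};

		return ops.prepare() + ops.queue();
	}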
33675 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33676 index 305e6aa..0143317 100644
33677 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33678 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33679 @@ -196,7 +196,7 @@ struct pvr2_hdw {
33680
33681 /* I2C stuff */
33682 struct i2c_adapter i2c_adap;
33683 - struct i2c_algorithm i2c_algo;
33684 + i2c_algorithm_no_const i2c_algo;
33685 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33686 int i2c_cx25840_hack_state;
33687 int i2c_linked;
33688 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33689 index 02194c0..091733b 100644
33690 --- a/drivers/media/video/timblogiw.c
33691 +++ b/drivers/media/video/timblogiw.c
33692 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33693
33694 /* Platform device functions */
33695
33696 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33697 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33698 .vidioc_querycap = timblogiw_querycap,
33699 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33700 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33701 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33702 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33703 };
33704
33705 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33706 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33707 .owner = THIS_MODULE,
33708 .open = timblogiw_open,
33709 .release = timblogiw_close,
33710 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33711 index a5c591f..db692a3 100644
33712 --- a/drivers/message/fusion/mptbase.c
33713 +++ b/drivers/message/fusion/mptbase.c
33714 @@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33715 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33716 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33717
33718 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33719 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33720 +#else
33721 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33722 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33723 +#endif
33724 +
33725 /*
33726 * Rounding UP to nearest 4-kB boundary here...
33727 */
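With GRKERNSEC_HIDESYM enabled the proc file prints NULL placeholders instead of the real request-frame kernel address and its DMA address, so /proc does not hand out pointers that make kernel exploitation easier. Mainline has a related but opt-in mechanism, the %pK format specifier, which prints zeroes unless kernel.kptr_restrict permits the reader to see the value; a hedged sketch of that alternative, not what this patch does:

	seq_printf(m, "  RequestFrames @ 0x%pK (Dma @ 0x%pK)\n",
		   (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);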
33728 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33729 index 551262e..7551198 100644
33730 --- a/drivers/message/fusion/mptsas.c
33731 +++ b/drivers/message/fusion/mptsas.c
33732 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33733 return 0;
33734 }
33735
33736 +static inline void
33737 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33738 +{
33739 + if (phy_info->port_details) {
33740 + phy_info->port_details->rphy = rphy;
33741 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33742 + ioc->name, rphy));
33743 + }
33744 +
33745 + if (rphy) {
33746 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33747 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33748 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33749 + ioc->name, rphy, rphy->dev.release));
33750 + }
33751 +}
33752 +
33753 /* no mutex */
33754 static void
33755 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33756 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
33757 return NULL;
33758 }
33759
33760 -static inline void
33761 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33762 -{
33763 - if (phy_info->port_details) {
33764 - phy_info->port_details->rphy = rphy;
33765 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33766 - ioc->name, rphy));
33767 - }
33768 -
33769 - if (rphy) {
33770 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33771 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33772 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33773 - ioc->name, rphy, rphy->dev.release));
33774 - }
33775 -}
33776 -
33777 static inline struct sas_port *
33778 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33779 {
33780 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
33781 index 0c3ced7..1fe34ec 100644
33782 --- a/drivers/message/fusion/mptscsih.c
33783 +++ b/drivers/message/fusion/mptscsih.c
33784 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33785
33786 h = shost_priv(SChost);
33787
33788 - if (h) {
33789 - if (h->info_kbuf == NULL)
33790 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33791 - return h->info_kbuf;
33792 - h->info_kbuf[0] = '\0';
33793 + if (!h)
33794 + return NULL;
33795
33796 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33797 - h->info_kbuf[size-1] = '\0';
33798 - }
33799 + if (h->info_kbuf == NULL)
33800 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33801 + return h->info_kbuf;
33802 + h->info_kbuf[0] = '\0';
33803 +
33804 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33805 + h->info_kbuf[size-1] = '\0';
33806
33807 return h->info_kbuf;
33808 }
33809 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
33810 index 6d115c7..58ff7fd 100644
33811 --- a/drivers/message/i2o/i2o_proc.c
33812 +++ b/drivers/message/i2o/i2o_proc.c
33813 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
33814 "Array Controller Device"
33815 };
33816
33817 -static char *chtostr(u8 * chars, int n)
33818 -{
33819 - char tmp[256];
33820 - tmp[0] = 0;
33821 - return strncat(tmp, (char *)chars, n);
33822 -}
33823 -
33824 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33825 char *group)
33826 {
33827 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
33828
33829 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33830 seq_printf(seq, "%-#8x", ddm_table.module_id);
33831 - seq_printf(seq, "%-29s",
33832 - chtostr(ddm_table.module_name_version, 28));
33833 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33834 seq_printf(seq, "%9d ", ddm_table.data_size);
33835 seq_printf(seq, "%8d", ddm_table.code_size);
33836
33837 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
33838
33839 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33840 seq_printf(seq, "%-#8x", dst->module_id);
33841 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33842 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33843 + seq_printf(seq, "%-.28s", dst->module_name_version);
33844 + seq_printf(seq, "%-.8s", dst->date);
33845 seq_printf(seq, "%8d ", dst->module_size);
33846 seq_printf(seq, "%8d ", dst->mpb_size);
33847 seq_printf(seq, "0x%04x", dst->module_flags);
33848 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
33849 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33850 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33851 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33852 - seq_printf(seq, "Vendor info : %s\n",
33853 - chtostr((u8 *) (work32 + 2), 16));
33854 - seq_printf(seq, "Product info : %s\n",
33855 - chtostr((u8 *) (work32 + 6), 16));
33856 - seq_printf(seq, "Description : %s\n",
33857 - chtostr((u8 *) (work32 + 10), 16));
33858 - seq_printf(seq, "Product rev. : %s\n",
33859 - chtostr((u8 *) (work32 + 14), 8));
33860 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33861 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33862 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33863 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33864
33865 seq_printf(seq, "Serial number : ");
33866 print_serial_number(seq, (u8 *) (work32 + 16),
33867 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
33868 }
33869
33870 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33871 - seq_printf(seq, "Module name : %s\n",
33872 - chtostr(result.module_name, 24));
33873 - seq_printf(seq, "Module revision : %s\n",
33874 - chtostr(result.module_rev, 8));
33875 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
33876 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33877
33878 seq_printf(seq, "Serial number : ");
33879 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33880 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
33881 return 0;
33882 }
33883
33884 - seq_printf(seq, "Device name : %s\n",
33885 - chtostr(result.device_name, 64));
33886 - seq_printf(seq, "Service name : %s\n",
33887 - chtostr(result.service_name, 64));
33888 - seq_printf(seq, "Physical name : %s\n",
33889 - chtostr(result.physical_location, 64));
33890 - seq_printf(seq, "Instance number : %s\n",
33891 - chtostr(result.instance_number, 4));
33892 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
33893 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
33894 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33895 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33896
33897 return 0;
33898 }
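The removed chtostr() helper copied up to n bytes into a 256-byte stack array and returned a pointer to that local array, a pointer that is already dangling by the time seq_printf() dereferences it. Printing the fields directly with an explicit precision such as "%.28s" needs no temporary at all: the precision caps how many bytes are read and printed, so fixed-width, non-NUL-terminated firmware strings are handled safely. A minimal illustration (values invented):

	/* No '\0' anywhere in the 8-byte field; "%.8s" still stops at 8 bytes. */
	char module_rev[8] = { 'A', '1', '.', '0', '2', ' ', ' ', ' ' };

	seq_printf(seq, "Module revision     : %.8s\n", module_rev);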
33899 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
33900 index a8c08f3..155fe3d 100644
33901 --- a/drivers/message/i2o/iop.c
33902 +++ b/drivers/message/i2o/iop.c
33903 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
33904
33905 spin_lock_irqsave(&c->context_list_lock, flags);
33906
33907 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33908 - atomic_inc(&c->context_list_counter);
33909 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33910 + atomic_inc_unchecked(&c->context_list_counter);
33911
33912 - entry->context = atomic_read(&c->context_list_counter);
33913 + entry->context = atomic_read_unchecked(&c->context_list_counter);
33914
33915 list_add(&entry->list, &c->context_list);
33916
33917 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
33918
33919 #if BITS_PER_LONG == 64
33920 spin_lock_init(&c->context_list_lock);
33921 - atomic_set(&c->context_list_counter, 0);
33922 + atomic_set_unchecked(&c->context_list_counter, 0);
33923 INIT_LIST_HEAD(&c->context_list);
33924 #endif
33925
33926 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
33927 index 7ce65f4..e66e9bc 100644
33928 --- a/drivers/mfd/abx500-core.c
33929 +++ b/drivers/mfd/abx500-core.c
33930 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
33931
33932 struct abx500_device_entry {
33933 struct list_head list;
33934 - struct abx500_ops ops;
33935 + abx500_ops_no_const ops;
33936 struct device *dev;
33937 };
33938
33939 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
33940 index a9223ed..4127b13 100644
33941 --- a/drivers/mfd/janz-cmodio.c
33942 +++ b/drivers/mfd/janz-cmodio.c
33943 @@ -13,6 +13,7 @@
33944
33945 #include <linux/kernel.h>
33946 #include <linux/module.h>
33947 +#include <linux/slab.h>
33948 #include <linux/init.h>
33949 #include <linux/pci.h>
33950 #include <linux/interrupt.h>
33951 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
33952 index a981e2a..5ca0c8b 100644
33953 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
33954 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
33955 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
33956 * the lid is closed. This leads to interrupts as soon as a little move
33957 * is done.
33958 */
33959 - atomic_inc(&lis3->count);
33960 + atomic_inc_unchecked(&lis3->count);
33961
33962 wake_up_interruptible(&lis3->misc_wait);
33963 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
33964 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33965 if (lis3->pm_dev)
33966 pm_runtime_get_sync(lis3->pm_dev);
33967
33968 - atomic_set(&lis3->count, 0);
33969 + atomic_set_unchecked(&lis3->count, 0);
33970 return 0;
33971 }
33972
33973 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33974 add_wait_queue(&lis3->misc_wait, &wait);
33975 while (true) {
33976 set_current_state(TASK_INTERRUPTIBLE);
33977 - data = atomic_xchg(&lis3->count, 0);
33978 + data = atomic_xchg_unchecked(&lis3->count, 0);
33979 if (data)
33980 break;
33981
33982 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33983 struct lis3lv02d, miscdev);
33984
33985 poll_wait(file, &lis3->misc_wait, wait);
33986 - if (atomic_read(&lis3->count))
33987 + if (atomic_read_unchecked(&lis3->count))
33988 return POLLIN | POLLRDNORM;
33989 return 0;
33990 }
33991 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
33992 index 2b1482a..5d33616 100644
33993 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
33994 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
33995 @@ -266,7 +266,7 @@ struct lis3lv02d {
33996 struct input_polled_dev *idev; /* input device */
33997 struct platform_device *pdev; /* platform device */
33998 struct regulator_bulk_data regulators[2];
33999 - atomic_t count; /* interrupt count after last read */
34000 + atomic_unchecked_t count; /* interrupt count after last read */
34001 union axis_conversion ac; /* hw -> logical axis */
34002 int mapped_btns[3];
34003
34004 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34005 index 2f30bad..c4c13d0 100644
34006 --- a/drivers/misc/sgi-gru/gruhandles.c
34007 +++ b/drivers/misc/sgi-gru/gruhandles.c
34008 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34009 unsigned long nsec;
34010
34011 nsec = CLKS2NSEC(clks);
34012 - atomic_long_inc(&mcs_op_statistics[op].count);
34013 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
34014 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34015 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34016 if (mcs_op_statistics[op].max < nsec)
34017 mcs_op_statistics[op].max = nsec;
34018 }
34019 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34020 index 950dbe9..eeef0f8 100644
34021 --- a/drivers/misc/sgi-gru/gruprocfs.c
34022 +++ b/drivers/misc/sgi-gru/gruprocfs.c
34023 @@ -32,9 +32,9 @@
34024
34025 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34026
34027 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34028 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34029 {
34030 - unsigned long val = atomic_long_read(v);
34031 + unsigned long val = atomic_long_read_unchecked(v);
34032
34033 seq_printf(s, "%16lu %s\n", val, id);
34034 }
34035 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34036
34037 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34038 for (op = 0; op < mcsop_last; op++) {
34039 - count = atomic_long_read(&mcs_op_statistics[op].count);
34040 - total = atomic_long_read(&mcs_op_statistics[op].total);
34041 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34042 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34043 max = mcs_op_statistics[op].max;
34044 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34045 count ? total / count : 0, max);
34046 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34047 index 5c3ce24..4915ccb 100644
34048 --- a/drivers/misc/sgi-gru/grutables.h
34049 +++ b/drivers/misc/sgi-gru/grutables.h
34050 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34051 * GRU statistics.
34052 */
34053 struct gru_stats_s {
34054 - atomic_long_t vdata_alloc;
34055 - atomic_long_t vdata_free;
34056 - atomic_long_t gts_alloc;
34057 - atomic_long_t gts_free;
34058 - atomic_long_t gms_alloc;
34059 - atomic_long_t gms_free;
34060 - atomic_long_t gts_double_allocate;
34061 - atomic_long_t assign_context;
34062 - atomic_long_t assign_context_failed;
34063 - atomic_long_t free_context;
34064 - atomic_long_t load_user_context;
34065 - atomic_long_t load_kernel_context;
34066 - atomic_long_t lock_kernel_context;
34067 - atomic_long_t unlock_kernel_context;
34068 - atomic_long_t steal_user_context;
34069 - atomic_long_t steal_kernel_context;
34070 - atomic_long_t steal_context_failed;
34071 - atomic_long_t nopfn;
34072 - atomic_long_t asid_new;
34073 - atomic_long_t asid_next;
34074 - atomic_long_t asid_wrap;
34075 - atomic_long_t asid_reuse;
34076 - atomic_long_t intr;
34077 - atomic_long_t intr_cbr;
34078 - atomic_long_t intr_tfh;
34079 - atomic_long_t intr_spurious;
34080 - atomic_long_t intr_mm_lock_failed;
34081 - atomic_long_t call_os;
34082 - atomic_long_t call_os_wait_queue;
34083 - atomic_long_t user_flush_tlb;
34084 - atomic_long_t user_unload_context;
34085 - atomic_long_t user_exception;
34086 - atomic_long_t set_context_option;
34087 - atomic_long_t check_context_retarget_intr;
34088 - atomic_long_t check_context_unload;
34089 - atomic_long_t tlb_dropin;
34090 - atomic_long_t tlb_preload_page;
34091 - atomic_long_t tlb_dropin_fail_no_asid;
34092 - atomic_long_t tlb_dropin_fail_upm;
34093 - atomic_long_t tlb_dropin_fail_invalid;
34094 - atomic_long_t tlb_dropin_fail_range_active;
34095 - atomic_long_t tlb_dropin_fail_idle;
34096 - atomic_long_t tlb_dropin_fail_fmm;
34097 - atomic_long_t tlb_dropin_fail_no_exception;
34098 - atomic_long_t tfh_stale_on_fault;
34099 - atomic_long_t mmu_invalidate_range;
34100 - atomic_long_t mmu_invalidate_page;
34101 - atomic_long_t flush_tlb;
34102 - atomic_long_t flush_tlb_gru;
34103 - atomic_long_t flush_tlb_gru_tgh;
34104 - atomic_long_t flush_tlb_gru_zero_asid;
34105 + atomic_long_unchecked_t vdata_alloc;
34106 + atomic_long_unchecked_t vdata_free;
34107 + atomic_long_unchecked_t gts_alloc;
34108 + atomic_long_unchecked_t gts_free;
34109 + atomic_long_unchecked_t gms_alloc;
34110 + atomic_long_unchecked_t gms_free;
34111 + atomic_long_unchecked_t gts_double_allocate;
34112 + atomic_long_unchecked_t assign_context;
34113 + atomic_long_unchecked_t assign_context_failed;
34114 + atomic_long_unchecked_t free_context;
34115 + atomic_long_unchecked_t load_user_context;
34116 + atomic_long_unchecked_t load_kernel_context;
34117 + atomic_long_unchecked_t lock_kernel_context;
34118 + atomic_long_unchecked_t unlock_kernel_context;
34119 + atomic_long_unchecked_t steal_user_context;
34120 + atomic_long_unchecked_t steal_kernel_context;
34121 + atomic_long_unchecked_t steal_context_failed;
34122 + atomic_long_unchecked_t nopfn;
34123 + atomic_long_unchecked_t asid_new;
34124 + atomic_long_unchecked_t asid_next;
34125 + atomic_long_unchecked_t asid_wrap;
34126 + atomic_long_unchecked_t asid_reuse;
34127 + atomic_long_unchecked_t intr;
34128 + atomic_long_unchecked_t intr_cbr;
34129 + atomic_long_unchecked_t intr_tfh;
34130 + atomic_long_unchecked_t intr_spurious;
34131 + atomic_long_unchecked_t intr_mm_lock_failed;
34132 + atomic_long_unchecked_t call_os;
34133 + atomic_long_unchecked_t call_os_wait_queue;
34134 + atomic_long_unchecked_t user_flush_tlb;
34135 + atomic_long_unchecked_t user_unload_context;
34136 + atomic_long_unchecked_t user_exception;
34137 + atomic_long_unchecked_t set_context_option;
34138 + atomic_long_unchecked_t check_context_retarget_intr;
34139 + atomic_long_unchecked_t check_context_unload;
34140 + atomic_long_unchecked_t tlb_dropin;
34141 + atomic_long_unchecked_t tlb_preload_page;
34142 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34143 + atomic_long_unchecked_t tlb_dropin_fail_upm;
34144 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
34145 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
34146 + atomic_long_unchecked_t tlb_dropin_fail_idle;
34147 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
34148 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34149 + atomic_long_unchecked_t tfh_stale_on_fault;
34150 + atomic_long_unchecked_t mmu_invalidate_range;
34151 + atomic_long_unchecked_t mmu_invalidate_page;
34152 + atomic_long_unchecked_t flush_tlb;
34153 + atomic_long_unchecked_t flush_tlb_gru;
34154 + atomic_long_unchecked_t flush_tlb_gru_tgh;
34155 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34156
34157 - atomic_long_t copy_gpa;
34158 - atomic_long_t read_gpa;
34159 + atomic_long_unchecked_t copy_gpa;
34160 + atomic_long_unchecked_t read_gpa;
34161
34162 - atomic_long_t mesq_receive;
34163 - atomic_long_t mesq_receive_none;
34164 - atomic_long_t mesq_send;
34165 - atomic_long_t mesq_send_failed;
34166 - atomic_long_t mesq_noop;
34167 - atomic_long_t mesq_send_unexpected_error;
34168 - atomic_long_t mesq_send_lb_overflow;
34169 - atomic_long_t mesq_send_qlimit_reached;
34170 - atomic_long_t mesq_send_amo_nacked;
34171 - atomic_long_t mesq_send_put_nacked;
34172 - atomic_long_t mesq_page_overflow;
34173 - atomic_long_t mesq_qf_locked;
34174 - atomic_long_t mesq_qf_noop_not_full;
34175 - atomic_long_t mesq_qf_switch_head_failed;
34176 - atomic_long_t mesq_qf_unexpected_error;
34177 - atomic_long_t mesq_noop_unexpected_error;
34178 - atomic_long_t mesq_noop_lb_overflow;
34179 - atomic_long_t mesq_noop_qlimit_reached;
34180 - atomic_long_t mesq_noop_amo_nacked;
34181 - atomic_long_t mesq_noop_put_nacked;
34182 - atomic_long_t mesq_noop_page_overflow;
34183 + atomic_long_unchecked_t mesq_receive;
34184 + atomic_long_unchecked_t mesq_receive_none;
34185 + atomic_long_unchecked_t mesq_send;
34186 + atomic_long_unchecked_t mesq_send_failed;
34187 + atomic_long_unchecked_t mesq_noop;
34188 + atomic_long_unchecked_t mesq_send_unexpected_error;
34189 + atomic_long_unchecked_t mesq_send_lb_overflow;
34190 + atomic_long_unchecked_t mesq_send_qlimit_reached;
34191 + atomic_long_unchecked_t mesq_send_amo_nacked;
34192 + atomic_long_unchecked_t mesq_send_put_nacked;
34193 + atomic_long_unchecked_t mesq_page_overflow;
34194 + atomic_long_unchecked_t mesq_qf_locked;
34195 + atomic_long_unchecked_t mesq_qf_noop_not_full;
34196 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
34197 + atomic_long_unchecked_t mesq_qf_unexpected_error;
34198 + atomic_long_unchecked_t mesq_noop_unexpected_error;
34199 + atomic_long_unchecked_t mesq_noop_lb_overflow;
34200 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
34201 + atomic_long_unchecked_t mesq_noop_amo_nacked;
34202 + atomic_long_unchecked_t mesq_noop_put_nacked;
34203 + atomic_long_unchecked_t mesq_noop_page_overflow;
34204
34205 };
34206
34207 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34208 tghop_invalidate, mcsop_last};
34209
34210 struct mcs_op_statistic {
34211 - atomic_long_t count;
34212 - atomic_long_t total;
34213 + atomic_long_unchecked_t count;
34214 + atomic_long_unchecked_t total;
34215 unsigned long max;
34216 };
34217
34218 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34219
34220 #define STAT(id) do { \
34221 if (gru_options & OPT_STATS) \
34222 - atomic_long_inc(&gru_stats.id); \
34223 + atomic_long_inc_unchecked(&gru_stats.id); \
34224 } while (0)
34225
34226 #ifdef CONFIG_SGI_GRU_DEBUG
34227 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34228 index c862cd4..0d176fe 100644
34229 --- a/drivers/misc/sgi-xp/xp.h
34230 +++ b/drivers/misc/sgi-xp/xp.h
34231 @@ -288,7 +288,7 @@ struct xpc_interface {
34232 xpc_notify_func, void *);
34233 void (*received) (short, int, void *);
34234 enum xp_retval (*partid_to_nasids) (short, void *);
34235 -};
34236 +} __no_const;
34237
34238 extern struct xpc_interface xpc_interface;
34239
34240 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34241 index b94d5f7..7f494c5 100644
34242 --- a/drivers/misc/sgi-xp/xpc.h
34243 +++ b/drivers/misc/sgi-xp/xpc.h
34244 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
34245 void (*received_payload) (struct xpc_channel *, void *);
34246 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34247 };
34248 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34249
34250 /* struct xpc_partition act_state values (for XPC HB) */
34251
34252 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34253 /* found in xpc_main.c */
34254 extern struct device *xpc_part;
34255 extern struct device *xpc_chan;
34256 -extern struct xpc_arch_operations xpc_arch_ops;
34257 +extern xpc_arch_operations_no_const xpc_arch_ops;
34258 extern int xpc_disengage_timelimit;
34259 extern int xpc_disengage_timedout;
34260 extern int xpc_activate_IRQ_rcvd;
34261 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34262 index 8d082b4..aa749ae 100644
34263 --- a/drivers/misc/sgi-xp/xpc_main.c
34264 +++ b/drivers/misc/sgi-xp/xpc_main.c
34265 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34266 .notifier_call = xpc_system_die,
34267 };
34268
34269 -struct xpc_arch_operations xpc_arch_ops;
34270 +xpc_arch_operations_no_const xpc_arch_ops;
34271
34272 /*
34273 * Timer function to enforce the timelimit on the partition disengage.
34274 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34275 index 69ef0be..f3ef91e 100644
34276 --- a/drivers/mmc/host/sdhci-pci.c
34277 +++ b/drivers/mmc/host/sdhci-pci.c
34278 @@ -652,7 +652,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34279 .probe = via_probe,
34280 };
34281
34282 -static const struct pci_device_id pci_ids[] __devinitdata = {
34283 +static const struct pci_device_id pci_ids[] __devinitconst = {
34284 {
34285 .vendor = PCI_VENDOR_ID_RICOH,
34286 .device = PCI_DEVICE_ID_RICOH_R5C822,
34287 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34288 index a4eb8b5..8c0628f 100644
34289 --- a/drivers/mtd/devices/doc2000.c
34290 +++ b/drivers/mtd/devices/doc2000.c
34291 @@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34292
34293 /* The ECC will not be calculated correctly if less than 512 is written */
34294 /* DBB-
34295 - if (len != 0x200 && eccbuf)
34296 + if (len != 0x200)
34297 printk(KERN_WARNING
34298 "ECC needs a full sector write (adr: %lx size %lx)\n",
34299 (long) to, (long) len);
34300 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34301 index a9e57d6..c6d8731 100644
34302 --- a/drivers/mtd/nand/denali.c
34303 +++ b/drivers/mtd/nand/denali.c
34304 @@ -26,6 +26,7 @@
34305 #include <linux/pci.h>
34306 #include <linux/mtd/mtd.h>
34307 #include <linux/module.h>
34308 +#include <linux/slab.h>
34309
34310 #include "denali.h"
34311
34312 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34313 index 51b9d6a..52af9a7 100644
34314 --- a/drivers/mtd/nftlmount.c
34315 +++ b/drivers/mtd/nftlmount.c
34316 @@ -24,6 +24,7 @@
34317 #include <asm/errno.h>
34318 #include <linux/delay.h>
34319 #include <linux/slab.h>
34320 +#include <linux/sched.h>
34321 #include <linux/mtd/mtd.h>
34322 #include <linux/mtd/nand.h>
34323 #include <linux/mtd/nftl.h>
34324 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34325 index 6762dc4..9956862 100644
34326 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
34327 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34328 @@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34329 */
34330
34331 #define ATL2_PARAM(X, desc) \
34332 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34333 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34334 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34335 MODULE_PARM_DESC(X, desc);
34336 #else
34337 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34338 index 61a7670..7da6e34 100644
34339 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34340 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34341 @@ -483,7 +483,7 @@ struct bnx2x_rx_mode_obj {
34342
34343 int (*wait_comp)(struct bnx2x *bp,
34344 struct bnx2x_rx_mode_ramrod_params *p);
34345 -};
34346 +} __no_const;
34347
34348 /********************** Set multicast group ***********************************/
34349
34350 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34351 index 93865f8..5448741 100644
34352 --- a/drivers/net/ethernet/broadcom/tg3.h
34353 +++ b/drivers/net/ethernet/broadcom/tg3.h
34354 @@ -140,6 +140,7 @@
34355 #define CHIPREV_ID_5750_A0 0x4000
34356 #define CHIPREV_ID_5750_A1 0x4001
34357 #define CHIPREV_ID_5750_A3 0x4003
34358 +#define CHIPREV_ID_5750_C1 0x4201
34359 #define CHIPREV_ID_5750_C2 0x4202
34360 #define CHIPREV_ID_5752_A0_HW 0x5000
34361 #define CHIPREV_ID_5752_A0 0x6000
34362 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34363 index c4e8643..0979484 100644
34364 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34365 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34366 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34367 */
34368 struct l2t_skb_cb {
34369 arp_failure_handler_func arp_failure_handler;
34370 -};
34371 +} __no_const;
34372
34373 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34374
34375 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34376 index 18b106c..2b38d36 100644
34377 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
34378 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34379 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34380 for (i=0; i<ETH_ALEN; i++) {
34381 tmp.addr[i] = dev->dev_addr[i];
34382 }
34383 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34384 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34385 break;
34386
34387 case DE4X5_SET_HWADDR: /* Set the hardware address */
34388 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34389 spin_lock_irqsave(&lp->lock, flags);
34390 memcpy(&statbuf, &lp->pktStats, ioc->len);
34391 spin_unlock_irqrestore(&lp->lock, flags);
34392 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34393 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34394 return -EFAULT;
34395 break;
34396 }
34397 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34398 index ed7d1dc..d426748 100644
34399 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
34400 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34401 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34402 {NULL}};
34403
34404
34405 -static const char *block_name[] __devinitdata = {
34406 +static const char *block_name[] __devinitconst = {
34407 "21140 non-MII",
34408 "21140 MII PHY",
34409 "21142 Serial PHY",
34410 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34411 index 2ac6fff..2d127d0 100644
34412 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34413 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34414 @@ -236,7 +236,7 @@ struct pci_id_info {
34415 int drv_flags; /* Driver use, intended as capability flags. */
34416 };
34417
34418 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34419 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34420 { /* Sometime a Level-One switch card. */
34421 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34422 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34423 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34424 index d783f4f..97fa1b0 100644
34425 --- a/drivers/net/ethernet/dlink/sundance.c
34426 +++ b/drivers/net/ethernet/dlink/sundance.c
34427 @@ -218,7 +218,7 @@ enum {
34428 struct pci_id_info {
34429 const char *name;
34430 };
34431 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34432 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34433 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34434 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34435 {"D-Link DFE-580TX 4 port Server Adapter"},
34436 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34437 index 528a886..e6a98a3 100644
34438 --- a/drivers/net/ethernet/emulex/benet/be_main.c
34439 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
34440 @@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34441
34442 if (wrapped)
34443 newacc += 65536;
34444 - ACCESS_ONCE(*acc) = newacc;
34445 + ACCESS_ONCE_RW(*acc) = newacc;
34446 }
34447
34448 void be_parse_stats(struct be_adapter *adapter)
34449 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34450 index 16b0704..d2c07d7 100644
34451 --- a/drivers/net/ethernet/faraday/ftgmac100.c
34452 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
34453 @@ -31,6 +31,8 @@
34454 #include <linux/netdevice.h>
34455 #include <linux/phy.h>
34456 #include <linux/platform_device.h>
34457 +#include <linux/interrupt.h>
34458 +#include <linux/irqreturn.h>
34459 #include <net/ip.h>
34460
34461 #include "ftgmac100.h"
34462 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34463 index 829b109..4ae5f6a 100644
34464 --- a/drivers/net/ethernet/faraday/ftmac100.c
34465 +++ b/drivers/net/ethernet/faraday/ftmac100.c
34466 @@ -31,6 +31,8 @@
34467 #include <linux/module.h>
34468 #include <linux/netdevice.h>
34469 #include <linux/platform_device.h>
34470 +#include <linux/interrupt.h>
34471 +#include <linux/irqreturn.h>
34472
34473 #include "ftmac100.h"
34474
34475 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34476 index 1637b98..c42f87b 100644
34477 --- a/drivers/net/ethernet/fealnx.c
34478 +++ b/drivers/net/ethernet/fealnx.c
34479 @@ -150,7 +150,7 @@ struct chip_info {
34480 int flags;
34481 };
34482
34483 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34484 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34485 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34486 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34487 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34488 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34489 index f82ecf5..7d59ecb 100644
34490 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34491 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34492 @@ -784,6 +784,7 @@ struct e1000_mac_operations {
34493 void (*config_collision_dist)(struct e1000_hw *);
34494 s32 (*read_mac_addr)(struct e1000_hw *);
34495 };
34496 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34497
34498 /*
34499 * When to use various PHY register access functions:
34500 @@ -824,6 +825,7 @@ struct e1000_phy_operations {
34501 void (*power_up)(struct e1000_hw *);
34502 void (*power_down)(struct e1000_hw *);
34503 };
34504 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34505
34506 /* Function pointers for the NVM. */
34507 struct e1000_nvm_operations {
34508 @@ -836,9 +838,10 @@ struct e1000_nvm_operations {
34509 s32 (*validate)(struct e1000_hw *);
34510 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34511 };
34512 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34513
34514 struct e1000_mac_info {
34515 - struct e1000_mac_operations ops;
34516 + e1000_mac_operations_no_const ops;
34517 u8 addr[ETH_ALEN];
34518 u8 perm_addr[ETH_ALEN];
34519
34520 @@ -879,7 +882,7 @@ struct e1000_mac_info {
34521 };
34522
34523 struct e1000_phy_info {
34524 - struct e1000_phy_operations ops;
34525 + e1000_phy_operations_no_const ops;
34526
34527 enum e1000_phy_type type;
34528
34529 @@ -913,7 +916,7 @@ struct e1000_phy_info {
34530 };
34531
34532 struct e1000_nvm_info {
34533 - struct e1000_nvm_operations ops;
34534 + e1000_nvm_operations_no_const ops;
34535
34536 enum e1000_nvm_type type;
34537 enum e1000_nvm_override override;
34538 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34539 index f67cbd3..cef9e3d 100644
34540 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34541 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34542 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
34543 s32 (*read_mac_addr)(struct e1000_hw *);
34544 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34545 };
34546 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34547
34548 struct e1000_phy_operations {
34549 s32 (*acquire)(struct e1000_hw *);
34550 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
34551 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34552 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34553 };
34554 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34555
34556 struct e1000_nvm_operations {
34557 s32 (*acquire)(struct e1000_hw *);
34558 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34559 s32 (*update)(struct e1000_hw *);
34560 s32 (*validate)(struct e1000_hw *);
34561 };
34562 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34563
34564 struct e1000_info {
34565 s32 (*get_invariants)(struct e1000_hw *);
34566 @@ -350,7 +353,7 @@ struct e1000_info {
34567 extern const struct e1000_info e1000_82575_info;
34568
34569 struct e1000_mac_info {
34570 - struct e1000_mac_operations ops;
34571 + e1000_mac_operations_no_const ops;
34572
34573 u8 addr[6];
34574 u8 perm_addr[6];
34575 @@ -388,7 +391,7 @@ struct e1000_mac_info {
34576 };
34577
34578 struct e1000_phy_info {
34579 - struct e1000_phy_operations ops;
34580 + e1000_phy_operations_no_const ops;
34581
34582 enum e1000_phy_type type;
34583
34584 @@ -423,7 +426,7 @@ struct e1000_phy_info {
34585 };
34586
34587 struct e1000_nvm_info {
34588 - struct e1000_nvm_operations ops;
34589 + e1000_nvm_operations_no_const ops;
34590 enum e1000_nvm_type type;
34591 enum e1000_nvm_override override;
34592
34593 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34594 s32 (*check_for_ack)(struct e1000_hw *, u16);
34595 s32 (*check_for_rst)(struct e1000_hw *, u16);
34596 };
34597 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34598
34599 struct e1000_mbx_stats {
34600 u32 msgs_tx;
34601 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34602 };
34603
34604 struct e1000_mbx_info {
34605 - struct e1000_mbx_operations ops;
34606 + e1000_mbx_operations_no_const ops;
34607 struct e1000_mbx_stats stats;
34608 u32 timeout;
34609 u32 usec_delay;
34610 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34611 index 57db3c6..aa825fc 100644
34612 --- a/drivers/net/ethernet/intel/igbvf/vf.h
34613 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
34614 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
34615 s32 (*read_mac_addr)(struct e1000_hw *);
34616 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34617 };
34618 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34619
34620 struct e1000_mac_info {
34621 - struct e1000_mac_operations ops;
34622 + e1000_mac_operations_no_const ops;
34623 u8 addr[6];
34624 u8 perm_addr[6];
34625
34626 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34627 s32 (*check_for_ack)(struct e1000_hw *);
34628 s32 (*check_for_rst)(struct e1000_hw *);
34629 };
34630 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34631
34632 struct e1000_mbx_stats {
34633 u32 msgs_tx;
34634 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34635 };
34636
34637 struct e1000_mbx_info {
34638 - struct e1000_mbx_operations ops;
34639 + e1000_mbx_operations_no_const ops;
34640 struct e1000_mbx_stats stats;
34641 u32 timeout;
34642 u32 usec_delay;
34643 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34644 index 8636e83..ab9bbc3 100644
34645 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34646 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34647 @@ -2710,6 +2710,7 @@ struct ixgbe_eeprom_operations {
34648 s32 (*update_checksum)(struct ixgbe_hw *);
34649 u16 (*calc_checksum)(struct ixgbe_hw *);
34650 };
34651 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34652
34653 struct ixgbe_mac_operations {
34654 s32 (*init_hw)(struct ixgbe_hw *);
34655 @@ -2773,6 +2774,7 @@ struct ixgbe_mac_operations {
34656 /* Manageability interface */
34657 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34658 };
34659 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34660
34661 struct ixgbe_phy_operations {
34662 s32 (*identify)(struct ixgbe_hw *);
34663 @@ -2792,9 +2794,10 @@ struct ixgbe_phy_operations {
34664 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34665 s32 (*check_overtemp)(struct ixgbe_hw *);
34666 };
34667 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34668
34669 struct ixgbe_eeprom_info {
34670 - struct ixgbe_eeprom_operations ops;
34671 + ixgbe_eeprom_operations_no_const ops;
34672 enum ixgbe_eeprom_type type;
34673 u32 semaphore_delay;
34674 u16 word_size;
34675 @@ -2804,7 +2807,7 @@ struct ixgbe_eeprom_info {
34676
34677 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34678 struct ixgbe_mac_info {
34679 - struct ixgbe_mac_operations ops;
34680 + ixgbe_mac_operations_no_const ops;
34681 enum ixgbe_mac_type type;
34682 u8 addr[ETH_ALEN];
34683 u8 perm_addr[ETH_ALEN];
34684 @@ -2832,7 +2835,7 @@ struct ixgbe_mac_info {
34685 };
34686
34687 struct ixgbe_phy_info {
34688 - struct ixgbe_phy_operations ops;
34689 + ixgbe_phy_operations_no_const ops;
34690 struct mdio_if_info mdio;
34691 enum ixgbe_phy_type type;
34692 u32 id;
34693 @@ -2860,6 +2863,7 @@ struct ixgbe_mbx_operations {
34694 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
34695 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
34696 };
34697 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34698
34699 struct ixgbe_mbx_stats {
34700 u32 msgs_tx;
34701 @@ -2871,7 +2875,7 @@ struct ixgbe_mbx_stats {
34702 };
34703
34704 struct ixgbe_mbx_info {
34705 - struct ixgbe_mbx_operations ops;
34706 + ixgbe_mbx_operations_no_const ops;
34707 struct ixgbe_mbx_stats stats;
34708 u32 timeout;
34709 u32 usec_delay;
34710 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
34711 index 25c951d..cc7cf33 100644
34712 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
34713 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
34714 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
34715 s32 (*clear_vfta)(struct ixgbe_hw *);
34716 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
34717 };
34718 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34719
34720 enum ixgbe_mac_type {
34721 ixgbe_mac_unknown = 0,
34722 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
34723 };
34724
34725 struct ixgbe_mac_info {
34726 - struct ixgbe_mac_operations ops;
34727 + ixgbe_mac_operations_no_const ops;
34728 u8 addr[6];
34729 u8 perm_addr[6];
34730
34731 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
34732 s32 (*check_for_ack)(struct ixgbe_hw *);
34733 s32 (*check_for_rst)(struct ixgbe_hw *);
34734 };
34735 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34736
34737 struct ixgbe_mbx_stats {
34738 u32 msgs_tx;
34739 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
34740 };
34741
34742 struct ixgbe_mbx_info {
34743 - struct ixgbe_mbx_operations ops;
34744 + ixgbe_mbx_operations_no_const ops;
34745 struct ixgbe_mbx_stats stats;
34746 u32 timeout;
34747 u32 udelay;
34748 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
34749 index 8bb05b4..074796f 100644
34750 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
34751 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
34752 @@ -41,6 +41,7 @@
34753 #include <linux/slab.h>
34754 #include <linux/io-mapping.h>
34755 #include <linux/delay.h>
34756 +#include <linux/sched.h>
34757
34758 #include <linux/mlx4/device.h>
34759 #include <linux/mlx4/doorbell.h>
34760 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34761 index 5046a64..71ca936 100644
34762 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
34763 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34764 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34765 void (*link_down)(struct __vxge_hw_device *devh);
34766 void (*crit_err)(struct __vxge_hw_device *devh,
34767 enum vxge_hw_event type, u64 ext_data);
34768 -};
34769 +} __no_const;
34770
34771 /*
34772 * struct __vxge_hw_blockpool_entry - Block private data structure
34773 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34774 index 4a518a3..936b334 100644
34775 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34776 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34777 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34778 struct vxge_hw_mempool_dma *dma_object,
34779 u32 index,
34780 u32 is_last);
34781 -};
34782 +} __no_const;
34783
34784 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34785 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34786 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
34787 index ce6b44d..74f10c2 100644
34788 --- a/drivers/net/ethernet/realtek/r8169.c
34789 +++ b/drivers/net/ethernet/realtek/r8169.c
34790 @@ -708,17 +708,17 @@ struct rtl8169_private {
34791 struct mdio_ops {
34792 void (*write)(void __iomem *, int, int);
34793 int (*read)(void __iomem *, int);
34794 - } mdio_ops;
34795 + } __no_const mdio_ops;
34796
34797 struct pll_power_ops {
34798 void (*down)(struct rtl8169_private *);
34799 void (*up)(struct rtl8169_private *);
34800 - } pll_power_ops;
34801 + } __no_const pll_power_ops;
34802
34803 struct jumbo_ops {
34804 void (*enable)(struct rtl8169_private *);
34805 void (*disable)(struct rtl8169_private *);
34806 - } jumbo_ops;
34807 + } __no_const jumbo_ops;
34808
34809 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34810 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34811 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
34812 index a9deda8..5507c31 100644
34813 --- a/drivers/net/ethernet/sis/sis190.c
34814 +++ b/drivers/net/ethernet/sis/sis190.c
34815 @@ -1620,7 +1620,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34816 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34817 struct net_device *dev)
34818 {
34819 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34820 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34821 struct sis190_private *tp = netdev_priv(dev);
34822 struct pci_dev *isa_bridge;
34823 u8 reg, tmp8;
34824 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34825 index c07cfe9..81cbf7e 100644
34826 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34827 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34828 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
34829
34830 writel(value, ioaddr + MMC_CNTRL);
34831
34832 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34833 - MMC_CNTRL, value);
34834 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34835 +// MMC_CNTRL, value);
34836 }
34837
34838 /* To mask all all interrupts.*/
34839 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34840 index 48d56da..a27e46c 100644
34841 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34842 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
34843 @@ -1584,7 +1584,7 @@ static const struct file_operations stmmac_rings_status_fops = {
34844 .open = stmmac_sysfs_ring_open,
34845 .read = seq_read,
34846 .llseek = seq_lseek,
34847 - .release = seq_release,
34848 + .release = single_release,
34849 };
34850
34851 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
34852 @@ -1656,7 +1656,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
34853 .open = stmmac_sysfs_dma_cap_open,
34854 .read = seq_read,
34855 .llseek = seq_lseek,
34856 - .release = seq_release,
34857 + .release = single_release,
34858 };
34859
34860 static int stmmac_init_fs(struct net_device *dev)
34861 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
34862 index c358245..8c1de63 100644
34863 --- a/drivers/net/hyperv/hyperv_net.h
34864 +++ b/drivers/net/hyperv/hyperv_net.h
34865 @@ -98,7 +98,7 @@ struct rndis_device {
34866
34867 enum rndis_device_state state;
34868 bool link_state;
34869 - atomic_t new_req_id;
34870 + atomic_unchecked_t new_req_id;
34871
34872 spinlock_t request_lock;
34873 struct list_head req_list;
34874 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
34875 index d6be64b..5d97e3b 100644
34876 --- a/drivers/net/hyperv/rndis_filter.c
34877 +++ b/drivers/net/hyperv/rndis_filter.c
34878 @@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34879 * template
34880 */
34881 set = &rndis_msg->msg.set_req;
34882 - set->req_id = atomic_inc_return(&dev->new_req_id);
34883 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34884
34885 /* Add to the request list */
34886 spin_lock_irqsave(&dev->request_lock, flags);
34887 @@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34888
34889 /* Setup the rndis set */
34890 halt = &request->request_msg.msg.halt_req;
34891 - halt->req_id = atomic_inc_return(&dev->new_req_id);
34892 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34893
34894 /* Ignore return since this msg is optional. */
34895 rndis_filter_send_request(dev, request);
34896 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
34897 index cb8fd50..003ec38 100644
34898 --- a/drivers/net/macvtap.c
34899 +++ b/drivers/net/macvtap.c
34900 @@ -528,6 +528,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
34901 }
34902 base = (unsigned long)from->iov_base + offset1;
34903 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
34904 + if (i + size >= MAX_SKB_FRAGS)
34905 + return -EFAULT;
34906 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
34907 if ((num_pages != size) ||
34908 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
34909 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
34910 index 21d7151..8034208 100644
34911 --- a/drivers/net/ppp/ppp_generic.c
34912 +++ b/drivers/net/ppp/ppp_generic.c
34913 @@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34914 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34915 struct ppp_stats stats;
34916 struct ppp_comp_stats cstats;
34917 - char *vers;
34918
34919 switch (cmd) {
34920 case SIOCGPPPSTATS:
34921 @@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34922 break;
34923
34924 case SIOCGPPPVER:
34925 - vers = PPP_VERSION;
34926 - if (copy_to_user(addr, vers, strlen(vers) + 1))
34927 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34928 break;
34929 err = 0;
34930 break;
34931 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34932 index b715e6b..6d2490f 100644
34933 --- a/drivers/net/tokenring/abyss.c
34934 +++ b/drivers/net/tokenring/abyss.c
34935 @@ -450,10 +450,12 @@ static struct pci_driver abyss_driver = {
34936
34937 static int __init abyss_init (void)
34938 {
34939 - abyss_netdev_ops = tms380tr_netdev_ops;
34940 + pax_open_kernel();
34941 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34942
34943 - abyss_netdev_ops.ndo_open = abyss_open;
34944 - abyss_netdev_ops.ndo_stop = abyss_close;
34945 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34946 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34947 + pax_close_kernel();
34948
34949 return pci_register_driver(&abyss_driver);
34950 }
34951 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34952 index 28adcdf..ae82f35 100644
34953 --- a/drivers/net/tokenring/madgemc.c
34954 +++ b/drivers/net/tokenring/madgemc.c
34955 @@ -742,9 +742,11 @@ static struct mca_driver madgemc_driver = {
34956
34957 static int __init madgemc_init (void)
34958 {
34959 - madgemc_netdev_ops = tms380tr_netdev_ops;
34960 - madgemc_netdev_ops.ndo_open = madgemc_open;
34961 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34962 + pax_open_kernel();
34963 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34964 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34965 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34966 + pax_close_kernel();
34967
34968 return mca_register_driver (&madgemc_driver);
34969 }
34970 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34971 index 62d90e4..9d84237 100644
34972 --- a/drivers/net/tokenring/proteon.c
34973 +++ b/drivers/net/tokenring/proteon.c
34974 @@ -352,9 +352,11 @@ static int __init proteon_init(void)
34975 struct platform_device *pdev;
34976 int i, num = 0, err = 0;
34977
34978 - proteon_netdev_ops = tms380tr_netdev_ops;
34979 - proteon_netdev_ops.ndo_open = proteon_open;
34980 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34981 + pax_open_kernel();
34982 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34983 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34984 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34985 + pax_close_kernel();
34986
34987 err = platform_driver_register(&proteon_driver);
34988 if (err)
34989 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34990 index ee11e93..c8f19c7 100644
34991 --- a/drivers/net/tokenring/skisa.c
34992 +++ b/drivers/net/tokenring/skisa.c
34993 @@ -362,9 +362,11 @@ static int __init sk_isa_init(void)
34994 struct platform_device *pdev;
34995 int i, num = 0, err = 0;
34996
34997 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34998 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34999 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35000 + pax_open_kernel();
35001 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35002 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35003 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35004 + pax_close_kernel();
35005
35006 err = platform_driver_register(&sk_isa_driver);
35007 if (err)
35008 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35009 index 2d2a688..35f2372 100644
35010 --- a/drivers/net/usb/hso.c
35011 +++ b/drivers/net/usb/hso.c
35012 @@ -71,7 +71,7 @@
35013 #include <asm/byteorder.h>
35014 #include <linux/serial_core.h>
35015 #include <linux/serial.h>
35016 -
35017 +#include <asm/local.h>
35018
35019 #define MOD_AUTHOR "Option Wireless"
35020 #define MOD_DESCRIPTION "USB High Speed Option driver"
35021 @@ -257,7 +257,7 @@ struct hso_serial {
35022
35023 /* from usb_serial_port */
35024 struct tty_struct *tty;
35025 - int open_count;
35026 + local_t open_count;
35027 spinlock_t serial_lock;
35028
35029 int (*write_data) (struct hso_serial *serial);
35030 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35031 struct urb *urb;
35032
35033 urb = serial->rx_urb[0];
35034 - if (serial->open_count > 0) {
35035 + if (local_read(&serial->open_count) > 0) {
35036 count = put_rxbuf_data(urb, serial);
35037 if (count == -1)
35038 return;
35039 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35040 DUMP1(urb->transfer_buffer, urb->actual_length);
35041
35042 /* Anyone listening? */
35043 - if (serial->open_count == 0)
35044 + if (local_read(&serial->open_count) == 0)
35045 return;
35046
35047 if (status == 0) {
35048 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35049 spin_unlock_irq(&serial->serial_lock);
35050
35051 /* check for port already opened, if not set the termios */
35052 - serial->open_count++;
35053 - if (serial->open_count == 1) {
35054 + if (local_inc_return(&serial->open_count) == 1) {
35055 serial->rx_state = RX_IDLE;
35056 /* Force default termio settings */
35057 _hso_serial_set_termios(tty, NULL);
35058 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35059 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35060 if (result) {
35061 hso_stop_serial_device(serial->parent);
35062 - serial->open_count--;
35063 + local_dec(&serial->open_count);
35064 kref_put(&serial->parent->ref, hso_serial_ref_free);
35065 }
35066 } else {
35067 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35068
35069 /* reset the rts and dtr */
35070 /* do the actual close */
35071 - serial->open_count--;
35072 + local_dec(&serial->open_count);
35073
35074 - if (serial->open_count <= 0) {
35075 - serial->open_count = 0;
35076 + if (local_read(&serial->open_count) <= 0) {
35077 + local_set(&serial->open_count, 0);
35078 spin_lock_irq(&serial->serial_lock);
35079 if (serial->tty == tty) {
35080 serial->tty->driver_data = NULL;
35081 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35082
35083 /* the actual setup */
35084 spin_lock_irqsave(&serial->serial_lock, flags);
35085 - if (serial->open_count)
35086 + if (local_read(&serial->open_count))
35087 _hso_serial_set_termios(tty, old);
35088 else
35089 tty->termios = old;
35090 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35091 D1("Pending read interrupt on port %d\n", i);
35092 spin_lock(&serial->serial_lock);
35093 if (serial->rx_state == RX_IDLE &&
35094 - serial->open_count > 0) {
35095 + local_read(&serial->open_count) > 0) {
35096 /* Setup and send a ctrl req read on
35097 * port i */
35098 if (!serial->rx_urb_filled[0]) {
35099 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35100 /* Start all serial ports */
35101 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35102 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35103 - if (dev2ser(serial_table[i])->open_count) {
35104 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35105 result =
35106 hso_start_serial_device(serial_table[i], GFP_NOIO);
35107 hso_kick_transmit(dev2ser(serial_table[i]));
35108 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35109 index c54b7d37..af1f359 100644
35110 --- a/drivers/net/wireless/ath/ath.h
35111 +++ b/drivers/net/wireless/ath/ath.h
35112 @@ -119,6 +119,7 @@ struct ath_ops {
35113 void (*write_flush) (void *);
35114 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35115 };
35116 +typedef struct ath_ops __no_const ath_ops_no_const;
35117
35118 struct ath_common;
35119 struct ath_bus_ops;
35120 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35121 index aa2abaf..5f5152d 100644
35122 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35123 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35124 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35125 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35126 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35127
35128 - ACCESS_ONCE(ads->ds_link) = i->link;
35129 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35130 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
35131 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35132
35133 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35134 ctl6 = SM(i->keytype, AR_EncrType);
35135 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35136
35137 if ((i->is_first || i->is_last) &&
35138 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35139 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35140 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35141 | set11nTries(i->rates, 1)
35142 | set11nTries(i->rates, 2)
35143 | set11nTries(i->rates, 3)
35144 | (i->dur_update ? AR_DurUpdateEna : 0)
35145 | SM(0, AR_BurstDur);
35146
35147 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35148 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35149 | set11nRate(i->rates, 1)
35150 | set11nRate(i->rates, 2)
35151 | set11nRate(i->rates, 3);
35152 } else {
35153 - ACCESS_ONCE(ads->ds_ctl2) = 0;
35154 - ACCESS_ONCE(ads->ds_ctl3) = 0;
35155 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35156 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35157 }
35158
35159 if (!i->is_first) {
35160 - ACCESS_ONCE(ads->ds_ctl0) = 0;
35161 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35162 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35163 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35164 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35165 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35166 return;
35167 }
35168
35169 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35170 break;
35171 }
35172
35173 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35174 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35175 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35176 | SM(i->txpower, AR_XmitPower)
35177 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35178 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35179 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35180 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35181
35182 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35183 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35184 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35185 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35186
35187 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35188 return;
35189
35190 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35191 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35192 | set11nPktDurRTSCTS(i->rates, 1);
35193
35194 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35195 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35196 | set11nPktDurRTSCTS(i->rates, 3);
35197
35198 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35199 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35200 | set11nRateFlags(i->rates, 1)
35201 | set11nRateFlags(i->rates, 2)
35202 | set11nRateFlags(i->rates, 3)
35203 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35204 index a66a13b..0ef399e 100644
35205 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35206 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35207 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35208 (i->qcu << AR_TxQcuNum_S) | desc_len;
35209
35210 checksum += val;
35211 - ACCESS_ONCE(ads->info) = val;
35212 + ACCESS_ONCE_RW(ads->info) = val;
35213
35214 checksum += i->link;
35215 - ACCESS_ONCE(ads->link) = i->link;
35216 + ACCESS_ONCE_RW(ads->link) = i->link;
35217
35218 checksum += i->buf_addr[0];
35219 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35220 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35221 checksum += i->buf_addr[1];
35222 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35223 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35224 checksum += i->buf_addr[2];
35225 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35226 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35227 checksum += i->buf_addr[3];
35228 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35229 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35230
35231 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35232 - ACCESS_ONCE(ads->ctl3) = val;
35233 + ACCESS_ONCE_RW(ads->ctl3) = val;
35234 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35235 - ACCESS_ONCE(ads->ctl5) = val;
35236 + ACCESS_ONCE_RW(ads->ctl5) = val;
35237 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35238 - ACCESS_ONCE(ads->ctl7) = val;
35239 + ACCESS_ONCE_RW(ads->ctl7) = val;
35240 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35241 - ACCESS_ONCE(ads->ctl9) = val;
35242 + ACCESS_ONCE_RW(ads->ctl9) = val;
35243
35244 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35245 - ACCESS_ONCE(ads->ctl10) = checksum;
35246 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
35247
35248 if (i->is_first || i->is_last) {
35249 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35250 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35251 | set11nTries(i->rates, 1)
35252 | set11nTries(i->rates, 2)
35253 | set11nTries(i->rates, 3)
35254 | (i->dur_update ? AR_DurUpdateEna : 0)
35255 | SM(0, AR_BurstDur);
35256
35257 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35258 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35259 | set11nRate(i->rates, 1)
35260 | set11nRate(i->rates, 2)
35261 | set11nRate(i->rates, 3);
35262 } else {
35263 - ACCESS_ONCE(ads->ctl13) = 0;
35264 - ACCESS_ONCE(ads->ctl14) = 0;
35265 + ACCESS_ONCE_RW(ads->ctl13) = 0;
35266 + ACCESS_ONCE_RW(ads->ctl14) = 0;
35267 }
35268
35269 ads->ctl20 = 0;
35270 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35271
35272 ctl17 = SM(i->keytype, AR_EncrType);
35273 if (!i->is_first) {
35274 - ACCESS_ONCE(ads->ctl11) = 0;
35275 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35276 - ACCESS_ONCE(ads->ctl15) = 0;
35277 - ACCESS_ONCE(ads->ctl16) = 0;
35278 - ACCESS_ONCE(ads->ctl17) = ctl17;
35279 - ACCESS_ONCE(ads->ctl18) = 0;
35280 - ACCESS_ONCE(ads->ctl19) = 0;
35281 + ACCESS_ONCE_RW(ads->ctl11) = 0;
35282 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35283 + ACCESS_ONCE_RW(ads->ctl15) = 0;
35284 + ACCESS_ONCE_RW(ads->ctl16) = 0;
35285 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35286 + ACCESS_ONCE_RW(ads->ctl18) = 0;
35287 + ACCESS_ONCE_RW(ads->ctl19) = 0;
35288 return;
35289 }
35290
35291 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35292 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35293 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35294 | SM(i->txpower, AR_XmitPower)
35295 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35296 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35297 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35298 ctl12 |= SM(val, AR_PAPRDChainMask);
35299
35300 - ACCESS_ONCE(ads->ctl12) = ctl12;
35301 - ACCESS_ONCE(ads->ctl17) = ctl17;
35302 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35303 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35304
35305 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35306 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35307 | set11nPktDurRTSCTS(i->rates, 1);
35308
35309 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35310 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35311 | set11nPktDurRTSCTS(i->rates, 3);
35312
35313 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35314 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35315 | set11nRateFlags(i->rates, 1)
35316 | set11nRateFlags(i->rates, 2)
35317 | set11nRateFlags(i->rates, 3)
35318 | SM(i->rtscts_rate, AR_RTSCTSRate);
35319
35320 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35321 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35322 }
35323
35324 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35325 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35326 index e88f182..4e57f5d 100644
35327 --- a/drivers/net/wireless/ath/ath9k/hw.h
35328 +++ b/drivers/net/wireless/ath/ath9k/hw.h
35329 @@ -614,7 +614,7 @@ struct ath_hw_private_ops {
35330
35331 /* ANI */
35332 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35333 -};
35334 +} __no_const;
35335
35336 /**
35337 * struct ath_hw_ops - callbacks used by hardware code and driver code
35338 @@ -644,7 +644,7 @@ struct ath_hw_ops {
35339 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35340 struct ath_hw_antcomb_conf *antconf);
35341
35342 -};
35343 +} __no_const;
35344
35345 struct ath_nf_limits {
35346 s16 max;
35347 @@ -664,7 +664,7 @@ enum ath_cal_list {
35348 #define AH_FASTCC 0x4
35349
35350 struct ath_hw {
35351 - struct ath_ops reg_ops;
35352 + ath_ops_no_const reg_ops;
35353
35354 struct ieee80211_hw *hw;
35355 struct ath_common common;
35356 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35357 index af00e2c..ab04d34 100644
35358 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35359 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35360 @@ -545,7 +545,7 @@ struct phy_func_ptr {
35361 void (*carrsuppr)(struct brcms_phy *);
35362 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35363 void (*detach)(struct brcms_phy *);
35364 -};
35365 +} __no_const;
35366
35367 struct brcms_phy {
35368 struct brcms_phy_pub pubpi_ro;
35369 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35370 index faec404..a5277f1 100644
35371 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
35372 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35373 @@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35374 */
35375 if (il3945_mod_params.disable_hw_scan) {
35376 D_INFO("Disabling hw_scan\n");
35377 - il3945_mac_ops.hw_scan = NULL;
35378 + pax_open_kernel();
35379 + *(void **)&il3945_mac_ops.hw_scan = NULL;
35380 + pax_close_kernel();
35381 }
35382
35383 D_INFO("*** LOAD DRIVER ***\n");
35384 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35385 index b7ce6a6..5649756 100644
35386 --- a/drivers/net/wireless/mac80211_hwsim.c
35387 +++ b/drivers/net/wireless/mac80211_hwsim.c
35388 @@ -1721,9 +1721,11 @@ static int __init init_mac80211_hwsim(void)
35389 return -EINVAL;
35390
35391 if (fake_hw_scan) {
35392 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35393 - mac80211_hwsim_ops.sw_scan_start = NULL;
35394 - mac80211_hwsim_ops.sw_scan_complete = NULL;
35395 + pax_open_kernel();
35396 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35397 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35398 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35399 + pax_close_kernel();
35400 }
35401
35402 spin_lock_init(&hwsim_radio_lock);
35403 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35404 index 35225e9..95e6bf9 100644
35405 --- a/drivers/net/wireless/mwifiex/main.h
35406 +++ b/drivers/net/wireless/mwifiex/main.h
35407 @@ -537,7 +537,7 @@ struct mwifiex_if_ops {
35408 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35409 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35410 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35411 -};
35412 +} __no_const;
35413
35414 struct mwifiex_adapter {
35415 u8 iface_type;
35416 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35417 index d66e298..55b0a89 100644
35418 --- a/drivers/net/wireless/rndis_wlan.c
35419 +++ b/drivers/net/wireless/rndis_wlan.c
35420 @@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35421
35422 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35423
35424 - if (rts_threshold < 0 || rts_threshold > 2347)
35425 + if (rts_threshold > 2347)
35426 rts_threshold = 2347;
35427
35428 tmp = cpu_to_le32(rts_threshold);
35429 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35430 index 9d8f581..0f6589e 100644
35431 --- a/drivers/net/wireless/wl1251/wl1251.h
35432 +++ b/drivers/net/wireless/wl1251/wl1251.h
35433 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
35434 void (*reset)(struct wl1251 *wl);
35435 void (*enable_irq)(struct wl1251 *wl);
35436 void (*disable_irq)(struct wl1251 *wl);
35437 -};
35438 +} __no_const;
35439
35440 struct wl1251 {
35441 struct ieee80211_hw *hw;
35442 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35443 index f34b5b2..b5abb9f 100644
35444 --- a/drivers/oprofile/buffer_sync.c
35445 +++ b/drivers/oprofile/buffer_sync.c
35446 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35447 if (cookie == NO_COOKIE)
35448 offset = pc;
35449 if (cookie == INVALID_COOKIE) {
35450 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35451 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35452 offset = pc;
35453 }
35454 if (cookie != last_cookie) {
35455 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35456 /* add userspace sample */
35457
35458 if (!mm) {
35459 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35460 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35461 return 0;
35462 }
35463
35464 cookie = lookup_dcookie(mm, s->eip, &offset);
35465
35466 if (cookie == INVALID_COOKIE) {
35467 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35468 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35469 return 0;
35470 }
35471
35472 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35473 /* ignore backtraces if failed to add a sample */
35474 if (state == sb_bt_start) {
35475 state = sb_bt_ignore;
35476 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35477 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35478 }
35479 }
35480 release_mm(mm);
35481 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35482 index c0cc4e7..44d4e54 100644
35483 --- a/drivers/oprofile/event_buffer.c
35484 +++ b/drivers/oprofile/event_buffer.c
35485 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35486 }
35487
35488 if (buffer_pos == buffer_size) {
35489 - atomic_inc(&oprofile_stats.event_lost_overflow);
35490 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35491 return;
35492 }
35493
35494 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35495 index ed2c3ec..deda85a 100644
35496 --- a/drivers/oprofile/oprof.c
35497 +++ b/drivers/oprofile/oprof.c
35498 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35499 if (oprofile_ops.switch_events())
35500 return;
35501
35502 - atomic_inc(&oprofile_stats.multiplex_counter);
35503 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35504 start_switch_worker();
35505 }
35506
35507 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35508 index 917d28e..d62d981 100644
35509 --- a/drivers/oprofile/oprofile_stats.c
35510 +++ b/drivers/oprofile/oprofile_stats.c
35511 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35512 cpu_buf->sample_invalid_eip = 0;
35513 }
35514
35515 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35516 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35517 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35518 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35519 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35520 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35521 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35522 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35523 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35524 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35525 }
35526
35527
35528 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35529 index 38b6fc0..b5cbfce 100644
35530 --- a/drivers/oprofile/oprofile_stats.h
35531 +++ b/drivers/oprofile/oprofile_stats.h
35532 @@ -13,11 +13,11 @@
35533 #include <linux/atomic.h>
35534
35535 struct oprofile_stat_struct {
35536 - atomic_t sample_lost_no_mm;
35537 - atomic_t sample_lost_no_mapping;
35538 - atomic_t bt_lost_no_mapping;
35539 - atomic_t event_lost_overflow;
35540 - atomic_t multiplex_counter;
35541 + atomic_unchecked_t sample_lost_no_mm;
35542 + atomic_unchecked_t sample_lost_no_mapping;
35543 + atomic_unchecked_t bt_lost_no_mapping;
35544 + atomic_unchecked_t event_lost_overflow;
35545 + atomic_unchecked_t multiplex_counter;
35546 };
35547
35548 extern struct oprofile_stat_struct oprofile_stats;
35549 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35550 index 849357c..b83c1e0 100644
35551 --- a/drivers/oprofile/oprofilefs.c
35552 +++ b/drivers/oprofile/oprofilefs.c
35553 @@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
35554
35555
35556 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35557 - char const *name, atomic_t *val)
35558 + char const *name, atomic_unchecked_t *val)
35559 {
35560 return __oprofilefs_create_file(sb, root, name,
35561 &atomic_ro_fops, 0444, val);
35562 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35563 index 3f56bc0..707d642 100644
35564 --- a/drivers/parport/procfs.c
35565 +++ b/drivers/parport/procfs.c
35566 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35567
35568 *ppos += len;
35569
35570 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35571 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35572 }
35573
35574 #ifdef CONFIG_PARPORT_1284
35575 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35576
35577 *ppos += len;
35578
35579 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35580 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35581 }
35582 #endif /* IEEE1284.3 support. */
35583
35584 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35585 index 9fff878..ad0ad53 100644
35586 --- a/drivers/pci/hotplug/cpci_hotplug.h
35587 +++ b/drivers/pci/hotplug/cpci_hotplug.h
35588 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35589 int (*hardware_test) (struct slot* slot, u32 value);
35590 u8 (*get_power) (struct slot* slot);
35591 int (*set_power) (struct slot* slot, int value);
35592 -};
35593 +} __no_const;
35594
35595 struct cpci_hp_controller {
35596 unsigned int irq;
35597 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35598 index 76ba8a1..20ca857 100644
35599 --- a/drivers/pci/hotplug/cpqphp_nvram.c
35600 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
35601 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35602
35603 void compaq_nvram_init (void __iomem *rom_start)
35604 {
35605 +
35606 +#ifndef CONFIG_PAX_KERNEXEC
35607 if (rom_start) {
35608 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35609 }
35610 +#endif
35611 +
35612 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35613
35614 /* initialize our int15 lock */
35615 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35616 index b500840..d7159d3 100644
35617 --- a/drivers/pci/pcie/aspm.c
35618 +++ b/drivers/pci/pcie/aspm.c
35619 @@ -27,9 +27,9 @@
35620 #define MODULE_PARAM_PREFIX "pcie_aspm."
35621
35622 /* Note: those are not register definitions */
35623 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35624 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35625 -#define ASPM_STATE_L1 (4) /* L1 state */
35626 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35627 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35628 +#define ASPM_STATE_L1 (4U) /* L1 state */
35629 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35630 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35631
35632 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35633 index 5e1ca3c..08082fe 100644
35634 --- a/drivers/pci/probe.c
35635 +++ b/drivers/pci/probe.c
35636 @@ -215,7 +215,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35637 u16 orig_cmd;
35638 struct pci_bus_region region;
35639
35640 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35641 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35642
35643 if (!dev->mmio_always_on) {
35644 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35645 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35646 index 27911b5..5b6db88 100644
35647 --- a/drivers/pci/proc.c
35648 +++ b/drivers/pci/proc.c
35649 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35650 static int __init pci_proc_init(void)
35651 {
35652 struct pci_dev *dev = NULL;
35653 +
35654 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35655 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35656 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35657 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35658 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35659 +#endif
35660 +#else
35661 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35662 +#endif
35663 proc_create("devices", 0, proc_bus_pci_dir,
35664 &proc_bus_pci_dev_operations);
35665 proc_initialized = 1;
35666 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35667 index d68c000..f6094ca 100644
35668 --- a/drivers/platform/x86/thinkpad_acpi.c
35669 +++ b/drivers/platform/x86/thinkpad_acpi.c
35670 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35671 return 0;
35672 }
35673
35674 -void static hotkey_mask_warn_incomplete_mask(void)
35675 +static void hotkey_mask_warn_incomplete_mask(void)
35676 {
35677 /* log only what the user can fix... */
35678 const u32 wantedmask = hotkey_driver_mask &
35679 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35680 }
35681 }
35682
35683 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35684 - struct tp_nvram_state *newn,
35685 - const u32 event_mask)
35686 -{
35687 -
35688 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35689 do { \
35690 if ((event_mask & (1 << __scancode)) && \
35691 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35692 tpacpi_hotkey_send_key(__scancode); \
35693 } while (0)
35694
35695 - void issue_volchange(const unsigned int oldvol,
35696 - const unsigned int newvol)
35697 - {
35698 - unsigned int i = oldvol;
35699 +static void issue_volchange(const unsigned int oldvol,
35700 + const unsigned int newvol,
35701 + const u32 event_mask)
35702 +{
35703 + unsigned int i = oldvol;
35704
35705 - while (i > newvol) {
35706 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35707 - i--;
35708 - }
35709 - while (i < newvol) {
35710 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35711 - i++;
35712 - }
35713 + while (i > newvol) {
35714 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35715 + i--;
35716 }
35717 + while (i < newvol) {
35718 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35719 + i++;
35720 + }
35721 +}
35722
35723 - void issue_brightnesschange(const unsigned int oldbrt,
35724 - const unsigned int newbrt)
35725 - {
35726 - unsigned int i = oldbrt;
35727 +static void issue_brightnesschange(const unsigned int oldbrt,
35728 + const unsigned int newbrt,
35729 + const u32 event_mask)
35730 +{
35731 + unsigned int i = oldbrt;
35732
35733 - while (i > newbrt) {
35734 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35735 - i--;
35736 - }
35737 - while (i < newbrt) {
35738 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35739 - i++;
35740 - }
35741 + while (i > newbrt) {
35742 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35743 + i--;
35744 + }
35745 + while (i < newbrt) {
35746 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35747 + i++;
35748 }
35749 +}
35750
35751 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35752 + struct tp_nvram_state *newn,
35753 + const u32 event_mask)
35754 +{
35755 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35756 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35757 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35758 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35759 oldn->volume_level != newn->volume_level) {
35760 /* recently muted, or repeated mute keypress, or
35761 * multiple presses ending in mute */
35762 - issue_volchange(oldn->volume_level, newn->volume_level);
35763 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35764 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35765 }
35766 } else {
35767 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35768 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35769 }
35770 if (oldn->volume_level != newn->volume_level) {
35771 - issue_volchange(oldn->volume_level, newn->volume_level);
35772 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35773 } else if (oldn->volume_toggle != newn->volume_toggle) {
35774 /* repeated vol up/down keypress at end of scale ? */
35775 if (newn->volume_level == 0)
35776 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35777 /* handle brightness */
35778 if (oldn->brightness_level != newn->brightness_level) {
35779 issue_brightnesschange(oldn->brightness_level,
35780 - newn->brightness_level);
35781 + newn->brightness_level,
35782 + event_mask);
35783 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35784 /* repeated key presses that didn't change state */
35785 if (newn->brightness_level == 0)
35786 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35787 && !tp_features.bright_unkfw)
35788 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35789 }
35790 +}
35791
35792 #undef TPACPI_COMPARE_KEY
35793 #undef TPACPI_MAY_SEND_KEY
35794 -}
35795
35796 /*
35797 * Polling driver
35798 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35799 index 769d265..a3a05ca 100644
35800 --- a/drivers/pnp/pnpbios/bioscalls.c
35801 +++ b/drivers/pnp/pnpbios/bioscalls.c
35802 @@ -58,7 +58,7 @@ do { \
35803 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35804 } while(0)
35805
35806 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35807 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35808 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35809
35810 /*
35811 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35812
35813 cpu = get_cpu();
35814 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35815 +
35816 + pax_open_kernel();
35817 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35818 + pax_close_kernel();
35819
35820 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35821 spin_lock_irqsave(&pnp_bios_lock, flags);
35822 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35823 :"memory");
35824 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35825
35826 + pax_open_kernel();
35827 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35828 + pax_close_kernel();
35829 +
35830 put_cpu();
35831
35832 /* If we get here and this is set then the PnP BIOS faulted on us. */
35833 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35834 return status;
35835 }
35836
35837 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35838 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35839 {
35840 int i;
35841
35842 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35843 pnp_bios_callpoint.offset = header->fields.pm16offset;
35844 pnp_bios_callpoint.segment = PNP_CS16;
35845
35846 + pax_open_kernel();
35847 +
35848 for_each_possible_cpu(i) {
35849 struct desc_struct *gdt = get_cpu_gdt_table(i);
35850 if (!gdt)
35851 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35852 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35853 (unsigned long)__va(header->fields.pm16dseg));
35854 }
35855 +
35856 + pax_close_kernel();
35857 }
35858 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35859 index b0ecacb..7c9da2e 100644
35860 --- a/drivers/pnp/resource.c
35861 +++ b/drivers/pnp/resource.c
35862 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35863 return 1;
35864
35865 /* check if the resource is valid */
35866 - if (*irq < 0 || *irq > 15)
35867 + if (*irq > 15)
35868 return 0;
35869
35870 /* check if the resource is reserved */
35871 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35872 return 1;
35873
35874 /* check if the resource is valid */
35875 - if (*dma < 0 || *dma == 4 || *dma > 7)
35876 + if (*dma == 4 || *dma > 7)
35877 return 0;
35878
35879 /* check if the resource is reserved */
35880 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35881 index 222ccd8..6275fa5 100644
35882 --- a/drivers/power/bq27x00_battery.c
35883 +++ b/drivers/power/bq27x00_battery.c
35884 @@ -72,7 +72,7 @@
35885 struct bq27x00_device_info;
35886 struct bq27x00_access_methods {
35887 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35888 -};
35889 +} __no_const;
35890
35891 enum bq27x00_chip { BQ27000, BQ27500 };
35892
35893 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35894 index 4c5b053..104263e 100644
35895 --- a/drivers/regulator/max8660.c
35896 +++ b/drivers/regulator/max8660.c
35897 @@ -385,8 +385,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35898 max8660->shadow_regs[MAX8660_OVER1] = 5;
35899 } else {
35900 /* Otherwise devices can be toggled via software */
35901 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
35902 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
35903 + pax_open_kernel();
35904 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35905 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35906 + pax_close_kernel();
35907 }
35908
35909 /*
35910 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35911 index 845aa22..99ec402 100644
35912 --- a/drivers/regulator/mc13892-regulator.c
35913 +++ b/drivers/regulator/mc13892-regulator.c
35914 @@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35915 }
35916 mc13xxx_unlock(mc13892);
35917
35918 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35919 + pax_open_kernel();
35920 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35921 = mc13892_vcam_set_mode;
35922 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35923 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35924 = mc13892_vcam_get_mode;
35925 + pax_close_kernel();
35926
35927 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
35928 ARRAY_SIZE(mc13892_regulators));
35929 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35930 index cace6d3..f623fda 100644
35931 --- a/drivers/rtc/rtc-dev.c
35932 +++ b/drivers/rtc/rtc-dev.c
35933 @@ -14,6 +14,7 @@
35934 #include <linux/module.h>
35935 #include <linux/rtc.h>
35936 #include <linux/sched.h>
35937 +#include <linux/grsecurity.h>
35938 #include "rtc-core.h"
35939
35940 static dev_t rtc_devt;
35941 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35942 if (copy_from_user(&tm, uarg, sizeof(tm)))
35943 return -EFAULT;
35944
35945 + gr_log_timechange();
35946 +
35947 return rtc_set_time(rtc, &tm);
35948
35949 case RTC_PIE_ON:
35950 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35951 index 3fcf627..f334910 100644
35952 --- a/drivers/scsi/aacraid/aacraid.h
35953 +++ b/drivers/scsi/aacraid/aacraid.h
35954 @@ -492,7 +492,7 @@ struct adapter_ops
35955 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35956 /* Administrative operations */
35957 int (*adapter_comm)(struct aac_dev * dev, int comm);
35958 -};
35959 +} __no_const;
35960
35961 /*
35962 * Define which interrupt handler needs to be installed
35963 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35964 index 0d279c44..3d25a97 100644
35965 --- a/drivers/scsi/aacraid/linit.c
35966 +++ b/drivers/scsi/aacraid/linit.c
35967 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35968 #elif defined(__devinitconst)
35969 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35970 #else
35971 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35972 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35973 #endif
35974 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35975 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35976 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35977 index ff80552..1c4120c 100644
35978 --- a/drivers/scsi/aic94xx/aic94xx_init.c
35979 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
35980 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
35981 .lldd_ata_set_dmamode = asd_set_dmamode,
35982 };
35983
35984 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35985 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35986 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35987 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35988 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
35989 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35990 index 4ad7e36..d004679 100644
35991 --- a/drivers/scsi/bfa/bfa.h
35992 +++ b/drivers/scsi/bfa/bfa.h
35993 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
35994 u32 *end);
35995 int cpe_vec_q0;
35996 int rme_vec_q0;
35997 -};
35998 +} __no_const;
35999 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36000
36001 struct bfa_faa_cbfn_s {
36002 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36003 index f0f80e2..8ec946b 100644
36004 --- a/drivers/scsi/bfa/bfa_fcpim.c
36005 +++ b/drivers/scsi/bfa/bfa_fcpim.c
36006 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36007
36008 bfa_iotag_attach(fcp);
36009
36010 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36011 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36012 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36013 (fcp->num_itns * sizeof(struct bfa_itn_s));
36014 memset(fcp->itn_arr, 0,
36015 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36016 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36017 {
36018 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36019 - struct bfa_itn_s *itn;
36020 + bfa_itn_s_no_const *itn;
36021
36022 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36023 itn->isr = isr;
36024 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36025 index 36f26da..38a34a8 100644
36026 --- a/drivers/scsi/bfa/bfa_fcpim.h
36027 +++ b/drivers/scsi/bfa/bfa_fcpim.h
36028 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
36029 struct bfa_itn_s {
36030 bfa_isr_func_t isr;
36031 };
36032 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36033
36034 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36035 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36036 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36037 struct list_head iotag_tio_free_q; /* free IO resources */
36038 struct list_head iotag_unused_q; /* unused IO resources*/
36039 struct bfa_iotag_s *iotag_arr;
36040 - struct bfa_itn_s *itn_arr;
36041 + bfa_itn_s_no_const *itn_arr;
36042 int num_ioim_reqs;
36043 int num_fwtio_reqs;
36044 int num_itns;
36045 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36046 index 1a99d4b..e85d64b 100644
36047 --- a/drivers/scsi/bfa/bfa_ioc.h
36048 +++ b/drivers/scsi/bfa/bfa_ioc.h
36049 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36050 bfa_ioc_disable_cbfn_t disable_cbfn;
36051 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36052 bfa_ioc_reset_cbfn_t reset_cbfn;
36053 -};
36054 +} __no_const;
36055
36056 /*
36057 * IOC event notification mechanism.
36058 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36059 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36060 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36061 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36062 -};
36063 +} __no_const;
36064
36065 /*
36066 * Queue element to wait for room in request queue. FIFO order is
36067 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36068 index a3a056a..b9bbc2f 100644
36069 --- a/drivers/scsi/hosts.c
36070 +++ b/drivers/scsi/hosts.c
36071 @@ -42,7 +42,7 @@
36072 #include "scsi_logging.h"
36073
36074
36075 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36076 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36077
36078
36079 static void scsi_host_cls_release(struct device *dev)
36080 @@ -360,7 +360,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36081 * subtract one because we increment first then return, but we need to
36082 * know what the next host number was before increment
36083 */
36084 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36085 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36086 shost->dma_channel = 0xff;
36087
36088 /* These three are default values which can be overridden */
36089 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36090 index 500e20d..ebd3059 100644
36091 --- a/drivers/scsi/hpsa.c
36092 +++ b/drivers/scsi/hpsa.c
36093 @@ -521,7 +521,7 @@ static inline u32 next_command(struct ctlr_info *h)
36094 u32 a;
36095
36096 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36097 - return h->access.command_completed(h);
36098 + return h->access->command_completed(h);
36099
36100 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36101 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36102 @@ -3002,7 +3002,7 @@ static void start_io(struct ctlr_info *h)
36103 while (!list_empty(&h->reqQ)) {
36104 c = list_entry(h->reqQ.next, struct CommandList, list);
36105 /* can't do anything if fifo is full */
36106 - if ((h->access.fifo_full(h))) {
36107 + if ((h->access->fifo_full(h))) {
36108 dev_warn(&h->pdev->dev, "fifo full\n");
36109 break;
36110 }
36111 @@ -3012,7 +3012,7 @@ static void start_io(struct ctlr_info *h)
36112 h->Qdepth--;
36113
36114 /* Tell the controller execute command */
36115 - h->access.submit_command(h, c);
36116 + h->access->submit_command(h, c);
36117
36118 /* Put job onto the completed Q */
36119 addQ(&h->cmpQ, c);
36120 @@ -3021,17 +3021,17 @@ static void start_io(struct ctlr_info *h)
36121
36122 static inline unsigned long get_next_completion(struct ctlr_info *h)
36123 {
36124 - return h->access.command_completed(h);
36125 + return h->access->command_completed(h);
36126 }
36127
36128 static inline bool interrupt_pending(struct ctlr_info *h)
36129 {
36130 - return h->access.intr_pending(h);
36131 + return h->access->intr_pending(h);
36132 }
36133
36134 static inline long interrupt_not_for_us(struct ctlr_info *h)
36135 {
36136 - return (h->access.intr_pending(h) == 0) ||
36137 + return (h->access->intr_pending(h) == 0) ||
36138 (h->interrupts_enabled == 0);
36139 }
36140
36141 @@ -3930,7 +3930,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36142 if (prod_index < 0)
36143 return -ENODEV;
36144 h->product_name = products[prod_index].product_name;
36145 - h->access = *(products[prod_index].access);
36146 + h->access = products[prod_index].access;
36147
36148 if (hpsa_board_disabled(h->pdev)) {
36149 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36150 @@ -4175,7 +4175,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36151
36152 assert_spin_locked(&lockup_detector_lock);
36153 remove_ctlr_from_lockup_detector_list(h);
36154 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36155 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36156 spin_lock_irqsave(&h->lock, flags);
36157 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36158 spin_unlock_irqrestore(&h->lock, flags);
36159 @@ -4355,7 +4355,7 @@ reinit_after_soft_reset:
36160 }
36161
36162 /* make sure the board interrupts are off */
36163 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36164 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36165
36166 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36167 goto clean2;
36168 @@ -4389,7 +4389,7 @@ reinit_after_soft_reset:
36169 * fake ones to scoop up any residual completions.
36170 */
36171 spin_lock_irqsave(&h->lock, flags);
36172 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36173 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36174 spin_unlock_irqrestore(&h->lock, flags);
36175 free_irq(h->intr[h->intr_mode], h);
36176 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36177 @@ -4408,9 +4408,9 @@ reinit_after_soft_reset:
36178 dev_info(&h->pdev->dev, "Board READY.\n");
36179 dev_info(&h->pdev->dev,
36180 "Waiting for stale completions to drain.\n");
36181 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36182 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36183 msleep(10000);
36184 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36185 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36186
36187 rc = controller_reset_failed(h->cfgtable);
36188 if (rc)
36189 @@ -4431,7 +4431,7 @@ reinit_after_soft_reset:
36190 }
36191
36192 /* Turn the interrupts on so we can service requests */
36193 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36194 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36195
36196 hpsa_hba_inquiry(h);
36197 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36198 @@ -4483,7 +4483,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36199 * To write all data in the battery backed cache to disks
36200 */
36201 hpsa_flush_cache(h);
36202 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36203 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36204 free_irq(h->intr[h->intr_mode], h);
36205 #ifdef CONFIG_PCI_MSI
36206 if (h->msix_vector)
36207 @@ -4657,7 +4657,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36208 return;
36209 }
36210 /* Change the access methods to the performant access methods */
36211 - h->access = SA5_performant_access;
36212 + h->access = &SA5_performant_access;
36213 h->transMethod = CFGTBL_Trans_Performant;
36214 }
36215
36216 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36217 index 7b28d54..952f23a 100644
36218 --- a/drivers/scsi/hpsa.h
36219 +++ b/drivers/scsi/hpsa.h
36220 @@ -72,7 +72,7 @@ struct ctlr_info {
36221 unsigned int msix_vector;
36222 unsigned int msi_vector;
36223 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36224 - struct access_method access;
36225 + struct access_method *access;
36226
36227 /* queue and queue Info */
36228 struct list_head reqQ;
36229 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36230 index f2df059..a3a9930 100644
36231 --- a/drivers/scsi/ips.h
36232 +++ b/drivers/scsi/ips.h
36233 @@ -1027,7 +1027,7 @@ typedef struct {
36234 int (*intr)(struct ips_ha *);
36235 void (*enableint)(struct ips_ha *);
36236 uint32_t (*statupd)(struct ips_ha *);
36237 -} ips_hw_func_t;
36238 +} __no_const ips_hw_func_t;
36239
36240 typedef struct ips_ha {
36241 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36242 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36243 index aceffad..c35c08d 100644
36244 --- a/drivers/scsi/libfc/fc_exch.c
36245 +++ b/drivers/scsi/libfc/fc_exch.c
36246 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
36247 * all together if not used XXX
36248 */
36249 struct {
36250 - atomic_t no_free_exch;
36251 - atomic_t no_free_exch_xid;
36252 - atomic_t xid_not_found;
36253 - atomic_t xid_busy;
36254 - atomic_t seq_not_found;
36255 - atomic_t non_bls_resp;
36256 + atomic_unchecked_t no_free_exch;
36257 + atomic_unchecked_t no_free_exch_xid;
36258 + atomic_unchecked_t xid_not_found;
36259 + atomic_unchecked_t xid_busy;
36260 + atomic_unchecked_t seq_not_found;
36261 + atomic_unchecked_t non_bls_resp;
36262 } stats;
36263 };
36264
36265 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36266 /* allocate memory for exchange */
36267 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36268 if (!ep) {
36269 - atomic_inc(&mp->stats.no_free_exch);
36270 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36271 goto out;
36272 }
36273 memset(ep, 0, sizeof(*ep));
36274 @@ -780,7 +780,7 @@ out:
36275 return ep;
36276 err:
36277 spin_unlock_bh(&pool->lock);
36278 - atomic_inc(&mp->stats.no_free_exch_xid);
36279 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36280 mempool_free(ep, mp->ep_pool);
36281 return NULL;
36282 }
36283 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36284 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36285 ep = fc_exch_find(mp, xid);
36286 if (!ep) {
36287 - atomic_inc(&mp->stats.xid_not_found);
36288 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36289 reject = FC_RJT_OX_ID;
36290 goto out;
36291 }
36292 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36293 ep = fc_exch_find(mp, xid);
36294 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36295 if (ep) {
36296 - atomic_inc(&mp->stats.xid_busy);
36297 + atomic_inc_unchecked(&mp->stats.xid_busy);
36298 reject = FC_RJT_RX_ID;
36299 goto rel;
36300 }
36301 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36302 }
36303 xid = ep->xid; /* get our XID */
36304 } else if (!ep) {
36305 - atomic_inc(&mp->stats.xid_not_found);
36306 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36307 reject = FC_RJT_RX_ID; /* XID not found */
36308 goto out;
36309 }
36310 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36311 } else {
36312 sp = &ep->seq;
36313 if (sp->id != fh->fh_seq_id) {
36314 - atomic_inc(&mp->stats.seq_not_found);
36315 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36316 if (f_ctl & FC_FC_END_SEQ) {
36317 /*
36318 * Update sequence_id based on incoming last
36319 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36320
36321 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36322 if (!ep) {
36323 - atomic_inc(&mp->stats.xid_not_found);
36324 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36325 goto out;
36326 }
36327 if (ep->esb_stat & ESB_ST_COMPLETE) {
36328 - atomic_inc(&mp->stats.xid_not_found);
36329 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36330 goto rel;
36331 }
36332 if (ep->rxid == FC_XID_UNKNOWN)
36333 ep->rxid = ntohs(fh->fh_rx_id);
36334 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36335 - atomic_inc(&mp->stats.xid_not_found);
36336 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36337 goto rel;
36338 }
36339 if (ep->did != ntoh24(fh->fh_s_id) &&
36340 ep->did != FC_FID_FLOGI) {
36341 - atomic_inc(&mp->stats.xid_not_found);
36342 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36343 goto rel;
36344 }
36345 sof = fr_sof(fp);
36346 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36347 sp->ssb_stat |= SSB_ST_RESP;
36348 sp->id = fh->fh_seq_id;
36349 } else if (sp->id != fh->fh_seq_id) {
36350 - atomic_inc(&mp->stats.seq_not_found);
36351 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36352 goto rel;
36353 }
36354
36355 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36356 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36357
36358 if (!sp)
36359 - atomic_inc(&mp->stats.xid_not_found);
36360 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36361 else
36362 - atomic_inc(&mp->stats.non_bls_resp);
36363 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36364
36365 fc_frame_free(fp);
36366 }
36367 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36368 index 441d88a..689ad71 100644
36369 --- a/drivers/scsi/libsas/sas_ata.c
36370 +++ b/drivers/scsi/libsas/sas_ata.c
36371 @@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
36372 .postreset = ata_std_postreset,
36373 .error_handler = ata_std_error_handler,
36374 .post_internal_cmd = sas_ata_post_internal,
36375 - .qc_defer = ata_std_qc_defer,
36376 + .qc_defer = ata_std_qc_defer,
36377 .qc_prep = ata_noop_qc_prep,
36378 .qc_issue = sas_ata_qc_issue,
36379 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36380 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36381 index 3a1ffdd..8eb7c71 100644
36382 --- a/drivers/scsi/lpfc/lpfc.h
36383 +++ b/drivers/scsi/lpfc/lpfc.h
36384 @@ -413,7 +413,7 @@ struct lpfc_vport {
36385 struct dentry *debug_nodelist;
36386 struct dentry *vport_debugfs_root;
36387 struct lpfc_debugfs_trc *disc_trc;
36388 - atomic_t disc_trc_cnt;
36389 + atomic_unchecked_t disc_trc_cnt;
36390 #endif
36391 uint8_t stat_data_enabled;
36392 uint8_t stat_data_blocked;
36393 @@ -826,8 +826,8 @@ struct lpfc_hba {
36394 struct timer_list fabric_block_timer;
36395 unsigned long bit_flags;
36396 #define FABRIC_COMANDS_BLOCKED 0
36397 - atomic_t num_rsrc_err;
36398 - atomic_t num_cmd_success;
36399 + atomic_unchecked_t num_rsrc_err;
36400 + atomic_unchecked_t num_cmd_success;
36401 unsigned long last_rsrc_error_time;
36402 unsigned long last_ramp_down_time;
36403 unsigned long last_ramp_up_time;
36404 @@ -863,7 +863,7 @@ struct lpfc_hba {
36405
36406 struct dentry *debug_slow_ring_trc;
36407 struct lpfc_debugfs_trc *slow_ring_trc;
36408 - atomic_t slow_ring_trc_cnt;
36409 + atomic_unchecked_t slow_ring_trc_cnt;
36410 /* iDiag debugfs sub-directory */
36411 struct dentry *idiag_root;
36412 struct dentry *idiag_pci_cfg;
36413 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36414 index af04b0d..8f1a97e 100644
36415 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
36416 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36417 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36418
36419 #include <linux/debugfs.h>
36420
36421 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36422 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36423 static unsigned long lpfc_debugfs_start_time = 0L;
36424
36425 /* iDiag */
36426 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36427 lpfc_debugfs_enable = 0;
36428
36429 len = 0;
36430 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36431 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36432 (lpfc_debugfs_max_disc_trc - 1);
36433 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36434 dtp = vport->disc_trc + i;
36435 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36436 lpfc_debugfs_enable = 0;
36437
36438 len = 0;
36439 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36440 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36441 (lpfc_debugfs_max_slow_ring_trc - 1);
36442 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36443 dtp = phba->slow_ring_trc + i;
36444 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36445 !vport || !vport->disc_trc)
36446 return;
36447
36448 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36449 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36450 (lpfc_debugfs_max_disc_trc - 1);
36451 dtp = vport->disc_trc + index;
36452 dtp->fmt = fmt;
36453 dtp->data1 = data1;
36454 dtp->data2 = data2;
36455 dtp->data3 = data3;
36456 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36457 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36458 dtp->jif = jiffies;
36459 #endif
36460 return;
36461 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36462 !phba || !phba->slow_ring_trc)
36463 return;
36464
36465 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36466 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36467 (lpfc_debugfs_max_slow_ring_trc - 1);
36468 dtp = phba->slow_ring_trc + index;
36469 dtp->fmt = fmt;
36470 dtp->data1 = data1;
36471 dtp->data2 = data2;
36472 dtp->data3 = data3;
36473 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36474 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36475 dtp->jif = jiffies;
36476 #endif
36477 return;
36478 @@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36479 "slow_ring buffer\n");
36480 goto debug_failed;
36481 }
36482 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36483 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36484 memset(phba->slow_ring_trc, 0,
36485 (sizeof(struct lpfc_debugfs_trc) *
36486 lpfc_debugfs_max_slow_ring_trc));
36487 @@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36488 "buffer\n");
36489 goto debug_failed;
36490 }
36491 - atomic_set(&vport->disc_trc_cnt, 0);
36492 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36493
36494 snprintf(name, sizeof(name), "discovery_trace");
36495 vport->debug_disc_trc =
36496 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36497 index 9598fdc..7e9f3d9 100644
36498 --- a/drivers/scsi/lpfc/lpfc_init.c
36499 +++ b/drivers/scsi/lpfc/lpfc_init.c
36500 @@ -10266,8 +10266,10 @@ lpfc_init(void)
36501 "misc_register returned with status %d", error);
36502
36503 if (lpfc_enable_npiv) {
36504 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36505 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36506 + pax_open_kernel();
36507 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36508 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36509 + pax_close_kernel();
36510 }
36511 lpfc_transport_template =
36512 fc_attach_transport(&lpfc_transport_functions);
36513 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36514 index 88f3a83..686d3fa 100644
36515 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36516 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36517 @@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36518 uint32_t evt_posted;
36519
36520 spin_lock_irqsave(&phba->hbalock, flags);
36521 - atomic_inc(&phba->num_rsrc_err);
36522 + atomic_inc_unchecked(&phba->num_rsrc_err);
36523 phba->last_rsrc_error_time = jiffies;
36524
36525 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36526 @@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36527 unsigned long flags;
36528 struct lpfc_hba *phba = vport->phba;
36529 uint32_t evt_posted;
36530 - atomic_inc(&phba->num_cmd_success);
36531 + atomic_inc_unchecked(&phba->num_cmd_success);
36532
36533 if (vport->cfg_lun_queue_depth <= queue_depth)
36534 return;
36535 @@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36536 unsigned long num_rsrc_err, num_cmd_success;
36537 int i;
36538
36539 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36540 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36541 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36542 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36543
36544 vports = lpfc_create_vport_work_array(phba);
36545 if (vports != NULL)
36546 @@ -417,8 +417,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36547 }
36548 }
36549 lpfc_destroy_vport_work_array(phba, vports);
36550 - atomic_set(&phba->num_rsrc_err, 0);
36551 - atomic_set(&phba->num_cmd_success, 0);
36552 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36553 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36554 }
36555
36556 /**
36557 @@ -452,8 +452,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36558 }
36559 }
36560 lpfc_destroy_vport_work_array(phba, vports);
36561 - atomic_set(&phba->num_rsrc_err, 0);
36562 - atomic_set(&phba->num_cmd_success, 0);
36563 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36564 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36565 }
36566
36567 /**
36568 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36569 index ea8a0b4..812a124 100644
36570 --- a/drivers/scsi/pmcraid.c
36571 +++ b/drivers/scsi/pmcraid.c
36572 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36573 res->scsi_dev = scsi_dev;
36574 scsi_dev->hostdata = res;
36575 res->change_detected = 0;
36576 - atomic_set(&res->read_failures, 0);
36577 - atomic_set(&res->write_failures, 0);
36578 + atomic_set_unchecked(&res->read_failures, 0);
36579 + atomic_set_unchecked(&res->write_failures, 0);
36580 rc = 0;
36581 }
36582 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36583 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36584
36585 /* If this was a SCSI read/write command keep count of errors */
36586 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36587 - atomic_inc(&res->read_failures);
36588 + atomic_inc_unchecked(&res->read_failures);
36589 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36590 - atomic_inc(&res->write_failures);
36591 + atomic_inc_unchecked(&res->write_failures);
36592
36593 if (!RES_IS_GSCSI(res->cfg_entry) &&
36594 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36595 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36596 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36597 * hrrq_id assigned here in queuecommand
36598 */
36599 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36600 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36601 pinstance->num_hrrq;
36602 cmd->cmd_done = pmcraid_io_done;
36603
36604 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36605 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36606 * hrrq_id assigned here in queuecommand
36607 */
36608 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36609 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36610 pinstance->num_hrrq;
36611
36612 if (request_size) {
36613 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36614
36615 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36616 /* add resources only after host is added into system */
36617 - if (!atomic_read(&pinstance->expose_resources))
36618 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36619 return;
36620
36621 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36622 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36623 init_waitqueue_head(&pinstance->reset_wait_q);
36624
36625 atomic_set(&pinstance->outstanding_cmds, 0);
36626 - atomic_set(&pinstance->last_message_id, 0);
36627 - atomic_set(&pinstance->expose_resources, 0);
36628 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36629 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36630
36631 INIT_LIST_HEAD(&pinstance->free_res_q);
36632 INIT_LIST_HEAD(&pinstance->used_res_q);
36633 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36634 /* Schedule worker thread to handle CCN and take care of adding and
36635 * removing devices to OS
36636 */
36637 - atomic_set(&pinstance->expose_resources, 1);
36638 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36639 schedule_work(&pinstance->worker_q);
36640 return rc;
36641
36642 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36643 index e1d150f..6c6df44 100644
36644 --- a/drivers/scsi/pmcraid.h
36645 +++ b/drivers/scsi/pmcraid.h
36646 @@ -748,7 +748,7 @@ struct pmcraid_instance {
36647 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36648
36649 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36650 - atomic_t last_message_id;
36651 + atomic_unchecked_t last_message_id;
36652
36653 /* configuration table */
36654 struct pmcraid_config_table *cfg_table;
36655 @@ -777,7 +777,7 @@ struct pmcraid_instance {
36656 atomic_t outstanding_cmds;
36657
36658 /* should add/delete resources to mid-layer now ?*/
36659 - atomic_t expose_resources;
36660 + atomic_unchecked_t expose_resources;
36661
36662
36663
36664 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
36665 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36666 };
36667 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36668 - atomic_t read_failures; /* count of failed READ commands */
36669 - atomic_t write_failures; /* count of failed WRITE commands */
36670 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36671 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36672
36673 /* To indicate add/delete/modify during CCN */
36674 u8 change_detected;
36675 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36676 index a244303..6015eb7 100644
36677 --- a/drivers/scsi/qla2xxx/qla_def.h
36678 +++ b/drivers/scsi/qla2xxx/qla_def.h
36679 @@ -2264,7 +2264,7 @@ struct isp_operations {
36680 int (*start_scsi) (srb_t *);
36681 int (*abort_isp) (struct scsi_qla_host *);
36682 int (*iospace_config)(struct qla_hw_data*);
36683 -};
36684 +} __no_const;
36685
36686 /* MSI-X Support *************************************************************/
36687
36688 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36689 index 7f2492e..5113877 100644
36690 --- a/drivers/scsi/qla4xxx/ql4_def.h
36691 +++ b/drivers/scsi/qla4xxx/ql4_def.h
36692 @@ -268,7 +268,7 @@ struct ddb_entry {
36693 * (4000 only) */
36694 atomic_t relogin_timer; /* Max Time to wait for
36695 * relogin to complete */
36696 - atomic_t relogin_retry_count; /* Num of times relogin has been
36697 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36698 * retried */
36699 uint32_t default_time2wait; /* Default Min time between
36700 * relogins (+aens) */
36701 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36702 index ee47820..a83b1f4 100644
36703 --- a/drivers/scsi/qla4xxx/ql4_os.c
36704 +++ b/drivers/scsi/qla4xxx/ql4_os.c
36705 @@ -2551,12 +2551,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
36706 */
36707 if (!iscsi_is_session_online(cls_sess)) {
36708 /* Reset retry relogin timer */
36709 - atomic_inc(&ddb_entry->relogin_retry_count);
36710 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36711 DEBUG2(ql4_printk(KERN_INFO, ha,
36712 "%s: index[%d] relogin timed out-retrying"
36713 " relogin (%d), retry (%d)\n", __func__,
36714 ddb_entry->fw_ddb_index,
36715 - atomic_read(&ddb_entry->relogin_retry_count),
36716 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
36717 ddb_entry->default_time2wait + 4));
36718 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
36719 atomic_set(&ddb_entry->retry_relogin_timer,
36720 @@ -4453,7 +4453,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
36721
36722 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36723 atomic_set(&ddb_entry->relogin_timer, 0);
36724 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36725 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36726 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
36727 ddb_entry->default_relogin_timeout =
36728 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
36729 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36730 index 07322ec..91ccc23 100644
36731 --- a/drivers/scsi/scsi.c
36732 +++ b/drivers/scsi/scsi.c
36733 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36734 unsigned long timeout;
36735 int rtn = 0;
36736
36737 - atomic_inc(&cmd->device->iorequest_cnt);
36738 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36739
36740 /* check if the device is still usable */
36741 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36742 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36743 index 5dfd749..e86bf7e 100644
36744 --- a/drivers/scsi/scsi_lib.c
36745 +++ b/drivers/scsi/scsi_lib.c
36746 @@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36747 shost = sdev->host;
36748 scsi_init_cmd_errh(cmd);
36749 cmd->result = DID_NO_CONNECT << 16;
36750 - atomic_inc(&cmd->device->iorequest_cnt);
36751 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36752
36753 /*
36754 * SCSI request completion path will do scsi_device_unbusy(),
36755 @@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct request *rq)
36756
36757 INIT_LIST_HEAD(&cmd->eh_entry);
36758
36759 - atomic_inc(&cmd->device->iodone_cnt);
36760 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36761 if (cmd->result)
36762 - atomic_inc(&cmd->device->ioerr_cnt);
36763 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36764
36765 disposition = scsi_decide_disposition(cmd);
36766 if (disposition != SUCCESS &&
36767 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36768 index 04c2a27..9d8bd66 100644
36769 --- a/drivers/scsi/scsi_sysfs.c
36770 +++ b/drivers/scsi/scsi_sysfs.c
36771 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36772 char *buf) \
36773 { \
36774 struct scsi_device *sdev = to_scsi_device(dev); \
36775 - unsigned long long count = atomic_read(&sdev->field); \
36776 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36777 return snprintf(buf, 20, "0x%llx\n", count); \
36778 } \
36779 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36780 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36781 index 84a1fdf..693b0d6 100644
36782 --- a/drivers/scsi/scsi_tgt_lib.c
36783 +++ b/drivers/scsi/scsi_tgt_lib.c
36784 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36785 int err;
36786
36787 dprintk("%lx %u\n", uaddr, len);
36788 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36789 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36790 if (err) {
36791 /*
36792 * TODO: need to fixup sg_tablesize, max_segment_size,
36793 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36794 index 80fbe2a..efa223b 100644
36795 --- a/drivers/scsi/scsi_transport_fc.c
36796 +++ b/drivers/scsi/scsi_transport_fc.c
36797 @@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36798 * Netlink Infrastructure
36799 */
36800
36801 -static atomic_t fc_event_seq;
36802 +static atomic_unchecked_t fc_event_seq;
36803
36804 /**
36805 * fc_get_event_number - Obtain the next sequential FC event number
36806 @@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
36807 u32
36808 fc_get_event_number(void)
36809 {
36810 - return atomic_add_return(1, &fc_event_seq);
36811 + return atomic_add_return_unchecked(1, &fc_event_seq);
36812 }
36813 EXPORT_SYMBOL(fc_get_event_number);
36814
36815 @@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
36816 {
36817 int error;
36818
36819 - atomic_set(&fc_event_seq, 0);
36820 + atomic_set_unchecked(&fc_event_seq, 0);
36821
36822 error = transport_class_register(&fc_host_class);
36823 if (error)
36824 @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36825 char *cp;
36826
36827 *val = simple_strtoul(buf, &cp, 0);
36828 - if ((*cp && (*cp != '\n')) || (*val < 0))
36829 + if (*cp && (*cp != '\n'))
36830 return -EINVAL;
36831 /*
36832 * Check for overflow; dev_loss_tmo is u32
36833 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36834 index 1cf640e..78e9014 100644
36835 --- a/drivers/scsi/scsi_transport_iscsi.c
36836 +++ b/drivers/scsi/scsi_transport_iscsi.c
36837 @@ -79,7 +79,7 @@ struct iscsi_internal {
36838 struct transport_container session_cont;
36839 };
36840
36841 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36842 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36843 static struct workqueue_struct *iscsi_eh_timer_workq;
36844
36845 static DEFINE_IDA(iscsi_sess_ida);
36846 @@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36847 int err;
36848
36849 ihost = shost->shost_data;
36850 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36851 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36852
36853 if (target_id == ISCSI_MAX_TARGET) {
36854 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
36855 @@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
36856 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36857 ISCSI_TRANSPORT_VERSION);
36858
36859 - atomic_set(&iscsi_session_nr, 0);
36860 + atomic_set_unchecked(&iscsi_session_nr, 0);
36861
36862 err = class_register(&iscsi_transport_class);
36863 if (err)
36864 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36865 index 21a045e..ec89e03 100644
36866 --- a/drivers/scsi/scsi_transport_srp.c
36867 +++ b/drivers/scsi/scsi_transport_srp.c
36868 @@ -33,7 +33,7 @@
36869 #include "scsi_transport_srp_internal.h"
36870
36871 struct srp_host_attrs {
36872 - atomic_t next_port_id;
36873 + atomic_unchecked_t next_port_id;
36874 };
36875 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36876
36877 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36878 struct Scsi_Host *shost = dev_to_shost(dev);
36879 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36880
36881 - atomic_set(&srp_host->next_port_id, 0);
36882 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36883 return 0;
36884 }
36885
36886 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36887 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36888 rport->roles = ids->roles;
36889
36890 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36891 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36892 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36893
36894 transport_setup_device(&rport->dev);
36895 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36896 index eacd46b..e3f4d62 100644
36897 --- a/drivers/scsi/sg.c
36898 +++ b/drivers/scsi/sg.c
36899 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36900 sdp->disk->disk_name,
36901 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36902 NULL,
36903 - (char *)arg);
36904 + (char __user *)arg);
36905 case BLKTRACESTART:
36906 return blk_trace_startstop(sdp->device->request_queue, 1);
36907 case BLKTRACESTOP:
36908 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
36909 const struct file_operations * fops;
36910 };
36911
36912 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36913 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36914 {"allow_dio", &adio_fops},
36915 {"debug", &debug_fops},
36916 {"def_reserved_size", &dressz_fops},
36917 @@ -2332,7 +2332,7 @@ sg_proc_init(void)
36918 if (!sg_proc_sgp)
36919 return 1;
36920 for (k = 0; k < num_leaves; ++k) {
36921 - struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36922 + const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
36923 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
36924 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
36925 }
36926 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36927 index 3d8f662..070f1a5 100644
36928 --- a/drivers/spi/spi.c
36929 +++ b/drivers/spi/spi.c
36930 @@ -1361,7 +1361,7 @@ int spi_bus_unlock(struct spi_master *master)
36931 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36932
36933 /* portable code must never pass more than 32 bytes */
36934 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36935 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36936
36937 static u8 *buf;
36938
36939 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
36940 index d91751f..a3a9e36 100644
36941 --- a/drivers/staging/octeon/ethernet-rx.c
36942 +++ b/drivers/staging/octeon/ethernet-rx.c
36943 @@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36944 /* Increment RX stats for virtual ports */
36945 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
36946 #ifdef CONFIG_64BIT
36947 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
36948 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
36949 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
36950 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
36951 #else
36952 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
36953 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
36954 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
36955 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
36956 #endif
36957 }
36958 netif_receive_skb(skb);
36959 @@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36960 dev->name);
36961 */
36962 #ifdef CONFIG_64BIT
36963 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
36964 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36965 #else
36966 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
36967 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
36968 #endif
36969 dev_kfree_skb_irq(skb);
36970 }
36971 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
36972 index 60cba81..71eb239 100644
36973 --- a/drivers/staging/octeon/ethernet.c
36974 +++ b/drivers/staging/octeon/ethernet.c
36975 @@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
36976 * since the RX tasklet also increments it.
36977 */
36978 #ifdef CONFIG_64BIT
36979 - atomic64_add(rx_status.dropped_packets,
36980 - (atomic64_t *)&priv->stats.rx_dropped);
36981 + atomic64_add_unchecked(rx_status.dropped_packets,
36982 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36983 #else
36984 - atomic_add(rx_status.dropped_packets,
36985 - (atomic_t *)&priv->stats.rx_dropped);
36986 + atomic_add_unchecked(rx_status.dropped_packets,
36987 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
36988 #endif
36989 }
36990
36991 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
36992 index d3d8727..f9327bb8 100644
36993 --- a/drivers/staging/rtl8712/rtl871x_io.h
36994 +++ b/drivers/staging/rtl8712/rtl871x_io.h
36995 @@ -108,7 +108,7 @@ struct _io_ops {
36996 u8 *pmem);
36997 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
36998 u8 *pmem);
36999 -};
37000 +} __no_const;
37001
37002 struct io_req {
37003 struct list_head list;
37004 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37005 index c7b5e8b..783d6cb 100644
37006 --- a/drivers/staging/sbe-2t3e3/netdev.c
37007 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37008 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37009 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37010
37011 if (rlen)
37012 - if (copy_to_user(data, &resp, rlen))
37013 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37014 return -EFAULT;
37015
37016 return 0;
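Annotation: the sbe-2t3e3 hunk above rejects any firmware-reported length larger than the on-stack response before it can reach copy_to_user(). Below is a minimal userspace sketch of the same clamp-or-reject rule, with memcpy() standing in for copy_to_user() and every name invented purely for illustration.

    #include <stdio.h>
    #include <string.h>

    struct resp { char data[64]; };

    /* Refuse any device-reported length that exceeds the buffer we
     * actually filled; mirrors the `rlen > sizeof resp` guard above. */
    static int copy_response(void *dst, const struct resp *src, size_t rlen)
    {
        if (rlen > sizeof(*src))
            return -1;              /* would read past resp: refuse */
        memcpy(dst, src, rlen);     /* stand-in for copy_to_user() */
        return 0;
    }

    int main(void)
    {
        struct resp r = { "ok" };
        char out[64];

        printf("%d\n", copy_response(out, &r, 4));    /* 0  */
        printf("%d\n", copy_response(out, &r, 4096)); /* -1 */
        return 0;
    }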
37017 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37018 index 42cdafe..2769103 100644
37019 --- a/drivers/staging/speakup/speakup_soft.c
37020 +++ b/drivers/staging/speakup/speakup_soft.c
37021 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37022 break;
37023 } else if (!initialized) {
37024 if (*init) {
37025 - ch = *init;
37026 init++;
37027 } else {
37028 initialized = 1;
37029 }
37030 + ch = *init;
37031 } else {
37032 ch = synth_buffer_getc();
37033 }
37034 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37035 index c7b888c..c94be93 100644
37036 --- a/drivers/staging/usbip/usbip_common.h
37037 +++ b/drivers/staging/usbip/usbip_common.h
37038 @@ -289,7 +289,7 @@ struct usbip_device {
37039 void (*shutdown)(struct usbip_device *);
37040 void (*reset)(struct usbip_device *);
37041 void (*unusable)(struct usbip_device *);
37042 - } eh_ops;
37043 + } __no_const eh_ops;
37044 };
37045
37046 /* usbip_common.c */
37047 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37048 index 88b3298..3783eee 100644
37049 --- a/drivers/staging/usbip/vhci.h
37050 +++ b/drivers/staging/usbip/vhci.h
37051 @@ -88,7 +88,7 @@ struct vhci_hcd {
37052 unsigned resuming:1;
37053 unsigned long re_timeout;
37054
37055 - atomic_t seqnum;
37056 + atomic_unchecked_t seqnum;
37057
37058 /*
37059 * NOTE:
37060 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37061 index dca9bf1..80735c9 100644
37062 --- a/drivers/staging/usbip/vhci_hcd.c
37063 +++ b/drivers/staging/usbip/vhci_hcd.c
37064 @@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
37065 return;
37066 }
37067
37068 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37069 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37070 if (priv->seqnum == 0xffff)
37071 dev_info(&urb->dev->dev, "seqnum max\n");
37072
37073 @@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37074 return -ENOMEM;
37075 }
37076
37077 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37078 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37079 if (unlink->seqnum == 0xffff)
37080 pr_info("seqnum max\n");
37081
37082 @@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
37083 vdev->rhport = rhport;
37084 }
37085
37086 - atomic_set(&vhci->seqnum, 0);
37087 + atomic_set_unchecked(&vhci->seqnum, 0);
37088 spin_lock_init(&vhci->lock);
37089
37090 hcd->power_budget = 0; /* no limit */
37091 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37092 index f5fba732..210a16c 100644
37093 --- a/drivers/staging/usbip/vhci_rx.c
37094 +++ b/drivers/staging/usbip/vhci_rx.c
37095 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37096 if (!urb) {
37097 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37098 pr_info("max seqnum %d\n",
37099 - atomic_read(&the_controller->seqnum));
37100 + atomic_read_unchecked(&the_controller->seqnum));
37101 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37102 return;
37103 }
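Annotation: the usbip hunks above convert seqnum to atomic_unchecked_t, a PaX type (defined elsewhere in this patch) that keeps plain wrapping semantics while opting the counter out of PAX_REFCOUNT overflow detection, since a transfer sequence number is expected to wrap harmlessly. A rough userspace analogue using only C11 <stdatomic.h>; the names here are illustrative, not kernel API.

    #include <stdatomic.h>
    #include <stdio.h>

    /* Wrapping sequence counter: overflow is expected and harmless,
     * which is exactly why such counters are marked "unchecked". */
    static atomic_uint seqnum;

    static unsigned int next_seqnum(void)
    {
        /* +1 mirrors atomic_inc_return(); unsigned wraparound is defined. */
        return atomic_fetch_add(&seqnum, 1) + 1;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("seq %u\n", next_seqnum());
        return 0;
    }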
37104 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37105 index 7735027..30eed13 100644
37106 --- a/drivers/staging/vt6655/hostap.c
37107 +++ b/drivers/staging/vt6655/hostap.c
37108 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37109 *
37110 */
37111
37112 +static net_device_ops_no_const apdev_netdev_ops;
37113 +
37114 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37115 {
37116 PSDevice apdev_priv;
37117 struct net_device *dev = pDevice->dev;
37118 int ret;
37119 - const struct net_device_ops apdev_netdev_ops = {
37120 - .ndo_start_xmit = pDevice->tx_80211,
37121 - };
37122
37123 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37124
37125 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37126 *apdev_priv = *pDevice;
37127 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37128
37129 + /* only half broken now */
37130 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37131 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37132
37133 pDevice->apdev->type = ARPHRD_IEEE80211;
37134 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37135 index 51b5adf..098e320 100644
37136 --- a/drivers/staging/vt6656/hostap.c
37137 +++ b/drivers/staging/vt6656/hostap.c
37138 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37139 *
37140 */
37141
37142 +static net_device_ops_no_const apdev_netdev_ops;
37143 +
37144 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37145 {
37146 PSDevice apdev_priv;
37147 struct net_device *dev = pDevice->dev;
37148 int ret;
37149 - const struct net_device_ops apdev_netdev_ops = {
37150 - .ndo_start_xmit = pDevice->tx_80211,
37151 - };
37152
37153 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37154
37155 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37156 *apdev_priv = *pDevice;
37157 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37158
37159 + /* only half broken now */
37160 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37161 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37162
37163 pDevice->apdev->type = ARPHRD_IEEE80211;
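Annotation: the vt6655/vt6656 hunks above replace a function-local const net_device_ops (whose address the netdev would keep after the function returned) with a file-scope net_device_ops_no_const instance whose ndo_start_xmit is assigned once at enable time; the "only half broken now" comment notes that a single shared static is still imperfect if several devices need different tx hooks. A userspace sketch of an ops table with one runtime-assigned hook; all names are invented for illustration.

    #include <stdio.h>

    struct net_ops {
        int (*start_xmit)(const char *frame);
    };

    static int ap_xmit(const char *frame) { printf("xmit %s\n", frame); return 0; }

    /* Normally ops tables are const and fully initialized at build time;
     * here the xmit hook is only known per device at runtime, so a single
     * writable (__no_const-style) instance is filled in once. */
    static struct net_ops apdev_ops;

    static void enable_hostapd(int (*tx)(const char *))
    {
        apdev_ops.start_xmit = tx;   /* the single runtime assignment */
    }

    int main(void)
    {
        enable_hostapd(ap_xmit);
        apdev_ops.start_xmit("beacon");
        return 0;
    }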
37164 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37165 index 7843dfd..3db105f 100644
37166 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37167 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37168 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37169
37170 struct usbctlx_completor {
37171 int (*complete) (struct usbctlx_completor *);
37172 -};
37173 +} __no_const;
37174
37175 static int
37176 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37177 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37178 index 1ca66ea..76f1343 100644
37179 --- a/drivers/staging/zcache/tmem.c
37180 +++ b/drivers/staging/zcache/tmem.c
37181 @@ -39,7 +39,7 @@
37182 * A tmem host implementation must use this function to register callbacks
37183 * for memory allocation.
37184 */
37185 -static struct tmem_hostops tmem_hostops;
37186 +static tmem_hostops_no_const tmem_hostops;
37187
37188 static void tmem_objnode_tree_init(void);
37189
37190 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37191 * A tmem host implementation must use this function to register
37192 * callbacks for a page-accessible memory (PAM) implementation
37193 */
37194 -static struct tmem_pamops tmem_pamops;
37195 +static tmem_pamops_no_const tmem_pamops;
37196
37197 void tmem_register_pamops(struct tmem_pamops *m)
37198 {
37199 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37200 index 0d4aa82..f7832d4 100644
37201 --- a/drivers/staging/zcache/tmem.h
37202 +++ b/drivers/staging/zcache/tmem.h
37203 @@ -180,6 +180,7 @@ struct tmem_pamops {
37204 void (*new_obj)(struct tmem_obj *);
37205 int (*replace_in_obj)(void *, struct tmem_obj *);
37206 };
37207 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37208 extern void tmem_register_pamops(struct tmem_pamops *m);
37209
37210 /* memory allocation methods provided by the host implementation */
37211 @@ -189,6 +190,7 @@ struct tmem_hostops {
37212 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37213 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37214 };
37215 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37216 extern void tmem_register_hostops(struct tmem_hostops *m);
37217
37218 /* core tmem accessor functions */
37219 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37220 index f015839..b15dfc4 100644
37221 --- a/drivers/target/target_core_tmr.c
37222 +++ b/drivers/target/target_core_tmr.c
37223 @@ -327,7 +327,7 @@ static void core_tmr_drain_task_list(
37224 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37225 cmd->t_task_list_num,
37226 atomic_read(&cmd->t_task_cdbs_left),
37227 - atomic_read(&cmd->t_task_cdbs_sent),
37228 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37229 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37230 (cmd->transport_state & CMD_T_STOP) != 0,
37231 (cmd->transport_state & CMD_T_SENT) != 0);
37232 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37233 index 443704f..92d3517 100644
37234 --- a/drivers/target/target_core_transport.c
37235 +++ b/drivers/target/target_core_transport.c
37236 @@ -1355,7 +1355,7 @@ struct se_device *transport_add_device_to_core_hba(
37237 spin_lock_init(&dev->se_port_lock);
37238 spin_lock_init(&dev->se_tmr_lock);
37239 spin_lock_init(&dev->qf_cmd_lock);
37240 - atomic_set(&dev->dev_ordered_id, 0);
37241 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37242
37243 se_dev_set_default_attribs(dev, dev_limits);
37244
37245 @@ -1542,7 +1542,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37246 * Used to determine when ORDERED commands should go from
37247 * Dormant to Active status.
37248 */
37249 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37250 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37251 smp_mb__after_atomic_inc();
37252 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37253 cmd->se_ordered_id, cmd->sam_task_attr,
37254 @@ -1956,7 +1956,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
37255 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
37256 cmd->t_task_list_num,
37257 atomic_read(&cmd->t_task_cdbs_left),
37258 - atomic_read(&cmd->t_task_cdbs_sent),
37259 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37260 atomic_read(&cmd->t_task_cdbs_ex_left),
37261 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37262 (cmd->transport_state & CMD_T_STOP) != 0,
37263 @@ -2216,9 +2216,9 @@ check_depth:
37264 cmd = task->task_se_cmd;
37265 spin_lock_irqsave(&cmd->t_state_lock, flags);
37266 task->task_flags |= (TF_ACTIVE | TF_SENT);
37267 - atomic_inc(&cmd->t_task_cdbs_sent);
37268 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37269
37270 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
37271 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37272 cmd->t_task_list_num)
37273 cmd->transport_state |= CMD_T_SENT;
37274
37275 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37276 index 3436436..772237b 100644
37277 --- a/drivers/tty/hvc/hvcs.c
37278 +++ b/drivers/tty/hvc/hvcs.c
37279 @@ -83,6 +83,7 @@
37280 #include <asm/hvcserver.h>
37281 #include <asm/uaccess.h>
37282 #include <asm/vio.h>
37283 +#include <asm/local.h>
37284
37285 /*
37286 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37287 @@ -270,7 +271,7 @@ struct hvcs_struct {
37288 unsigned int index;
37289
37290 struct tty_struct *tty;
37291 - int open_count;
37292 + local_t open_count;
37293
37294 /*
37295 * Used to tell the driver kernel_thread what operations need to take
37296 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37297
37298 spin_lock_irqsave(&hvcsd->lock, flags);
37299
37300 - if (hvcsd->open_count > 0) {
37301 + if (local_read(&hvcsd->open_count) > 0) {
37302 spin_unlock_irqrestore(&hvcsd->lock, flags);
37303 printk(KERN_INFO "HVCS: vterm state unchanged. "
37304 "The hvcs device node is still in use.\n");
37305 @@ -1138,7 +1139,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37306 if ((retval = hvcs_partner_connect(hvcsd)))
37307 goto error_release;
37308
37309 - hvcsd->open_count = 1;
37310 + local_set(&hvcsd->open_count, 1);
37311 hvcsd->tty = tty;
37312 tty->driver_data = hvcsd;
37313
37314 @@ -1172,7 +1173,7 @@ fast_open:
37315
37316 spin_lock_irqsave(&hvcsd->lock, flags);
37317 kref_get(&hvcsd->kref);
37318 - hvcsd->open_count++;
37319 + local_inc(&hvcsd->open_count);
37320 hvcsd->todo_mask |= HVCS_SCHED_READ;
37321 spin_unlock_irqrestore(&hvcsd->lock, flags);
37322
37323 @@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37324 hvcsd = tty->driver_data;
37325
37326 spin_lock_irqsave(&hvcsd->lock, flags);
37327 - if (--hvcsd->open_count == 0) {
37328 + if (local_dec_and_test(&hvcsd->open_count)) {
37329
37330 vio_disable_interrupts(hvcsd->vdev);
37331
37332 @@ -1242,10 +1243,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37333 free_irq(irq, hvcsd);
37334 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37335 return;
37336 - } else if (hvcsd->open_count < 0) {
37337 + } else if (local_read(&hvcsd->open_count) < 0) {
37338 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37339 " is missmanaged.\n",
37340 - hvcsd->vdev->unit_address, hvcsd->open_count);
37341 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37342 }
37343
37344 spin_unlock_irqrestore(&hvcsd->lock, flags);
37345 @@ -1261,7 +1262,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37346
37347 spin_lock_irqsave(&hvcsd->lock, flags);
37348 /* Preserve this so that we know how many kref refs to put */
37349 - temp_open_count = hvcsd->open_count;
37350 + temp_open_count = local_read(&hvcsd->open_count);
37351
37352 /*
37353 * Don't kref put inside the spinlock because the destruction
37354 @@ -1276,7 +1277,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37355 hvcsd->tty->driver_data = NULL;
37356 hvcsd->tty = NULL;
37357
37358 - hvcsd->open_count = 0;
37359 + local_set(&hvcsd->open_count, 0);
37360
37361 /* This will drop any buffered data on the floor which is OK in a hangup
37362 * scenario. */
37363 @@ -1347,7 +1348,7 @@ static int hvcs_write(struct tty_struct *tty,
37364 * the middle of a write operation? This is a crummy place to do this
37365 * but we want to keep it all in the spinlock.
37366 */
37367 - if (hvcsd->open_count <= 0) {
37368 + if (local_read(&hvcsd->open_count) <= 0) {
37369 spin_unlock_irqrestore(&hvcsd->lock, flags);
37370 return -ENODEV;
37371 }
37372 @@ -1421,7 +1422,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37373 {
37374 struct hvcs_struct *hvcsd = tty->driver_data;
37375
37376 - if (!hvcsd || hvcsd->open_count <= 0)
37377 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37378 return 0;
37379
37380 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
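Annotation: the hvcs hunks above turn the plain int open_count into a local_t, so the open/close bookkeeping becomes an atomic inc/dec and the close path keys off local_dec_and_test(). A small userspace sketch of that open/close pattern using C11 atomics (local_t itself is an arch-specific kernel type); names are illustrative only.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int open_count;

    static void dev_open(void)  { atomic_fetch_add(&open_count, 1); }

    /* Returns true when the last opener is gone, mirroring
     * local_dec_and_test() in the hvcs_close() hunk above. */
    static bool dev_close(void)
    {
        return atomic_fetch_sub(&open_count, 1) == 1;
    }

    int main(void)
    {
        dev_open();
        dev_open();
        printf("last close? %d\n", dev_close()); /* 0 */
        printf("last close? %d\n", dev_close()); /* 1: tear down here */
        return 0;
    }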
37381 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37382 index 4daf962..b4a2281 100644
37383 --- a/drivers/tty/ipwireless/tty.c
37384 +++ b/drivers/tty/ipwireless/tty.c
37385 @@ -29,6 +29,7 @@
37386 #include <linux/tty_driver.h>
37387 #include <linux/tty_flip.h>
37388 #include <linux/uaccess.h>
37389 +#include <asm/local.h>
37390
37391 #include "tty.h"
37392 #include "network.h"
37393 @@ -51,7 +52,7 @@ struct ipw_tty {
37394 int tty_type;
37395 struct ipw_network *network;
37396 struct tty_struct *linux_tty;
37397 - int open_count;
37398 + local_t open_count;
37399 unsigned int control_lines;
37400 struct mutex ipw_tty_mutex;
37401 int tx_bytes_queued;
37402 @@ -117,10 +118,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37403 mutex_unlock(&tty->ipw_tty_mutex);
37404 return -ENODEV;
37405 }
37406 - if (tty->open_count == 0)
37407 + if (local_read(&tty->open_count) == 0)
37408 tty->tx_bytes_queued = 0;
37409
37410 - tty->open_count++;
37411 + local_inc(&tty->open_count);
37412
37413 tty->linux_tty = linux_tty;
37414 linux_tty->driver_data = tty;
37415 @@ -136,9 +137,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37416
37417 static void do_ipw_close(struct ipw_tty *tty)
37418 {
37419 - tty->open_count--;
37420 -
37421 - if (tty->open_count == 0) {
37422 + if (local_dec_return(&tty->open_count) == 0) {
37423 struct tty_struct *linux_tty = tty->linux_tty;
37424
37425 if (linux_tty != NULL) {
37426 @@ -159,7 +158,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37427 return;
37428
37429 mutex_lock(&tty->ipw_tty_mutex);
37430 - if (tty->open_count == 0) {
37431 + if (local_read(&tty->open_count) == 0) {
37432 mutex_unlock(&tty->ipw_tty_mutex);
37433 return;
37434 }
37435 @@ -188,7 +187,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37436 return;
37437 }
37438
37439 - if (!tty->open_count) {
37440 + if (!local_read(&tty->open_count)) {
37441 mutex_unlock(&tty->ipw_tty_mutex);
37442 return;
37443 }
37444 @@ -230,7 +229,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37445 return -ENODEV;
37446
37447 mutex_lock(&tty->ipw_tty_mutex);
37448 - if (!tty->open_count) {
37449 + if (!local_read(&tty->open_count)) {
37450 mutex_unlock(&tty->ipw_tty_mutex);
37451 return -EINVAL;
37452 }
37453 @@ -270,7 +269,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37454 if (!tty)
37455 return -ENODEV;
37456
37457 - if (!tty->open_count)
37458 + if (!local_read(&tty->open_count))
37459 return -EINVAL;
37460
37461 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37462 @@ -312,7 +311,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37463 if (!tty)
37464 return 0;
37465
37466 - if (!tty->open_count)
37467 + if (!local_read(&tty->open_count))
37468 return 0;
37469
37470 return tty->tx_bytes_queued;
37471 @@ -393,7 +392,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37472 if (!tty)
37473 return -ENODEV;
37474
37475 - if (!tty->open_count)
37476 + if (!local_read(&tty->open_count))
37477 return -EINVAL;
37478
37479 return get_control_lines(tty);
37480 @@ -409,7 +408,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37481 if (!tty)
37482 return -ENODEV;
37483
37484 - if (!tty->open_count)
37485 + if (!local_read(&tty->open_count))
37486 return -EINVAL;
37487
37488 return set_control_lines(tty, set, clear);
37489 @@ -423,7 +422,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37490 if (!tty)
37491 return -ENODEV;
37492
37493 - if (!tty->open_count)
37494 + if (!local_read(&tty->open_count))
37495 return -EINVAL;
37496
37497 /* FIXME: Exactly how is the tty object locked here .. */
37498 @@ -572,7 +571,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37499 against a parallel ioctl etc */
37500 mutex_lock(&ttyj->ipw_tty_mutex);
37501 }
37502 - while (ttyj->open_count)
37503 + while (local_read(&ttyj->open_count))
37504 do_ipw_close(ttyj);
37505 ipwireless_disassociate_network_ttys(network,
37506 ttyj->channel_idx);
37507 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37508 index c43b683..0a88f1c 100644
37509 --- a/drivers/tty/n_gsm.c
37510 +++ b/drivers/tty/n_gsm.c
37511 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37512 kref_init(&dlci->ref);
37513 mutex_init(&dlci->mutex);
37514 dlci->fifo = &dlci->_fifo;
37515 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37516 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37517 kfree(dlci);
37518 return NULL;
37519 }
37520 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37521 index 94b6eda..15f7cec 100644
37522 --- a/drivers/tty/n_tty.c
37523 +++ b/drivers/tty/n_tty.c
37524 @@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37525 {
37526 *ops = tty_ldisc_N_TTY;
37527 ops->owner = NULL;
37528 - ops->refcount = ops->flags = 0;
37529 + atomic_set(&ops->refcount, 0);
37530 + ops->flags = 0;
37531 }
37532 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37533 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37534 index eeae7fa..177a743 100644
37535 --- a/drivers/tty/pty.c
37536 +++ b/drivers/tty/pty.c
37537 @@ -707,8 +707,10 @@ static void __init unix98_pty_init(void)
37538 panic("Couldn't register Unix98 pts driver");
37539
37540 /* Now create the /dev/ptmx special device */
37541 + pax_open_kernel();
37542 tty_default_fops(&ptmx_fops);
37543 - ptmx_fops.open = ptmx_open;
37544 + *(void **)&ptmx_fops.open = ptmx_open;
37545 + pax_close_kernel();
37546
37547 cdev_init(&ptmx_cdev, &ptmx_fops);
37548 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37549 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37550 index 2b42a01..32a2ed3 100644
37551 --- a/drivers/tty/serial/kgdboc.c
37552 +++ b/drivers/tty/serial/kgdboc.c
37553 @@ -24,8 +24,9 @@
37554 #define MAX_CONFIG_LEN 40
37555
37556 static struct kgdb_io kgdboc_io_ops;
37557 +static struct kgdb_io kgdboc_io_ops_console;
37558
37559 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37560 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37561 static int configured = -1;
37562
37563 static char config[MAX_CONFIG_LEN];
37564 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37565 kgdboc_unregister_kbd();
37566 if (configured == 1)
37567 kgdb_unregister_io_module(&kgdboc_io_ops);
37568 + else if (configured == 2)
37569 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37570 }
37571
37572 static int configure_kgdboc(void)
37573 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37574 int err;
37575 char *cptr = config;
37576 struct console *cons;
37577 + int is_console = 0;
37578
37579 err = kgdboc_option_setup(config);
37580 if (err || !strlen(config) || isspace(config[0]))
37581 goto noconfig;
37582
37583 err = -ENODEV;
37584 - kgdboc_io_ops.is_console = 0;
37585 kgdb_tty_driver = NULL;
37586
37587 kgdboc_use_kms = 0;
37588 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37589 int idx;
37590 if (cons->device && cons->device(cons, &idx) == p &&
37591 idx == tty_line) {
37592 - kgdboc_io_ops.is_console = 1;
37593 + is_console = 1;
37594 break;
37595 }
37596 cons = cons->next;
37597 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
37598 kgdb_tty_line = tty_line;
37599
37600 do_register:
37601 - err = kgdb_register_io_module(&kgdboc_io_ops);
37602 + if (is_console) {
37603 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
37604 + configured = 2;
37605 + } else {
37606 + err = kgdb_register_io_module(&kgdboc_io_ops);
37607 + configured = 1;
37608 + }
37609 if (err)
37610 goto noconfig;
37611
37612 - configured = 1;
37613 -
37614 return 0;
37615
37616 noconfig:
37617 @@ -213,7 +220,7 @@ noconfig:
37618 static int __init init_kgdboc(void)
37619 {
37620 /* Already configured? */
37621 - if (configured == 1)
37622 + if (configured >= 1)
37623 return 0;
37624
37625 return configure_kgdboc();
37626 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37627 if (config[len - 1] == '\n')
37628 config[len - 1] = '\0';
37629
37630 - if (configured == 1)
37631 + if (configured >= 1)
37632 cleanup_kgdboc();
37633
37634 /* Go and configure with the new params. */
37635 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
37636 .post_exception = kgdboc_post_exp_handler,
37637 };
37638
37639 +static struct kgdb_io kgdboc_io_ops_console = {
37640 + .name = "kgdboc",
37641 + .read_char = kgdboc_get_char,
37642 + .write_char = kgdboc_put_char,
37643 + .pre_exception = kgdboc_pre_exp_handler,
37644 + .post_exception = kgdboc_post_exp_handler,
37645 + .is_console = 1
37646 +};
37647 +
37648 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37649 /* This is only available if kgdboc is a built in for early debugging */
37650 static int __init kgdboc_early_init(char *opt)
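Annotation: rather than toggling is_console inside one writable kgdb_io struct at runtime, the kgdboc change above registers one of two fully initialized ops structs and records which via configured = 1/2, so both variants can eventually live in read-only memory. A userspace sketch of the pick-one-of-two-const-ops idiom; the struct and functions below are made up for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    struct io_ops {
        const char *name;
        bool        is_console;
    };

    /* Two complete, immutable variants instead of one mutable struct
     * whose is_console field gets patched during configuration. */
    static const struct io_ops plain_ops   = { "kgdboc", false };
    static const struct io_ops console_ops = { "kgdboc", true  };

    static const struct io_ops *active;

    static void configure(bool is_console)
    {
        active = is_console ? &console_ops : &plain_ops;
    }

    int main(void)
    {
        configure(true);
        printf("%s console=%d\n", active->name, active->is_console);
        return 0;
    }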
37651 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
37652 index 05728894..b9d44c6 100644
37653 --- a/drivers/tty/sysrq.c
37654 +++ b/drivers/tty/sysrq.c
37655 @@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
37656 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
37657 size_t count, loff_t *ppos)
37658 {
37659 - if (count) {
37660 + if (count && capable(CAP_SYS_ADMIN)) {
37661 char c;
37662
37663 if (get_user(c, buf))
37664 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37665 index d939bd7..33d92cd 100644
37666 --- a/drivers/tty/tty_io.c
37667 +++ b/drivers/tty/tty_io.c
37668 @@ -3278,7 +3278,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37669
37670 void tty_default_fops(struct file_operations *fops)
37671 {
37672 - *fops = tty_fops;
37673 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37674 }
37675
37676 /*
37677 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37678 index 24b95db..9c078d0 100644
37679 --- a/drivers/tty/tty_ldisc.c
37680 +++ b/drivers/tty/tty_ldisc.c
37681 @@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37682 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37683 struct tty_ldisc_ops *ldo = ld->ops;
37684
37685 - ldo->refcount--;
37686 + atomic_dec(&ldo->refcount);
37687 module_put(ldo->owner);
37688 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37689
37690 @@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
37691 spin_lock_irqsave(&tty_ldisc_lock, flags);
37692 tty_ldiscs[disc] = new_ldisc;
37693 new_ldisc->num = disc;
37694 - new_ldisc->refcount = 0;
37695 + atomic_set(&new_ldisc->refcount, 0);
37696 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37697
37698 return ret;
37699 @@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
37700 return -EINVAL;
37701
37702 spin_lock_irqsave(&tty_ldisc_lock, flags);
37703 - if (tty_ldiscs[disc]->refcount)
37704 + if (atomic_read(&tty_ldiscs[disc]->refcount))
37705 ret = -EBUSY;
37706 else
37707 tty_ldiscs[disc] = NULL;
37708 @@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
37709 if (ldops) {
37710 ret = ERR_PTR(-EAGAIN);
37711 if (try_module_get(ldops->owner)) {
37712 - ldops->refcount++;
37713 + atomic_inc(&ldops->refcount);
37714 ret = ldops;
37715 }
37716 }
37717 @@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
37718 unsigned long flags;
37719
37720 spin_lock_irqsave(&tty_ldisc_lock, flags);
37721 - ldops->refcount--;
37722 + atomic_dec(&ldops->refcount);
37723 module_put(ldops->owner);
37724 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37725 }
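Annotation: the tty_ldisc hunks above change the ldisc ops refcount from a plain int manipulated under tty_ldisc_lock into an atomic_t, so the ++/-- pairs become atomic_inc()/atomic_dec() and the unregister busy-check becomes an atomic_read(). A compact userspace analogue with C11 atomics, simplified to a single ops slot; -16 stands in for -EBUSY.

    #include <stdatomic.h>
    #include <stdio.h>

    struct ldisc_ops {
        atomic_int refcount;   /* was: int refcount (ldo->refcount++/--) */
    };

    static struct ldisc_ops ops;

    static void get_ops(struct ldisc_ops *o) { atomic_fetch_add(&o->refcount, 1); }
    static void put_ops(struct ldisc_ops *o) { atomic_fetch_sub(&o->refcount, 1); }

    /* Unregister only succeeds when nobody holds a reference, mirroring
     * the atomic_read(&tty_ldiscs[disc]->refcount) busy check above. */
    static int unregister_ops(struct ldisc_ops *o)
    {
        return atomic_load(&o->refcount) ? -16 /* -EBUSY */ : 0;
    }

    int main(void)
    {
        get_ops(&ops);
        printf("unregister: %d\n", unregister_ops(&ops)); /* -16 */
        put_ops(&ops);
        printf("unregister: %d\n", unregister_ops(&ops)); /* 0 */
        return 0;
    }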
37726 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
37727 index 3b0c4e3..f98a992 100644
37728 --- a/drivers/tty/vt/keyboard.c
37729 +++ b/drivers/tty/vt/keyboard.c
37730 @@ -663,6 +663,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
37731 kbd->kbdmode == VC_OFF) &&
37732 value != KVAL(K_SAK))
37733 return; /* SAK is allowed even in raw mode */
37734 +
37735 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
37736 + {
37737 + void *func = fn_handler[value];
37738 + if (func == fn_show_state || func == fn_show_ptregs ||
37739 + func == fn_show_mem)
37740 + return;
37741 + }
37742 +#endif
37743 +
37744 fn_handler[value](vc);
37745 }
37746
37747 @@ -1812,9 +1822,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
37748 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
37749 return -EFAULT;
37750
37751 - if (!capable(CAP_SYS_TTY_CONFIG))
37752 - perm = 0;
37753 -
37754 switch (cmd) {
37755 case KDGKBENT:
37756 /* Ensure another thread doesn't free it under us */
37757 @@ -1829,6 +1836,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
37758 spin_unlock_irqrestore(&kbd_event_lock, flags);
37759 return put_user(val, &user_kbe->kb_value);
37760 case KDSKBENT:
37761 + if (!capable(CAP_SYS_TTY_CONFIG))
37762 + perm = 0;
37763 +
37764 if (!perm)
37765 return -EPERM;
37766 if (!i && v == K_NOSUCHMAP) {
37767 @@ -1919,9 +1929,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37768 int i, j, k;
37769 int ret;
37770
37771 - if (!capable(CAP_SYS_TTY_CONFIG))
37772 - perm = 0;
37773 -
37774 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
37775 if (!kbs) {
37776 ret = -ENOMEM;
37777 @@ -1955,6 +1962,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37778 kfree(kbs);
37779 return ((p && *p) ? -EOVERFLOW : 0);
37780 case KDSKBSENT:
37781 + if (!capable(CAP_SYS_TTY_CONFIG))
37782 + perm = 0;
37783 +
37784 if (!perm) {
37785 ret = -EPERM;
37786 goto reterr;
37787 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
37788 index a783d53..cb30d94 100644
37789 --- a/drivers/uio/uio.c
37790 +++ b/drivers/uio/uio.c
37791 @@ -25,6 +25,7 @@
37792 #include <linux/kobject.h>
37793 #include <linux/cdev.h>
37794 #include <linux/uio_driver.h>
37795 +#include <asm/local.h>
37796
37797 #define UIO_MAX_DEVICES (1U << MINORBITS)
37798
37799 @@ -32,10 +33,10 @@ struct uio_device {
37800 struct module *owner;
37801 struct device *dev;
37802 int minor;
37803 - atomic_t event;
37804 + atomic_unchecked_t event;
37805 struct fasync_struct *async_queue;
37806 wait_queue_head_t wait;
37807 - int vma_count;
37808 + local_t vma_count;
37809 struct uio_info *info;
37810 struct kobject *map_dir;
37811 struct kobject *portio_dir;
37812 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
37813 struct device_attribute *attr, char *buf)
37814 {
37815 struct uio_device *idev = dev_get_drvdata(dev);
37816 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
37817 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
37818 }
37819
37820 static struct device_attribute uio_class_attributes[] = {
37821 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
37822 {
37823 struct uio_device *idev = info->uio_dev;
37824
37825 - atomic_inc(&idev->event);
37826 + atomic_inc_unchecked(&idev->event);
37827 wake_up_interruptible(&idev->wait);
37828 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37829 }
37830 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
37831 }
37832
37833 listener->dev = idev;
37834 - listener->event_count = atomic_read(&idev->event);
37835 + listener->event_count = atomic_read_unchecked(&idev->event);
37836 filep->private_data = listener;
37837
37838 if (idev->info->open) {
37839 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
37840 return -EIO;
37841
37842 poll_wait(filep, &idev->wait, wait);
37843 - if (listener->event_count != atomic_read(&idev->event))
37844 + if (listener->event_count != atomic_read_unchecked(&idev->event))
37845 return POLLIN | POLLRDNORM;
37846 return 0;
37847 }
37848 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
37849 do {
37850 set_current_state(TASK_INTERRUPTIBLE);
37851
37852 - event_count = atomic_read(&idev->event);
37853 + event_count = atomic_read_unchecked(&idev->event);
37854 if (event_count != listener->event_count) {
37855 if (copy_to_user(buf, &event_count, count))
37856 retval = -EFAULT;
37857 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
37858 static void uio_vma_open(struct vm_area_struct *vma)
37859 {
37860 struct uio_device *idev = vma->vm_private_data;
37861 - idev->vma_count++;
37862 + local_inc(&idev->vma_count);
37863 }
37864
37865 static void uio_vma_close(struct vm_area_struct *vma)
37866 {
37867 struct uio_device *idev = vma->vm_private_data;
37868 - idev->vma_count--;
37869 + local_dec(&idev->vma_count);
37870 }
37871
37872 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37873 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
37874 idev->owner = owner;
37875 idev->info = info;
37876 init_waitqueue_head(&idev->wait);
37877 - atomic_set(&idev->event, 0);
37878 + atomic_set_unchecked(&idev->event, 0);
37879
37880 ret = uio_get_minor(idev);
37881 if (ret)
37882 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
37883 index 98b89fe..aff824e 100644
37884 --- a/drivers/usb/atm/cxacru.c
37885 +++ b/drivers/usb/atm/cxacru.c
37886 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
37887 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
37888 if (ret < 2)
37889 return -EINVAL;
37890 - if (index < 0 || index > 0x7f)
37891 + if (index > 0x7f)
37892 return -EINVAL;
37893 pos += tmp;
37894
37895 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
37896 index d3448ca..d2864ca 100644
37897 --- a/drivers/usb/atm/usbatm.c
37898 +++ b/drivers/usb/atm/usbatm.c
37899 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37900 if (printk_ratelimit())
37901 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37902 __func__, vpi, vci);
37903 - atomic_inc(&vcc->stats->rx_err);
37904 + atomic_inc_unchecked(&vcc->stats->rx_err);
37905 return;
37906 }
37907
37908 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37909 if (length > ATM_MAX_AAL5_PDU) {
37910 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37911 __func__, length, vcc);
37912 - atomic_inc(&vcc->stats->rx_err);
37913 + atomic_inc_unchecked(&vcc->stats->rx_err);
37914 goto out;
37915 }
37916
37917 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37918 if (sarb->len < pdu_length) {
37919 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37920 __func__, pdu_length, sarb->len, vcc);
37921 - atomic_inc(&vcc->stats->rx_err);
37922 + atomic_inc_unchecked(&vcc->stats->rx_err);
37923 goto out;
37924 }
37925
37926 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37927 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37928 __func__, vcc);
37929 - atomic_inc(&vcc->stats->rx_err);
37930 + atomic_inc_unchecked(&vcc->stats->rx_err);
37931 goto out;
37932 }
37933
37934 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37935 if (printk_ratelimit())
37936 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37937 __func__, length);
37938 - atomic_inc(&vcc->stats->rx_drop);
37939 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37940 goto out;
37941 }
37942
37943 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37944
37945 vcc->push(vcc, skb);
37946
37947 - atomic_inc(&vcc->stats->rx);
37948 + atomic_inc_unchecked(&vcc->stats->rx);
37949 out:
37950 skb_trim(sarb, 0);
37951 }
37952 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
37953 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37954
37955 usbatm_pop(vcc, skb);
37956 - atomic_inc(&vcc->stats->tx);
37957 + atomic_inc_unchecked(&vcc->stats->tx);
37958
37959 skb = skb_dequeue(&instance->sndqueue);
37960 }
37961 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
37962 if (!left--)
37963 return sprintf(page,
37964 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37965 - atomic_read(&atm_dev->stats.aal5.tx),
37966 - atomic_read(&atm_dev->stats.aal5.tx_err),
37967 - atomic_read(&atm_dev->stats.aal5.rx),
37968 - atomic_read(&atm_dev->stats.aal5.rx_err),
37969 - atomic_read(&atm_dev->stats.aal5.rx_drop));
37970 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37971 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37972 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37973 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37974 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37975
37976 if (!left--) {
37977 if (instance->disconnected)
37978 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
37979 index d956965..4179a77 100644
37980 --- a/drivers/usb/core/devices.c
37981 +++ b/drivers/usb/core/devices.c
37982 @@ -126,7 +126,7 @@ static const char format_endpt[] =
37983 * time it gets called.
37984 */
37985 static struct device_connect_event {
37986 - atomic_t count;
37987 + atomic_unchecked_t count;
37988 wait_queue_head_t wait;
37989 } device_event = {
37990 .count = ATOMIC_INIT(1),
37991 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
37992
37993 void usbfs_conn_disc_event(void)
37994 {
37995 - atomic_add(2, &device_event.count);
37996 + atomic_add_unchecked(2, &device_event.count);
37997 wake_up(&device_event.wait);
37998 }
37999
38000 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38001
38002 poll_wait(file, &device_event.wait, wait);
38003
38004 - event_count = atomic_read(&device_event.count);
38005 + event_count = atomic_read_unchecked(&device_event.count);
38006 if (file->f_version != event_count) {
38007 file->f_version = event_count;
38008 return POLLIN | POLLRDNORM;
38009 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38010 index 1fc8f12..20647c1 100644
38011 --- a/drivers/usb/early/ehci-dbgp.c
38012 +++ b/drivers/usb/early/ehci-dbgp.c
38013 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38014
38015 #ifdef CONFIG_KGDB
38016 static struct kgdb_io kgdbdbgp_io_ops;
38017 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38018 +static struct kgdb_io kgdbdbgp_io_ops_console;
38019 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38020 #else
38021 #define dbgp_kgdb_mode (0)
38022 #endif
38023 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38024 .write_char = kgdbdbgp_write_char,
38025 };
38026
38027 +static struct kgdb_io kgdbdbgp_io_ops_console = {
38028 + .name = "kgdbdbgp",
38029 + .read_char = kgdbdbgp_read_char,
38030 + .write_char = kgdbdbgp_write_char,
38031 + .is_console = 1
38032 +};
38033 +
38034 static int kgdbdbgp_wait_time;
38035
38036 static int __init kgdbdbgp_parse_config(char *str)
38037 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38038 ptr++;
38039 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38040 }
38041 - kgdb_register_io_module(&kgdbdbgp_io_ops);
38042 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38043 + if (early_dbgp_console.index != -1)
38044 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38045 + else
38046 + kgdb_register_io_module(&kgdbdbgp_io_ops);
38047
38048 return 0;
38049 }
38050 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38051 index d6bea3e..60b250e 100644
38052 --- a/drivers/usb/wusbcore/wa-hc.h
38053 +++ b/drivers/usb/wusbcore/wa-hc.h
38054 @@ -192,7 +192,7 @@ struct wahc {
38055 struct list_head xfer_delayed_list;
38056 spinlock_t xfer_list_lock;
38057 struct work_struct xfer_work;
38058 - atomic_t xfer_id_count;
38059 + atomic_unchecked_t xfer_id_count;
38060 };
38061
38062
38063 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38064 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38065 spin_lock_init(&wa->xfer_list_lock);
38066 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38067 - atomic_set(&wa->xfer_id_count, 1);
38068 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38069 }
38070
38071 /**
38072 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38073 index 57c01ab..8a05959 100644
38074 --- a/drivers/usb/wusbcore/wa-xfer.c
38075 +++ b/drivers/usb/wusbcore/wa-xfer.c
38076 @@ -296,7 +296,7 @@ out:
38077 */
38078 static void wa_xfer_id_init(struct wa_xfer *xfer)
38079 {
38080 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38081 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38082 }
38083
38084 /*
38085 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38086 index 51e4c1e..9d87e2a 100644
38087 --- a/drivers/vhost/vhost.c
38088 +++ b/drivers/vhost/vhost.c
38089 @@ -632,7 +632,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38090 return 0;
38091 }
38092
38093 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38094 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38095 {
38096 struct file *eventfp, *filep = NULL,
38097 *pollstart = NULL, *pollstop = NULL;
38098 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38099 index b0b2ac3..89a4399 100644
38100 --- a/drivers/video/aty/aty128fb.c
38101 +++ b/drivers/video/aty/aty128fb.c
38102 @@ -148,7 +148,7 @@ enum {
38103 };
38104
38105 /* Must match above enum */
38106 -static const char *r128_family[] __devinitdata = {
38107 +static const char *r128_family[] __devinitconst = {
38108 "AGP",
38109 "PCI",
38110 "PRO AGP",
38111 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38112 index 5c3960d..15cf8fc 100644
38113 --- a/drivers/video/fbcmap.c
38114 +++ b/drivers/video/fbcmap.c
38115 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38116 rc = -ENODEV;
38117 goto out;
38118 }
38119 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38120 - !info->fbops->fb_setcmap)) {
38121 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38122 rc = -EINVAL;
38123 goto out1;
38124 }
38125 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38126 index c6ce416..3b9b642 100644
38127 --- a/drivers/video/fbmem.c
38128 +++ b/drivers/video/fbmem.c
38129 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38130 image->dx += image->width + 8;
38131 }
38132 } else if (rotate == FB_ROTATE_UD) {
38133 - for (x = 0; x < num && image->dx >= 0; x++) {
38134 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38135 info->fbops->fb_imageblit(info, image);
38136 image->dx -= image->width + 8;
38137 }
38138 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38139 image->dy += image->height + 8;
38140 }
38141 } else if (rotate == FB_ROTATE_CCW) {
38142 - for (x = 0; x < num && image->dy >= 0; x++) {
38143 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38144 info->fbops->fb_imageblit(info, image);
38145 image->dy -= image->height + 8;
38146 }
38147 @@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38148 return -EFAULT;
38149 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38150 return -EINVAL;
38151 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38152 + if (con2fb.framebuffer >= FB_MAX)
38153 return -EINVAL;
38154 if (!registered_fb[con2fb.framebuffer])
38155 request_module("fb%d", con2fb.framebuffer);
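Annotation: the fbcmap/fbmem hunks (and the earlier cxacru one) drop `< 0` tests on values that are unsigned, where the comparison is always false and only obscures the upper-bound check that actually matters. A short userspace illustration of why; FB_MAX is hard-coded as 32 here purely for the example.

    #include <stdio.h>

    int main(void)
    {
        unsigned int framebuffer = 0xffffffffu; /* e.g. -1 passed in from userspace */

        /* Always false for an unsigned value: the compiler warns and may
         * delete the branch entirely, so it protects nothing. */
        if (framebuffer < 0)
            puts("never reached");

        /* The upper-bound check alone is the meaningful one. */
        if (framebuffer >= 32 /* FB_MAX */)
            puts("rejected: out of range");
        return 0;
    }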
38156 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38157 index 5a5d092..265c5ed 100644
38158 --- a/drivers/video/geode/gx1fb_core.c
38159 +++ b/drivers/video/geode/gx1fb_core.c
38160 @@ -29,7 +29,7 @@ static int crt_option = 1;
38161 static char panel_option[32] = "";
38162
38163 /* Modes relevant to the GX1 (taken from modedb.c) */
38164 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
38165 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
38166 /* 640x480-60 VESA */
38167 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38168 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38169 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38170 index 0fad23f..0e9afa4 100644
38171 --- a/drivers/video/gxt4500.c
38172 +++ b/drivers/video/gxt4500.c
38173 @@ -156,7 +156,7 @@ struct gxt4500_par {
38174 static char *mode_option;
38175
38176 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38177 -static const struct fb_videomode defaultmode __devinitdata = {
38178 +static const struct fb_videomode defaultmode __devinitconst = {
38179 .refresh = 60,
38180 .xres = 1280,
38181 .yres = 1024,
38182 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38183 return 0;
38184 }
38185
38186 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38187 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38188 .id = "IBM GXT4500P",
38189 .type = FB_TYPE_PACKED_PIXELS,
38190 .visual = FB_VISUAL_PSEUDOCOLOR,
38191 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38192 index 7672d2e..b56437f 100644
38193 --- a/drivers/video/i810/i810_accel.c
38194 +++ b/drivers/video/i810/i810_accel.c
38195 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38196 }
38197 }
38198 printk("ringbuffer lockup!!!\n");
38199 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38200 i810_report_error(mmio);
38201 par->dev_flags |= LOCKUP;
38202 info->pixmap.scan_align = 1;
38203 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38204 index b83f361..2b05a91 100644
38205 --- a/drivers/video/i810/i810_main.c
38206 +++ b/drivers/video/i810/i810_main.c
38207 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38208 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38209
38210 /* PCI */
38211 -static const char *i810_pci_list[] __devinitdata = {
38212 +static const char *i810_pci_list[] __devinitconst = {
38213 "Intel(R) 810 Framebuffer Device" ,
38214 "Intel(R) 810-DC100 Framebuffer Device" ,
38215 "Intel(R) 810E Framebuffer Device" ,
38216 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38217 index de36693..3c63fc2 100644
38218 --- a/drivers/video/jz4740_fb.c
38219 +++ b/drivers/video/jz4740_fb.c
38220 @@ -136,7 +136,7 @@ struct jzfb {
38221 uint32_t pseudo_palette[16];
38222 };
38223
38224 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38225 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38226 .id = "JZ4740 FB",
38227 .type = FB_TYPE_PACKED_PIXELS,
38228 .visual = FB_VISUAL_TRUECOLOR,
38229 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38230 index 3c14e43..eafa544 100644
38231 --- a/drivers/video/logo/logo_linux_clut224.ppm
38232 +++ b/drivers/video/logo/logo_linux_clut224.ppm
38233 @@ -1,1604 +1,1123 @@
38234 P3
38235 -# Standard 224-color Linux logo
38236 80 80
38237 255
[... raw PPM pixel rows of the removed standard 80x80, 224-color Linux logo elided for readability; the removal (and the replacement pixel data announced by the hunk header) continues beyond this excerpt ...]
38591 - 70 70 70 58 58 58 22 22 22 6 6 6
38592 - 0 0 0 0 0 0 0 0 0 0 0 0
38593 - 0 0 0 0 0 0 0 0 0 0 0 0
38594 - 0 0 0 0 0 0 0 0 0 0 0 0
38595 - 0 0 0 0 0 0 0 0 0 0 0 0
38596 - 0 0 0 0 0 0 0 0 0 0 0 0
38597 - 0 0 0 0 0 0 0 0 0 0 0 0
38598 - 0 0 0 0 0 0 0 0 1 0 0 0
38599 - 0 0 1 0 0 1 0 0 1 0 0 0
38600 - 0 0 0 0 0 0 0 0 0 0 0 0
38601 - 0 0 0 0 0 0 0 0 0 0 0 0
38602 - 0 0 0 0 0 0 0 0 0 0 0 0
38603 - 0 0 0 0 0 0 0 0 0 0 0 0
38604 - 0 0 0 0 0 0 0 0 0 14 14 14
38605 - 42 42 42 86 86 86 6 6 6 116 116 116
38606 -106 106 106 6 6 6 70 70 70 149 149 149
38607 -128 128 128 18 18 18 38 38 38 54 54 54
38608 -221 221 221 106 106 106 2 2 6 14 14 14
38609 - 46 46 46 190 190 190 198 198 198 2 2 6
38610 - 2 2 6 2 2 6 2 2 6 2 2 6
38611 - 74 74 74 62 62 62 22 22 22 6 6 6
38612 - 0 0 0 0 0 0 0 0 0 0 0 0
38613 - 0 0 0 0 0 0 0 0 0 0 0 0
38614 - 0 0 0 0 0 0 0 0 0 0 0 0
38615 - 0 0 0 0 0 0 0 0 0 0 0 0
38616 - 0 0 0 0 0 0 0 0 0 0 0 0
38617 - 0 0 0 0 0 0 0 0 0 0 0 0
38618 - 0 0 0 0 0 0 0 0 1 0 0 0
38619 - 0 0 1 0 0 0 0 0 1 0 0 0
38620 - 0 0 0 0 0 0 0 0 0 0 0 0
38621 - 0 0 0 0 0 0 0 0 0 0 0 0
38622 - 0 0 0 0 0 0 0 0 0 0 0 0
38623 - 0 0 0 0 0 0 0 0 0 0 0 0
38624 - 0 0 0 0 0 0 0 0 0 14 14 14
38625 - 42 42 42 94 94 94 14 14 14 101 101 101
38626 -128 128 128 2 2 6 18 18 18 116 116 116
38627 -118 98 46 121 92 8 121 92 8 98 78 10
38628 -162 162 162 106 106 106 2 2 6 2 2 6
38629 - 2 2 6 195 195 195 195 195 195 6 6 6
38630 - 2 2 6 2 2 6 2 2 6 2 2 6
38631 - 74 74 74 62 62 62 22 22 22 6 6 6
38632 - 0 0 0 0 0 0 0 0 0 0 0 0
38633 - 0 0 0 0 0 0 0 0 0 0 0 0
38634 - 0 0 0 0 0 0 0 0 0 0 0 0
38635 - 0 0 0 0 0 0 0 0 0 0 0 0
38636 - 0 0 0 0 0 0 0 0 0 0 0 0
38637 - 0 0 0 0 0 0 0 0 0 0 0 0
38638 - 0 0 0 0 0 0 0 0 1 0 0 1
38639 - 0 0 1 0 0 0 0 0 1 0 0 0
38640 - 0 0 0 0 0 0 0 0 0 0 0 0
38641 - 0 0 0 0 0 0 0 0 0 0 0 0
38642 - 0 0 0 0 0 0 0 0 0 0 0 0
38643 - 0 0 0 0 0 0 0 0 0 0 0 0
38644 - 0 0 0 0 0 0 0 0 0 10 10 10
38645 - 38 38 38 90 90 90 14 14 14 58 58 58
38646 -210 210 210 26 26 26 54 38 6 154 114 10
38647 -226 170 11 236 186 11 225 175 15 184 144 12
38648 -215 174 15 175 146 61 37 26 9 2 2 6
38649 - 70 70 70 246 246 246 138 138 138 2 2 6
38650 - 2 2 6 2 2 6 2 2 6 2 2 6
38651 - 70 70 70 66 66 66 26 26 26 6 6 6
38652 - 0 0 0 0 0 0 0 0 0 0 0 0
38653 - 0 0 0 0 0 0 0 0 0 0 0 0
38654 - 0 0 0 0 0 0 0 0 0 0 0 0
38655 - 0 0 0 0 0 0 0 0 0 0 0 0
38656 - 0 0 0 0 0 0 0 0 0 0 0 0
38657 - 0 0 0 0 0 0 0 0 0 0 0 0
38658 - 0 0 0 0 0 0 0 0 0 0 0 0
38659 - 0 0 0 0 0 0 0 0 0 0 0 0
38660 - 0 0 0 0 0 0 0 0 0 0 0 0
38661 - 0 0 0 0 0 0 0 0 0 0 0 0
38662 - 0 0 0 0 0 0 0 0 0 0 0 0
38663 - 0 0 0 0 0 0 0 0 0 0 0 0
38664 - 0 0 0 0 0 0 0 0 0 10 10 10
38665 - 38 38 38 86 86 86 14 14 14 10 10 10
38666 -195 195 195 188 164 115 192 133 9 225 175 15
38667 -239 182 13 234 190 10 232 195 16 232 200 30
38668 -245 207 45 241 208 19 232 195 16 184 144 12
38669 -218 194 134 211 206 186 42 42 42 2 2 6
38670 - 2 2 6 2 2 6 2 2 6 2 2 6
38671 - 50 50 50 74 74 74 30 30 30 6 6 6
38672 - 0 0 0 0 0 0 0 0 0 0 0 0
38673 - 0 0 0 0 0 0 0 0 0 0 0 0
38674 - 0 0 0 0 0 0 0 0 0 0 0 0
38675 - 0 0 0 0 0 0 0 0 0 0 0 0
38676 - 0 0 0 0 0 0 0 0 0 0 0 0
38677 - 0 0 0 0 0 0 0 0 0 0 0 0
38678 - 0 0 0 0 0 0 0 0 0 0 0 0
38679 - 0 0 0 0 0 0 0 0 0 0 0 0
38680 - 0 0 0 0 0 0 0 0 0 0 0 0
38681 - 0 0 0 0 0 0 0 0 0 0 0 0
38682 - 0 0 0 0 0 0 0 0 0 0 0 0
38683 - 0 0 0 0 0 0 0 0 0 0 0 0
38684 - 0 0 0 0 0 0 0 0 0 10 10 10
38685 - 34 34 34 86 86 86 14 14 14 2 2 6
38686 -121 87 25 192 133 9 219 162 10 239 182 13
38687 -236 186 11 232 195 16 241 208 19 244 214 54
38688 -246 218 60 246 218 38 246 215 20 241 208 19
38689 -241 208 19 226 184 13 121 87 25 2 2 6
38690 - 2 2 6 2 2 6 2 2 6 2 2 6
38691 - 50 50 50 82 82 82 34 34 34 10 10 10
38692 - 0 0 0 0 0 0 0 0 0 0 0 0
38693 - 0 0 0 0 0 0 0 0 0 0 0 0
38694 - 0 0 0 0 0 0 0 0 0 0 0 0
38695 - 0 0 0 0 0 0 0 0 0 0 0 0
38696 - 0 0 0 0 0 0 0 0 0 0 0 0
38697 - 0 0 0 0 0 0 0 0 0 0 0 0
38698 - 0 0 0 0 0 0 0 0 0 0 0 0
38699 - 0 0 0 0 0 0 0 0 0 0 0 0
38700 - 0 0 0 0 0 0 0 0 0 0 0 0
38701 - 0 0 0 0 0 0 0 0 0 0 0 0
38702 - 0 0 0 0 0 0 0 0 0 0 0 0
38703 - 0 0 0 0 0 0 0 0 0 0 0 0
38704 - 0 0 0 0 0 0 0 0 0 10 10 10
38705 - 34 34 34 82 82 82 30 30 30 61 42 6
38706 -180 123 7 206 145 10 230 174 11 239 182 13
38707 -234 190 10 238 202 15 241 208 19 246 218 74
38708 -246 218 38 246 215 20 246 215 20 246 215 20
38709 -226 184 13 215 174 15 184 144 12 6 6 6
38710 - 2 2 6 2 2 6 2 2 6 2 2 6
38711 - 26 26 26 94 94 94 42 42 42 14 14 14
38712 - 0 0 0 0 0 0 0 0 0 0 0 0
38713 - 0 0 0 0 0 0 0 0 0 0 0 0
38714 - 0 0 0 0 0 0 0 0 0 0 0 0
38715 - 0 0 0 0 0 0 0 0 0 0 0 0
38716 - 0 0 0 0 0 0 0 0 0 0 0 0
38717 - 0 0 0 0 0 0 0 0 0 0 0 0
38718 - 0 0 0 0 0 0 0 0 0 0 0 0
38719 - 0 0 0 0 0 0 0 0 0 0 0 0
38720 - 0 0 0 0 0 0 0 0 0 0 0 0
38721 - 0 0 0 0 0 0 0 0 0 0 0 0
38722 - 0 0 0 0 0 0 0 0 0 0 0 0
38723 - 0 0 0 0 0 0 0 0 0 0 0 0
38724 - 0 0 0 0 0 0 0 0 0 10 10 10
38725 - 30 30 30 78 78 78 50 50 50 104 69 6
38726 -192 133 9 216 158 10 236 178 12 236 186 11
38727 -232 195 16 241 208 19 244 214 54 245 215 43
38728 -246 215 20 246 215 20 241 208 19 198 155 10
38729 -200 144 11 216 158 10 156 118 10 2 2 6
38730 - 2 2 6 2 2 6 2 2 6 2 2 6
38731 - 6 6 6 90 90 90 54 54 54 18 18 18
38732 - 6 6 6 0 0 0 0 0 0 0 0 0
38733 - 0 0 0 0 0 0 0 0 0 0 0 0
38734 - 0 0 0 0 0 0 0 0 0 0 0 0
38735 - 0 0 0 0 0 0 0 0 0 0 0 0
38736 - 0 0 0 0 0 0 0 0 0 0 0 0
38737 - 0 0 0 0 0 0 0 0 0 0 0 0
38738 - 0 0 0 0 0 0 0 0 0 0 0 0
38739 - 0 0 0 0 0 0 0 0 0 0 0 0
38740 - 0 0 0 0 0 0 0 0 0 0 0 0
38741 - 0 0 0 0 0 0 0 0 0 0 0 0
38742 - 0 0 0 0 0 0 0 0 0 0 0 0
38743 - 0 0 0 0 0 0 0 0 0 0 0 0
38744 - 0 0 0 0 0 0 0 0 0 10 10 10
38745 - 30 30 30 78 78 78 46 46 46 22 22 22
38746 -137 92 6 210 162 10 239 182 13 238 190 10
38747 -238 202 15 241 208 19 246 215 20 246 215 20
38748 -241 208 19 203 166 17 185 133 11 210 150 10
38749 -216 158 10 210 150 10 102 78 10 2 2 6
38750 - 6 6 6 54 54 54 14 14 14 2 2 6
38751 - 2 2 6 62 62 62 74 74 74 30 30 30
38752 - 10 10 10 0 0 0 0 0 0 0 0 0
38753 - 0 0 0 0 0 0 0 0 0 0 0 0
38754 - 0 0 0 0 0 0 0 0 0 0 0 0
38755 - 0 0 0 0 0 0 0 0 0 0 0 0
38756 - 0 0 0 0 0 0 0 0 0 0 0 0
38757 - 0 0 0 0 0 0 0 0 0 0 0 0
38758 - 0 0 0 0 0 0 0 0 0 0 0 0
38759 - 0 0 0 0 0 0 0 0 0 0 0 0
38760 - 0 0 0 0 0 0 0 0 0 0 0 0
38761 - 0 0 0 0 0 0 0 0 0 0 0 0
38762 - 0 0 0 0 0 0 0 0 0 0 0 0
38763 - 0 0 0 0 0 0 0 0 0 0 0 0
38764 - 0 0 0 0 0 0 0 0 0 10 10 10
38765 - 34 34 34 78 78 78 50 50 50 6 6 6
38766 - 94 70 30 139 102 15 190 146 13 226 184 13
38767 -232 200 30 232 195 16 215 174 15 190 146 13
38768 -168 122 10 192 133 9 210 150 10 213 154 11
38769 -202 150 34 182 157 106 101 98 89 2 2 6
38770 - 2 2 6 78 78 78 116 116 116 58 58 58
38771 - 2 2 6 22 22 22 90 90 90 46 46 46
38772 - 18 18 18 6 6 6 0 0 0 0 0 0
38773 - 0 0 0 0 0 0 0 0 0 0 0 0
38774 - 0 0 0 0 0 0 0 0 0 0 0 0
38775 - 0 0 0 0 0 0 0 0 0 0 0 0
38776 - 0 0 0 0 0 0 0 0 0 0 0 0
38777 - 0 0 0 0 0 0 0 0 0 0 0 0
38778 - 0 0 0 0 0 0 0 0 0 0 0 0
38779 - 0 0 0 0 0 0 0 0 0 0 0 0
38780 - 0 0 0 0 0 0 0 0 0 0 0 0
38781 - 0 0 0 0 0 0 0 0 0 0 0 0
38782 - 0 0 0 0 0 0 0 0 0 0 0 0
38783 - 0 0 0 0 0 0 0 0 0 0 0 0
38784 - 0 0 0 0 0 0 0 0 0 10 10 10
38785 - 38 38 38 86 86 86 50 50 50 6 6 6
38786 -128 128 128 174 154 114 156 107 11 168 122 10
38787 -198 155 10 184 144 12 197 138 11 200 144 11
38788 -206 145 10 206 145 10 197 138 11 188 164 115
38789 -195 195 195 198 198 198 174 174 174 14 14 14
38790 - 2 2 6 22 22 22 116 116 116 116 116 116
38791 - 22 22 22 2 2 6 74 74 74 70 70 70
38792 - 30 30 30 10 10 10 0 0 0 0 0 0
38793 - 0 0 0 0 0 0 0 0 0 0 0 0
38794 - 0 0 0 0 0 0 0 0 0 0 0 0
38795 - 0 0 0 0 0 0 0 0 0 0 0 0
38796 - 0 0 0 0 0 0 0 0 0 0 0 0
38797 - 0 0 0 0 0 0 0 0 0 0 0 0
38798 - 0 0 0 0 0 0 0 0 0 0 0 0
38799 - 0 0 0 0 0 0 0 0 0 0 0 0
38800 - 0 0 0 0 0 0 0 0 0 0 0 0
38801 - 0 0 0 0 0 0 0 0 0 0 0 0
38802 - 0 0 0 0 0 0 0 0 0 0 0 0
38803 - 0 0 0 0 0 0 0 0 0 0 0 0
38804 - 0 0 0 0 0 0 6 6 6 18 18 18
38805 - 50 50 50 101 101 101 26 26 26 10 10 10
38806 -138 138 138 190 190 190 174 154 114 156 107 11
38807 -197 138 11 200 144 11 197 138 11 192 133 9
38808 -180 123 7 190 142 34 190 178 144 187 187 187
38809 -202 202 202 221 221 221 214 214 214 66 66 66
38810 - 2 2 6 2 2 6 50 50 50 62 62 62
38811 - 6 6 6 2 2 6 10 10 10 90 90 90
38812 - 50 50 50 18 18 18 6 6 6 0 0 0
38813 - 0 0 0 0 0 0 0 0 0 0 0 0
38814 - 0 0 0 0 0 0 0 0 0 0 0 0
38815 - 0 0 0 0 0 0 0 0 0 0 0 0
38816 - 0 0 0 0 0 0 0 0 0 0 0 0
38817 - 0 0 0 0 0 0 0 0 0 0 0 0
38818 - 0 0 0 0 0 0 0 0 0 0 0 0
38819 - 0 0 0 0 0 0 0 0 0 0 0 0
38820 - 0 0 0 0 0 0 0 0 0 0 0 0
38821 - 0 0 0 0 0 0 0 0 0 0 0 0
38822 - 0 0 0 0 0 0 0 0 0 0 0 0
38823 - 0 0 0 0 0 0 0 0 0 0 0 0
38824 - 0 0 0 0 0 0 10 10 10 34 34 34
38825 - 74 74 74 74 74 74 2 2 6 6 6 6
38826 -144 144 144 198 198 198 190 190 190 178 166 146
38827 -154 121 60 156 107 11 156 107 11 168 124 44
38828 -174 154 114 187 187 187 190 190 190 210 210 210
38829 -246 246 246 253 253 253 253 253 253 182 182 182
38830 - 6 6 6 2 2 6 2 2 6 2 2 6
38831 - 2 2 6 2 2 6 2 2 6 62 62 62
38832 - 74 74 74 34 34 34 14 14 14 0 0 0
38833 - 0 0 0 0 0 0 0 0 0 0 0 0
38834 - 0 0 0 0 0 0 0 0 0 0 0 0
38835 - 0 0 0 0 0 0 0 0 0 0 0 0
38836 - 0 0 0 0 0 0 0 0 0 0 0 0
38837 - 0 0 0 0 0 0 0 0 0 0 0 0
38838 - 0 0 0 0 0 0 0 0 0 0 0 0
38839 - 0 0 0 0 0 0 0 0 0 0 0 0
38840 - 0 0 0 0 0 0 0 0 0 0 0 0
38841 - 0 0 0 0 0 0 0 0 0 0 0 0
38842 - 0 0 0 0 0 0 0 0 0 0 0 0
38843 - 0 0 0 0 0 0 0 0 0 0 0 0
38844 - 0 0 0 10 10 10 22 22 22 54 54 54
38845 - 94 94 94 18 18 18 2 2 6 46 46 46
38846 -234 234 234 221 221 221 190 190 190 190 190 190
38847 -190 190 190 187 187 187 187 187 187 190 190 190
38848 -190 190 190 195 195 195 214 214 214 242 242 242
38849 -253 253 253 253 253 253 253 253 253 253 253 253
38850 - 82 82 82 2 2 6 2 2 6 2 2 6
38851 - 2 2 6 2 2 6 2 2 6 14 14 14
38852 - 86 86 86 54 54 54 22 22 22 6 6 6
38853 - 0 0 0 0 0 0 0 0 0 0 0 0
38854 - 0 0 0 0 0 0 0 0 0 0 0 0
38855 - 0 0 0 0 0 0 0 0 0 0 0 0
38856 - 0 0 0 0 0 0 0 0 0 0 0 0
38857 - 0 0 0 0 0 0 0 0 0 0 0 0
38858 - 0 0 0 0 0 0 0 0 0 0 0 0
38859 - 0 0 0 0 0 0 0 0 0 0 0 0
38860 - 0 0 0 0 0 0 0 0 0 0 0 0
38861 - 0 0 0 0 0 0 0 0 0 0 0 0
38862 - 0 0 0 0 0 0 0 0 0 0 0 0
38863 - 0 0 0 0 0 0 0 0 0 0 0 0
38864 - 6 6 6 18 18 18 46 46 46 90 90 90
38865 - 46 46 46 18 18 18 6 6 6 182 182 182
38866 -253 253 253 246 246 246 206 206 206 190 190 190
38867 -190 190 190 190 190 190 190 190 190 190 190 190
38868 -206 206 206 231 231 231 250 250 250 253 253 253
38869 -253 253 253 253 253 253 253 253 253 253 253 253
38870 -202 202 202 14 14 14 2 2 6 2 2 6
38871 - 2 2 6 2 2 6 2 2 6 2 2 6
38872 - 42 42 42 86 86 86 42 42 42 18 18 18
38873 - 6 6 6 0 0 0 0 0 0 0 0 0
38874 - 0 0 0 0 0 0 0 0 0 0 0 0
38875 - 0 0 0 0 0 0 0 0 0 0 0 0
38876 - 0 0 0 0 0 0 0 0 0 0 0 0
38877 - 0 0 0 0 0 0 0 0 0 0 0 0
38878 - 0 0 0 0 0 0 0 0 0 0 0 0
38879 - 0 0 0 0 0 0 0 0 0 0 0 0
38880 - 0 0 0 0 0 0 0 0 0 0 0 0
38881 - 0 0 0 0 0 0 0 0 0 0 0 0
38882 - 0 0 0 0 0 0 0 0 0 0 0 0
38883 - 0 0 0 0 0 0 0 0 0 6 6 6
38884 - 14 14 14 38 38 38 74 74 74 66 66 66
38885 - 2 2 6 6 6 6 90 90 90 250 250 250
38886 -253 253 253 253 253 253 238 238 238 198 198 198
38887 -190 190 190 190 190 190 195 195 195 221 221 221
38888 -246 246 246 253 253 253 253 253 253 253 253 253
38889 -253 253 253 253 253 253 253 253 253 253 253 253
38890 -253 253 253 82 82 82 2 2 6 2 2 6
38891 - 2 2 6 2 2 6 2 2 6 2 2 6
38892 - 2 2 6 78 78 78 70 70 70 34 34 34
38893 - 14 14 14 6 6 6 0 0 0 0 0 0
38894 - 0 0 0 0 0 0 0 0 0 0 0 0
38895 - 0 0 0 0 0 0 0 0 0 0 0 0
38896 - 0 0 0 0 0 0 0 0 0 0 0 0
38897 - 0 0 0 0 0 0 0 0 0 0 0 0
38898 - 0 0 0 0 0 0 0 0 0 0 0 0
38899 - 0 0 0 0 0 0 0 0 0 0 0 0
38900 - 0 0 0 0 0 0 0 0 0 0 0 0
38901 - 0 0 0 0 0 0 0 0 0 0 0 0
38902 - 0 0 0 0 0 0 0 0 0 0 0 0
38903 - 0 0 0 0 0 0 0 0 0 14 14 14
38904 - 34 34 34 66 66 66 78 78 78 6 6 6
38905 - 2 2 6 18 18 18 218 218 218 253 253 253
38906 -253 253 253 253 253 253 253 253 253 246 246 246
38907 -226 226 226 231 231 231 246 246 246 253 253 253
38908 -253 253 253 253 253 253 253 253 253 253 253 253
38909 -253 253 253 253 253 253 253 253 253 253 253 253
38910 -253 253 253 178 178 178 2 2 6 2 2 6
38911 - 2 2 6 2 2 6 2 2 6 2 2 6
38912 - 2 2 6 18 18 18 90 90 90 62 62 62
38913 - 30 30 30 10 10 10 0 0 0 0 0 0
38914 - 0 0 0 0 0 0 0 0 0 0 0 0
38915 - 0 0 0 0 0 0 0 0 0 0 0 0
38916 - 0 0 0 0 0 0 0 0 0 0 0 0
38917 - 0 0 0 0 0 0 0 0 0 0 0 0
38918 - 0 0 0 0 0 0 0 0 0 0 0 0
38919 - 0 0 0 0 0 0 0 0 0 0 0 0
38920 - 0 0 0 0 0 0 0 0 0 0 0 0
38921 - 0 0 0 0 0 0 0 0 0 0 0 0
38922 - 0 0 0 0 0 0 0 0 0 0 0 0
38923 - 0 0 0 0 0 0 10 10 10 26 26 26
38924 - 58 58 58 90 90 90 18 18 18 2 2 6
38925 - 2 2 6 110 110 110 253 253 253 253 253 253
38926 -253 253 253 253 253 253 253 253 253 253 253 253
38927 -250 250 250 253 253 253 253 253 253 253 253 253
38928 -253 253 253 253 253 253 253 253 253 253 253 253
38929 -253 253 253 253 253 253 253 253 253 253 253 253
38930 -253 253 253 231 231 231 18 18 18 2 2 6
38931 - 2 2 6 2 2 6 2 2 6 2 2 6
38932 - 2 2 6 2 2 6 18 18 18 94 94 94
38933 - 54 54 54 26 26 26 10 10 10 0 0 0
38934 - 0 0 0 0 0 0 0 0 0 0 0 0
38935 - 0 0 0 0 0 0 0 0 0 0 0 0
38936 - 0 0 0 0 0 0 0 0 0 0 0 0
38937 - 0 0 0 0 0 0 0 0 0 0 0 0
38938 - 0 0 0 0 0 0 0 0 0 0 0 0
38939 - 0 0 0 0 0 0 0 0 0 0 0 0
38940 - 0 0 0 0 0 0 0 0 0 0 0 0
38941 - 0 0 0 0 0 0 0 0 0 0 0 0
38942 - 0 0 0 0 0 0 0 0 0 0 0 0
38943 - 0 0 0 6 6 6 22 22 22 50 50 50
38944 - 90 90 90 26 26 26 2 2 6 2 2 6
38945 - 14 14 14 195 195 195 250 250 250 253 253 253
38946 -253 253 253 253 253 253 253 253 253 253 253 253
38947 -253 253 253 253 253 253 253 253 253 253 253 253
38948 -253 253 253 253 253 253 253 253 253 253 253 253
38949 -253 253 253 253 253 253 253 253 253 253 253 253
38950 -250 250 250 242 242 242 54 54 54 2 2 6
38951 - 2 2 6 2 2 6 2 2 6 2 2 6
38952 - 2 2 6 2 2 6 2 2 6 38 38 38
38953 - 86 86 86 50 50 50 22 22 22 6 6 6
38954 - 0 0 0 0 0 0 0 0 0 0 0 0
38955 - 0 0 0 0 0 0 0 0 0 0 0 0
38956 - 0 0 0 0 0 0 0 0 0 0 0 0
38957 - 0 0 0 0 0 0 0 0 0 0 0 0
38958 - 0 0 0 0 0 0 0 0 0 0 0 0
38959 - 0 0 0 0 0 0 0 0 0 0 0 0
38960 - 0 0 0 0 0 0 0 0 0 0 0 0
38961 - 0 0 0 0 0 0 0 0 0 0 0 0
38962 - 0 0 0 0 0 0 0 0 0 0 0 0
38963 - 6 6 6 14 14 14 38 38 38 82 82 82
38964 - 34 34 34 2 2 6 2 2 6 2 2 6
38965 - 42 42 42 195 195 195 246 246 246 253 253 253
38966 -253 253 253 253 253 253 253 253 253 250 250 250
38967 -242 242 242 242 242 242 250 250 250 253 253 253
38968 -253 253 253 253 253 253 253 253 253 253 253 253
38969 -253 253 253 250 250 250 246 246 246 238 238 238
38970 -226 226 226 231 231 231 101 101 101 6 6 6
38971 - 2 2 6 2 2 6 2 2 6 2 2 6
38972 - 2 2 6 2 2 6 2 2 6 2 2 6
38973 - 38 38 38 82 82 82 42 42 42 14 14 14
38974 - 6 6 6 0 0 0 0 0 0 0 0 0
38975 - 0 0 0 0 0 0 0 0 0 0 0 0
38976 - 0 0 0 0 0 0 0 0 0 0 0 0
38977 - 0 0 0 0 0 0 0 0 0 0 0 0
38978 - 0 0 0 0 0 0 0 0 0 0 0 0
38979 - 0 0 0 0 0 0 0 0 0 0 0 0
38980 - 0 0 0 0 0 0 0 0 0 0 0 0
38981 - 0 0 0 0 0 0 0 0 0 0 0 0
38982 - 0 0 0 0 0 0 0 0 0 0 0 0
38983 - 10 10 10 26 26 26 62 62 62 66 66 66
38984 - 2 2 6 2 2 6 2 2 6 6 6 6
38985 - 70 70 70 170 170 170 206 206 206 234 234 234
38986 -246 246 246 250 250 250 250 250 250 238 238 238
38987 -226 226 226 231 231 231 238 238 238 250 250 250
38988 -250 250 250 250 250 250 246 246 246 231 231 231
38989 -214 214 214 206 206 206 202 202 202 202 202 202
38990 -198 198 198 202 202 202 182 182 182 18 18 18
38991 - 2 2 6 2 2 6 2 2 6 2 2 6
38992 - 2 2 6 2 2 6 2 2 6 2 2 6
38993 - 2 2 6 62 62 62 66 66 66 30 30 30
38994 - 10 10 10 0 0 0 0 0 0 0 0 0
38995 - 0 0 0 0 0 0 0 0 0 0 0 0
38996 - 0 0 0 0 0 0 0 0 0 0 0 0
38997 - 0 0 0 0 0 0 0 0 0 0 0 0
38998 - 0 0 0 0 0 0 0 0 0 0 0 0
38999 - 0 0 0 0 0 0 0 0 0 0 0 0
39000 - 0 0 0 0 0 0 0 0 0 0 0 0
39001 - 0 0 0 0 0 0 0 0 0 0 0 0
39002 - 0 0 0 0 0 0 0 0 0 0 0 0
39003 - 14 14 14 42 42 42 82 82 82 18 18 18
39004 - 2 2 6 2 2 6 2 2 6 10 10 10
39005 - 94 94 94 182 182 182 218 218 218 242 242 242
39006 -250 250 250 253 253 253 253 253 253 250 250 250
39007 -234 234 234 253 253 253 253 253 253 253 253 253
39008 -253 253 253 253 253 253 253 253 253 246 246 246
39009 -238 238 238 226 226 226 210 210 210 202 202 202
39010 -195 195 195 195 195 195 210 210 210 158 158 158
39011 - 6 6 6 14 14 14 50 50 50 14 14 14
39012 - 2 2 6 2 2 6 2 2 6 2 2 6
39013 - 2 2 6 6 6 6 86 86 86 46 46 46
39014 - 18 18 18 6 6 6 0 0 0 0 0 0
39015 - 0 0 0 0 0 0 0 0 0 0 0 0
39016 - 0 0 0 0 0 0 0 0 0 0 0 0
39017 - 0 0 0 0 0 0 0 0 0 0 0 0
39018 - 0 0 0 0 0 0 0 0 0 0 0 0
39019 - 0 0 0 0 0 0 0 0 0 0 0 0
39020 - 0 0 0 0 0 0 0 0 0 0 0 0
39021 - 0 0 0 0 0 0 0 0 0 0 0 0
39022 - 0 0 0 0 0 0 0 0 0 6 6 6
39023 - 22 22 22 54 54 54 70 70 70 2 2 6
39024 - 2 2 6 10 10 10 2 2 6 22 22 22
39025 -166 166 166 231 231 231 250 250 250 253 253 253
39026 -253 253 253 253 253 253 253 253 253 250 250 250
39027 -242 242 242 253 253 253 253 253 253 253 253 253
39028 -253 253 253 253 253 253 253 253 253 253 253 253
39029 -253 253 253 253 253 253 253 253 253 246 246 246
39030 -231 231 231 206 206 206 198 198 198 226 226 226
39031 - 94 94 94 2 2 6 6 6 6 38 38 38
39032 - 30 30 30 2 2 6 2 2 6 2 2 6
39033 - 2 2 6 2 2 6 62 62 62 66 66 66
39034 - 26 26 26 10 10 10 0 0 0 0 0 0
39035 - 0 0 0 0 0 0 0 0 0 0 0 0
39036 - 0 0 0 0 0 0 0 0 0 0 0 0
39037 - 0 0 0 0 0 0 0 0 0 0 0 0
39038 - 0 0 0 0 0 0 0 0 0 0 0 0
39039 - 0 0 0 0 0 0 0 0 0 0 0 0
39040 - 0 0 0 0 0 0 0 0 0 0 0 0
39041 - 0 0 0 0 0 0 0 0 0 0 0 0
39042 - 0 0 0 0 0 0 0 0 0 10 10 10
39043 - 30 30 30 74 74 74 50 50 50 2 2 6
39044 - 26 26 26 26 26 26 2 2 6 106 106 106
39045 -238 238 238 253 253 253 253 253 253 253 253 253
39046 -253 253 253 253 253 253 253 253 253 253 253 253
39047 -253 253 253 253 253 253 253 253 253 253 253 253
39048 -253 253 253 253 253 253 253 253 253 253 253 253
39049 -253 253 253 253 253 253 253 253 253 253 253 253
39050 -253 253 253 246 246 246 218 218 218 202 202 202
39051 -210 210 210 14 14 14 2 2 6 2 2 6
39052 - 30 30 30 22 22 22 2 2 6 2 2 6
39053 - 2 2 6 2 2 6 18 18 18 86 86 86
39054 - 42 42 42 14 14 14 0 0 0 0 0 0
39055 - 0 0 0 0 0 0 0 0 0 0 0 0
39056 - 0 0 0 0 0 0 0 0 0 0 0 0
39057 - 0 0 0 0 0 0 0 0 0 0 0 0
39058 - 0 0 0 0 0 0 0 0 0 0 0 0
39059 - 0 0 0 0 0 0 0 0 0 0 0 0
39060 - 0 0 0 0 0 0 0 0 0 0 0 0
39061 - 0 0 0 0 0 0 0 0 0 0 0 0
39062 - 0 0 0 0 0 0 0 0 0 14 14 14
39063 - 42 42 42 90 90 90 22 22 22 2 2 6
39064 - 42 42 42 2 2 6 18 18 18 218 218 218
39065 -253 253 253 253 253 253 253 253 253 253 253 253
39066 -253 253 253 253 253 253 253 253 253 253 253 253
39067 -253 253 253 253 253 253 253 253 253 253 253 253
39068 -253 253 253 253 253 253 253 253 253 253 253 253
39069 -253 253 253 253 253 253 253 253 253 253 253 253
39070 -253 253 253 253 253 253 250 250 250 221 221 221
39071 -218 218 218 101 101 101 2 2 6 14 14 14
39072 - 18 18 18 38 38 38 10 10 10 2 2 6
39073 - 2 2 6 2 2 6 2 2 6 78 78 78
39074 - 58 58 58 22 22 22 6 6 6 0 0 0
39075 - 0 0 0 0 0 0 0 0 0 0 0 0
39076 - 0 0 0 0 0 0 0 0 0 0 0 0
39077 - 0 0 0 0 0 0 0 0 0 0 0 0
39078 - 0 0 0 0 0 0 0 0 0 0 0 0
39079 - 0 0 0 0 0 0 0 0 0 0 0 0
39080 - 0 0 0 0 0 0 0 0 0 0 0 0
39081 - 0 0 0 0 0 0 0 0 0 0 0 0
39082 - 0 0 0 0 0 0 6 6 6 18 18 18
39083 - 54 54 54 82 82 82 2 2 6 26 26 26
39084 - 22 22 22 2 2 6 123 123 123 253 253 253
39085 -253 253 253 253 253 253 253 253 253 253 253 253
39086 -253 253 253 253 253 253 253 253 253 253 253 253
39087 -253 253 253 253 253 253 253 253 253 253 253 253
39088 -253 253 253 253 253 253 253 253 253 253 253 253
39089 -253 253 253 253 253 253 253 253 253 253 253 253
39090 -253 253 253 253 253 253 253 253 253 250 250 250
39091 -238 238 238 198 198 198 6 6 6 38 38 38
39092 - 58 58 58 26 26 26 38 38 38 2 2 6
39093 - 2 2 6 2 2 6 2 2 6 46 46 46
39094 - 78 78 78 30 30 30 10 10 10 0 0 0
39095 - 0 0 0 0 0 0 0 0 0 0 0 0
39096 - 0 0 0 0 0 0 0 0 0 0 0 0
39097 - 0 0 0 0 0 0 0 0 0 0 0 0
39098 - 0 0 0 0 0 0 0 0 0 0 0 0
39099 - 0 0 0 0 0 0 0 0 0 0 0 0
39100 - 0 0 0 0 0 0 0 0 0 0 0 0
39101 - 0 0 0 0 0 0 0 0 0 0 0 0
39102 - 0 0 0 0 0 0 10 10 10 30 30 30
39103 - 74 74 74 58 58 58 2 2 6 42 42 42
39104 - 2 2 6 22 22 22 231 231 231 253 253 253
39105 -253 253 253 253 253 253 253 253 253 253 253 253
39106 -253 253 253 253 253 253 253 253 253 250 250 250
39107 -253 253 253 253 253 253 253 253 253 253 253 253
39108 -253 253 253 253 253 253 253 253 253 253 253 253
39109 -253 253 253 253 253 253 253 253 253 253 253 253
39110 -253 253 253 253 253 253 253 253 253 253 253 253
39111 -253 253 253 246 246 246 46 46 46 38 38 38
39112 - 42 42 42 14 14 14 38 38 38 14 14 14
39113 - 2 2 6 2 2 6 2 2 6 6 6 6
39114 - 86 86 86 46 46 46 14 14 14 0 0 0
39115 - 0 0 0 0 0 0 0 0 0 0 0 0
39116 - 0 0 0 0 0 0 0 0 0 0 0 0
39117 - 0 0 0 0 0 0 0 0 0 0 0 0
39118 - 0 0 0 0 0 0 0 0 0 0 0 0
39119 - 0 0 0 0 0 0 0 0 0 0 0 0
39120 - 0 0 0 0 0 0 0 0 0 0 0 0
39121 - 0 0 0 0 0 0 0 0 0 0 0 0
39122 - 0 0 0 6 6 6 14 14 14 42 42 42
39123 - 90 90 90 18 18 18 18 18 18 26 26 26
39124 - 2 2 6 116 116 116 253 253 253 253 253 253
39125 -253 253 253 253 253 253 253 253 253 253 253 253
39126 -253 253 253 253 253 253 250 250 250 238 238 238
39127 -253 253 253 253 253 253 253 253 253 253 253 253
39128 -253 253 253 253 253 253 253 253 253 253 253 253
39129 -253 253 253 253 253 253 253 253 253 253 253 253
39130 -253 253 253 253 253 253 253 253 253 253 253 253
39131 -253 253 253 253 253 253 94 94 94 6 6 6
39132 - 2 2 6 2 2 6 10 10 10 34 34 34
39133 - 2 2 6 2 2 6 2 2 6 2 2 6
39134 - 74 74 74 58 58 58 22 22 22 6 6 6
39135 - 0 0 0 0 0 0 0 0 0 0 0 0
39136 - 0 0 0 0 0 0 0 0 0 0 0 0
39137 - 0 0 0 0 0 0 0 0 0 0 0 0
39138 - 0 0 0 0 0 0 0 0 0 0 0 0
39139 - 0 0 0 0 0 0 0 0 0 0 0 0
39140 - 0 0 0 0 0 0 0 0 0 0 0 0
39141 - 0 0 0 0 0 0 0 0 0 0 0 0
39142 - 0 0 0 10 10 10 26 26 26 66 66 66
39143 - 82 82 82 2 2 6 38 38 38 6 6 6
39144 - 14 14 14 210 210 210 253 253 253 253 253 253
39145 -253 253 253 253 253 253 253 253 253 253 253 253
39146 -253 253 253 253 253 253 246 246 246 242 242 242
39147 -253 253 253 253 253 253 253 253 253 253 253 253
39148 -253 253 253 253 253 253 253 253 253 253 253 253
39149 -253 253 253 253 253 253 253 253 253 253 253 253
39150 -253 253 253 253 253 253 253 253 253 253 253 253
39151 -253 253 253 253 253 253 144 144 144 2 2 6
39152 - 2 2 6 2 2 6 2 2 6 46 46 46
39153 - 2 2 6 2 2 6 2 2 6 2 2 6
39154 - 42 42 42 74 74 74 30 30 30 10 10 10
39155 - 0 0 0 0 0 0 0 0 0 0 0 0
39156 - 0 0 0 0 0 0 0 0 0 0 0 0
39157 - 0 0 0 0 0 0 0 0 0 0 0 0
39158 - 0 0 0 0 0 0 0 0 0 0 0 0
39159 - 0 0 0 0 0 0 0 0 0 0 0 0
39160 - 0 0 0 0 0 0 0 0 0 0 0 0
39161 - 0 0 0 0 0 0 0 0 0 0 0 0
39162 - 6 6 6 14 14 14 42 42 42 90 90 90
39163 - 26 26 26 6 6 6 42 42 42 2 2 6
39164 - 74 74 74 250 250 250 253 253 253 253 253 253
39165 -253 253 253 253 253 253 253 253 253 253 253 253
39166 -253 253 253 253 253 253 242 242 242 242 242 242
39167 -253 253 253 253 253 253 253 253 253 253 253 253
39168 -253 253 253 253 253 253 253 253 253 253 253 253
39169 -253 253 253 253 253 253 253 253 253 253 253 253
39170 -253 253 253 253 253 253 253 253 253 253 253 253
39171 -253 253 253 253 253 253 182 182 182 2 2 6
39172 - 2 2 6 2 2 6 2 2 6 46 46 46
39173 - 2 2 6 2 2 6 2 2 6 2 2 6
39174 - 10 10 10 86 86 86 38 38 38 10 10 10
39175 - 0 0 0 0 0 0 0 0 0 0 0 0
39176 - 0 0 0 0 0 0 0 0 0 0 0 0
39177 - 0 0 0 0 0 0 0 0 0 0 0 0
39178 - 0 0 0 0 0 0 0 0 0 0 0 0
39179 - 0 0 0 0 0 0 0 0 0 0 0 0
39180 - 0 0 0 0 0 0 0 0 0 0 0 0
39181 - 0 0 0 0 0 0 0 0 0 0 0 0
39182 - 10 10 10 26 26 26 66 66 66 82 82 82
39183 - 2 2 6 22 22 22 18 18 18 2 2 6
39184 -149 149 149 253 253 253 253 253 253 253 253 253
39185 -253 253 253 253 253 253 253 253 253 253 253 253
39186 -253 253 253 253 253 253 234 234 234 242 242 242
39187 -253 253 253 253 253 253 253 253 253 253 253 253
39188 -253 253 253 253 253 253 253 253 253 253 253 253
39189 -253 253 253 253 253 253 253 253 253 253 253 253
39190 -253 253 253 253 253 253 253 253 253 253 253 253
39191 -253 253 253 253 253 253 206 206 206 2 2 6
39192 - 2 2 6 2 2 6 2 2 6 38 38 38
39193 - 2 2 6 2 2 6 2 2 6 2 2 6
39194 - 6 6 6 86 86 86 46 46 46 14 14 14
39195 - 0 0 0 0 0 0 0 0 0 0 0 0
39196 - 0 0 0 0 0 0 0 0 0 0 0 0
39197 - 0 0 0 0 0 0 0 0 0 0 0 0
39198 - 0 0 0 0 0 0 0 0 0 0 0 0
39199 - 0 0 0 0 0 0 0 0 0 0 0 0
39200 - 0 0 0 0 0 0 0 0 0 0 0 0
39201 - 0 0 0 0 0 0 0 0 0 6 6 6
39202 - 18 18 18 46 46 46 86 86 86 18 18 18
39203 - 2 2 6 34 34 34 10 10 10 6 6 6
39204 -210 210 210 253 253 253 253 253 253 253 253 253
39205 -253 253 253 253 253 253 253 253 253 253 253 253
39206 -253 253 253 253 253 253 234 234 234 242 242 242
39207 -253 253 253 253 253 253 253 253 253 253 253 253
39208 -253 253 253 253 253 253 253 253 253 253 253 253
39209 -253 253 253 253 253 253 253 253 253 253 253 253
39210 -253 253 253 253 253 253 253 253 253 253 253 253
39211 -253 253 253 253 253 253 221 221 221 6 6 6
39212 - 2 2 6 2 2 6 6 6 6 30 30 30
39213 - 2 2 6 2 2 6 2 2 6 2 2 6
39214 - 2 2 6 82 82 82 54 54 54 18 18 18
39215 - 6 6 6 0 0 0 0 0 0 0 0 0
39216 - 0 0 0 0 0 0 0 0 0 0 0 0
39217 - 0 0 0 0 0 0 0 0 0 0 0 0
39218 - 0 0 0 0 0 0 0 0 0 0 0 0
39219 - 0 0 0 0 0 0 0 0 0 0 0 0
39220 - 0 0 0 0 0 0 0 0 0 0 0 0
39221 - 0 0 0 0 0 0 0 0 0 10 10 10
39222 - 26 26 26 66 66 66 62 62 62 2 2 6
39223 - 2 2 6 38 38 38 10 10 10 26 26 26
39224 -238 238 238 253 253 253 253 253 253 253 253 253
39225 -253 253 253 253 253 253 253 253 253 253 253 253
39226 -253 253 253 253 253 253 231 231 231 238 238 238
39227 -253 253 253 253 253 253 253 253 253 253 253 253
39228 -253 253 253 253 253 253 253 253 253 253 253 253
39229 -253 253 253 253 253 253 253 253 253 253 253 253
39230 -253 253 253 253 253 253 253 253 253 253 253 253
39231 -253 253 253 253 253 253 231 231 231 6 6 6
39232 - 2 2 6 2 2 6 10 10 10 30 30 30
39233 - 2 2 6 2 2 6 2 2 6 2 2 6
39234 - 2 2 6 66 66 66 58 58 58 22 22 22
39235 - 6 6 6 0 0 0 0 0 0 0 0 0
39236 - 0 0 0 0 0 0 0 0 0 0 0 0
39237 - 0 0 0 0 0 0 0 0 0 0 0 0
39238 - 0 0 0 0 0 0 0 0 0 0 0 0
39239 - 0 0 0 0 0 0 0 0 0 0 0 0
39240 - 0 0 0 0 0 0 0 0 0 0 0 0
39241 - 0 0 0 0 0 0 0 0 0 10 10 10
39242 - 38 38 38 78 78 78 6 6 6 2 2 6
39243 - 2 2 6 46 46 46 14 14 14 42 42 42
39244 -246 246 246 253 253 253 253 253 253 253 253 253
39245 -253 253 253 253 253 253 253 253 253 253 253 253
39246 -253 253 253 253 253 253 231 231 231 242 242 242
39247 -253 253 253 253 253 253 253 253 253 253 253 253
39248 -253 253 253 253 253 253 253 253 253 253 253 253
39249 -253 253 253 253 253 253 253 253 253 253 253 253
39250 -253 253 253 253 253 253 253 253 253 253 253 253
39251 -253 253 253 253 253 253 234 234 234 10 10 10
39252 - 2 2 6 2 2 6 22 22 22 14 14 14
39253 - 2 2 6 2 2 6 2 2 6 2 2 6
39254 - 2 2 6 66 66 66 62 62 62 22 22 22
39255 - 6 6 6 0 0 0 0 0 0 0 0 0
39256 - 0 0 0 0 0 0 0 0 0 0 0 0
39257 - 0 0 0 0 0 0 0 0 0 0 0 0
39258 - 0 0 0 0 0 0 0 0 0 0 0 0
39259 - 0 0 0 0 0 0 0 0 0 0 0 0
39260 - 0 0 0 0 0 0 0 0 0 0 0 0
39261 - 0 0 0 0 0 0 6 6 6 18 18 18
39262 - 50 50 50 74 74 74 2 2 6 2 2 6
39263 - 14 14 14 70 70 70 34 34 34 62 62 62
39264 -250 250 250 253 253 253 253 253 253 253 253 253
39265 -253 253 253 253 253 253 253 253 253 253 253 253
39266 -253 253 253 253 253 253 231 231 231 246 246 246
39267 -253 253 253 253 253 253 253 253 253 253 253 253
39268 -253 253 253 253 253 253 253 253 253 253 253 253
39269 -253 253 253 253 253 253 253 253 253 253 253 253
39270 -253 253 253 253 253 253 253 253 253 253 253 253
39271 -253 253 253 253 253 253 234 234 234 14 14 14
39272 - 2 2 6 2 2 6 30 30 30 2 2 6
39273 - 2 2 6 2 2 6 2 2 6 2 2 6
39274 - 2 2 6 66 66 66 62 62 62 22 22 22
39275 - 6 6 6 0 0 0 0 0 0 0 0 0
39276 - 0 0 0 0 0 0 0 0 0 0 0 0
39277 - 0 0 0 0 0 0 0 0 0 0 0 0
39278 - 0 0 0 0 0 0 0 0 0 0 0 0
39279 - 0 0 0 0 0 0 0 0 0 0 0 0
39280 - 0 0 0 0 0 0 0 0 0 0 0 0
39281 - 0 0 0 0 0 0 6 6 6 18 18 18
39282 - 54 54 54 62 62 62 2 2 6 2 2 6
39283 - 2 2 6 30 30 30 46 46 46 70 70 70
39284 -250 250 250 253 253 253 253 253 253 253 253 253
39285 -253 253 253 253 253 253 253 253 253 253 253 253
39286 -253 253 253 253 253 253 231 231 231 246 246 246
39287 -253 253 253 253 253 253 253 253 253 253 253 253
39288 -253 253 253 253 253 253 253 253 253 253 253 253
39289 -253 253 253 253 253 253 253 253 253 253 253 253
39290 -253 253 253 253 253 253 253 253 253 253 253 253
39291 -253 253 253 253 253 253 226 226 226 10 10 10
39292 - 2 2 6 6 6 6 30 30 30 2 2 6
39293 - 2 2 6 2 2 6 2 2 6 2 2 6
39294 - 2 2 6 66 66 66 58 58 58 22 22 22
39295 - 6 6 6 0 0 0 0 0 0 0 0 0
39296 - 0 0 0 0 0 0 0 0 0 0 0 0
39297 - 0 0 0 0 0 0 0 0 0 0 0 0
39298 - 0 0 0 0 0 0 0 0 0 0 0 0
39299 - 0 0 0 0 0 0 0 0 0 0 0 0
39300 - 0 0 0 0 0 0 0 0 0 0 0 0
39301 - 0 0 0 0 0 0 6 6 6 22 22 22
39302 - 58 58 58 62 62 62 2 2 6 2 2 6
39303 - 2 2 6 2 2 6 30 30 30 78 78 78
39304 -250 250 250 253 253 253 253 253 253 253 253 253
39305 -253 253 253 253 253 253 253 253 253 253 253 253
39306 -253 253 253 253 253 253 231 231 231 246 246 246
39307 -253 253 253 253 253 253 253 253 253 253 253 253
39308 -253 253 253 253 253 253 253 253 253 253 253 253
39309 -253 253 253 253 253 253 253 253 253 253 253 253
39310 -253 253 253 253 253 253 253 253 253 253 253 253
39311 -253 253 253 253 253 253 206 206 206 2 2 6
39312 - 22 22 22 34 34 34 18 14 6 22 22 22
39313 - 26 26 26 18 18 18 6 6 6 2 2 6
39314 - 2 2 6 82 82 82 54 54 54 18 18 18
39315 - 6 6 6 0 0 0 0 0 0 0 0 0
39316 - 0 0 0 0 0 0 0 0 0 0 0 0
39317 - 0 0 0 0 0 0 0 0 0 0 0 0
39318 - 0 0 0 0 0 0 0 0 0 0 0 0
39319 - 0 0 0 0 0 0 0 0 0 0 0 0
39320 - 0 0 0 0 0 0 0 0 0 0 0 0
39321 - 0 0 0 0 0 0 6 6 6 26 26 26
39322 - 62 62 62 106 106 106 74 54 14 185 133 11
39323 -210 162 10 121 92 8 6 6 6 62 62 62
39324 -238 238 238 253 253 253 253 253 253 253 253 253
39325 -253 253 253 253 253 253 253 253 253 253 253 253
39326 -253 253 253 253 253 253 231 231 231 246 246 246
39327 -253 253 253 253 253 253 253 253 253 253 253 253
39328 -253 253 253 253 253 253 253 253 253 253 253 253
39329 -253 253 253 253 253 253 253 253 253 253 253 253
39330 -253 253 253 253 253 253 253 253 253 253 253 253
39331 -253 253 253 253 253 253 158 158 158 18 18 18
39332 - 14 14 14 2 2 6 2 2 6 2 2 6
39333 - 6 6 6 18 18 18 66 66 66 38 38 38
39334 - 6 6 6 94 94 94 50 50 50 18 18 18
39335 - 6 6 6 0 0 0 0 0 0 0 0 0
39336 - 0 0 0 0 0 0 0 0 0 0 0 0
39337 - 0 0 0 0 0 0 0 0 0 0 0 0
39338 - 0 0 0 0 0 0 0 0 0 0 0 0
39339 - 0 0 0 0 0 0 0 0 0 0 0 0
39340 - 0 0 0 0 0 0 0 0 0 6 6 6
39341 - 10 10 10 10 10 10 18 18 18 38 38 38
39342 - 78 78 78 142 134 106 216 158 10 242 186 14
39343 -246 190 14 246 190 14 156 118 10 10 10 10
39344 - 90 90 90 238 238 238 253 253 253 253 253 253
39345 -253 253 253 253 253 253 253 253 253 253 253 253
39346 -253 253 253 253 253 253 231 231 231 250 250 250
39347 -253 253 253 253 253 253 253 253 253 253 253 253
39348 -253 253 253 253 253 253 253 253 253 253 253 253
39349 -253 253 253 253 253 253 253 253 253 253 253 253
39350 -253 253 253 253 253 253 253 253 253 246 230 190
39351 -238 204 91 238 204 91 181 142 44 37 26 9
39352 - 2 2 6 2 2 6 2 2 6 2 2 6
39353 - 2 2 6 2 2 6 38 38 38 46 46 46
39354 - 26 26 26 106 106 106 54 54 54 18 18 18
39355 - 6 6 6 0 0 0 0 0 0 0 0 0
39356 - 0 0 0 0 0 0 0 0 0 0 0 0
39357 - 0 0 0 0 0 0 0 0 0 0 0 0
39358 - 0 0 0 0 0 0 0 0 0 0 0 0
39359 - 0 0 0 0 0 0 0 0 0 0 0 0
39360 - 0 0 0 6 6 6 14 14 14 22 22 22
39361 - 30 30 30 38 38 38 50 50 50 70 70 70
39362 -106 106 106 190 142 34 226 170 11 242 186 14
39363 -246 190 14 246 190 14 246 190 14 154 114 10
39364 - 6 6 6 74 74 74 226 226 226 253 253 253
39365 -253 253 253 253 253 253 253 253 253 253 253 253
39366 -253 253 253 253 253 253 231 231 231 250 250 250
39367 -253 253 253 253 253 253 253 253 253 253 253 253
39368 -253 253 253 253 253 253 253 253 253 253 253 253
39369 -253 253 253 253 253 253 253 253 253 253 253 253
39370 -253 253 253 253 253 253 253 253 253 228 184 62
39371 -241 196 14 241 208 19 232 195 16 38 30 10
39372 - 2 2 6 2 2 6 2 2 6 2 2 6
39373 - 2 2 6 6 6 6 30 30 30 26 26 26
39374 -203 166 17 154 142 90 66 66 66 26 26 26
39375 - 6 6 6 0 0 0 0 0 0 0 0 0
39376 - 0 0 0 0 0 0 0 0 0 0 0 0
39377 - 0 0 0 0 0 0 0 0 0 0 0 0
39378 - 0 0 0 0 0 0 0 0 0 0 0 0
39379 - 0 0 0 0 0 0 0 0 0 0 0 0
39380 - 6 6 6 18 18 18 38 38 38 58 58 58
39381 - 78 78 78 86 86 86 101 101 101 123 123 123
39382 -175 146 61 210 150 10 234 174 13 246 186 14
39383 -246 190 14 246 190 14 246 190 14 238 190 10
39384 -102 78 10 2 2 6 46 46 46 198 198 198
39385 -253 253 253 253 253 253 253 253 253 253 253 253
39386 -253 253 253 253 253 253 234 234 234 242 242 242
39387 -253 253 253 253 253 253 253 253 253 253 253 253
39388 -253 253 253 253 253 253 253 253 253 253 253 253
39389 -253 253 253 253 253 253 253 253 253 253 253 253
39390 -253 253 253 253 253 253 253 253 253 224 178 62
39391 -242 186 14 241 196 14 210 166 10 22 18 6
39392 - 2 2 6 2 2 6 2 2 6 2 2 6
39393 - 2 2 6 2 2 6 6 6 6 121 92 8
39394 -238 202 15 232 195 16 82 82 82 34 34 34
39395 - 10 10 10 0 0 0 0 0 0 0 0 0
39396 - 0 0 0 0 0 0 0 0 0 0 0 0
39397 - 0 0 0 0 0 0 0 0 0 0 0 0
39398 - 0 0 0 0 0 0 0 0 0 0 0 0
39399 - 0 0 0 0 0 0 0 0 0 0 0 0
39400 - 14 14 14 38 38 38 70 70 70 154 122 46
39401 -190 142 34 200 144 11 197 138 11 197 138 11
39402 -213 154 11 226 170 11 242 186 14 246 190 14
39403 -246 190 14 246 190 14 246 190 14 246 190 14
39404 -225 175 15 46 32 6 2 2 6 22 22 22
39405 -158 158 158 250 250 250 253 253 253 253 253 253
39406 -253 253 253 253 253 253 253 253 253 253 253 253
39407 -253 253 253 253 253 253 253 253 253 253 253 253
39408 -253 253 253 253 253 253 253 253 253 253 253 253
39409 -253 253 253 253 253 253 253 253 253 253 253 253
39410 -253 253 253 250 250 250 242 242 242 224 178 62
39411 -239 182 13 236 186 11 213 154 11 46 32 6
39412 - 2 2 6 2 2 6 2 2 6 2 2 6
39413 - 2 2 6 2 2 6 61 42 6 225 175 15
39414 -238 190 10 236 186 11 112 100 78 42 42 42
39415 - 14 14 14 0 0 0 0 0 0 0 0 0
39416 - 0 0 0 0 0 0 0 0 0 0 0 0
39417 - 0 0 0 0 0 0 0 0 0 0 0 0
39418 - 0 0 0 0 0 0 0 0 0 0 0 0
39419 - 0 0 0 0 0 0 0 0 0 6 6 6
39420 - 22 22 22 54 54 54 154 122 46 213 154 11
39421 -226 170 11 230 174 11 226 170 11 226 170 11
39422 -236 178 12 242 186 14 246 190 14 246 190 14
39423 -246 190 14 246 190 14 246 190 14 246 190 14
39424 -241 196 14 184 144 12 10 10 10 2 2 6
39425 - 6 6 6 116 116 116 242 242 242 253 253 253
39426 -253 253 253 253 253 253 253 253 253 253 253 253
39427 -253 253 253 253 253 253 253 253 253 253 253 253
39428 -253 253 253 253 253 253 253 253 253 253 253 253
39429 -253 253 253 253 253 253 253 253 253 253 253 253
39430 -253 253 253 231 231 231 198 198 198 214 170 54
39431 -236 178 12 236 178 12 210 150 10 137 92 6
39432 - 18 14 6 2 2 6 2 2 6 2 2 6
39433 - 6 6 6 70 47 6 200 144 11 236 178 12
39434 -239 182 13 239 182 13 124 112 88 58 58 58
39435 - 22 22 22 6 6 6 0 0 0 0 0 0
39436 - 0 0 0 0 0 0 0 0 0 0 0 0
39437 - 0 0 0 0 0 0 0 0 0 0 0 0
39438 - 0 0 0 0 0 0 0 0 0 0 0 0
39439 - 0 0 0 0 0 0 0 0 0 10 10 10
39440 - 30 30 30 70 70 70 180 133 36 226 170 11
39441 -239 182 13 242 186 14 242 186 14 246 186 14
39442 -246 190 14 246 190 14 246 190 14 246 190 14
39443 -246 190 14 246 190 14 246 190 14 246 190 14
39444 -246 190 14 232 195 16 98 70 6 2 2 6
39445 - 2 2 6 2 2 6 66 66 66 221 221 221
39446 -253 253 253 253 253 253 253 253 253 253 253 253
39447 -253 253 253 253 253 253 253 253 253 253 253 253
39448 -253 253 253 253 253 253 253 253 253 253 253 253
39449 -253 253 253 253 253 253 253 253 253 253 253 253
39450 -253 253 253 206 206 206 198 198 198 214 166 58
39451 -230 174 11 230 174 11 216 158 10 192 133 9
39452 -163 110 8 116 81 8 102 78 10 116 81 8
39453 -167 114 7 197 138 11 226 170 11 239 182 13
39454 -242 186 14 242 186 14 162 146 94 78 78 78
39455 - 34 34 34 14 14 14 6 6 6 0 0 0
39456 - 0 0 0 0 0 0 0 0 0 0 0 0
39457 - 0 0 0 0 0 0 0 0 0 0 0 0
39458 - 0 0 0 0 0 0 0 0 0 0 0 0
39459 - 0 0 0 0 0 0 0 0 0 6 6 6
39460 - 30 30 30 78 78 78 190 142 34 226 170 11
39461 -239 182 13 246 190 14 246 190 14 246 190 14
39462 -246 190 14 246 190 14 246 190 14 246 190 14
39463 -246 190 14 246 190 14 246 190 14 246 190 14
39464 -246 190 14 241 196 14 203 166 17 22 18 6
39465 - 2 2 6 2 2 6 2 2 6 38 38 38
39466 -218 218 218 253 253 253 253 253 253 253 253 253
39467 -253 253 253 253 253 253 253 253 253 253 253 253
39468 -253 253 253 253 253 253 253 253 253 253 253 253
39469 -253 253 253 253 253 253 253 253 253 253 253 253
39470 -250 250 250 206 206 206 198 198 198 202 162 69
39471 -226 170 11 236 178 12 224 166 10 210 150 10
39472 -200 144 11 197 138 11 192 133 9 197 138 11
39473 -210 150 10 226 170 11 242 186 14 246 190 14
39474 -246 190 14 246 186 14 225 175 15 124 112 88
39475 - 62 62 62 30 30 30 14 14 14 6 6 6
39476 - 0 0 0 0 0 0 0 0 0 0 0 0
39477 - 0 0 0 0 0 0 0 0 0 0 0 0
39478 - 0 0 0 0 0 0 0 0 0 0 0 0
39479 - 0 0 0 0 0 0 0 0 0 10 10 10
39480 - 30 30 30 78 78 78 174 135 50 224 166 10
39481 -239 182 13 246 190 14 246 190 14 246 190 14
39482 -246 190 14 246 190 14 246 190 14 246 190 14
39483 -246 190 14 246 190 14 246 190 14 246 190 14
39484 -246 190 14 246 190 14 241 196 14 139 102 15
39485 - 2 2 6 2 2 6 2 2 6 2 2 6
39486 - 78 78 78 250 250 250 253 253 253 253 253 253
39487 -253 253 253 253 253 253 253 253 253 253 253 253
39488 -253 253 253 253 253 253 253 253 253 253 253 253
39489 -253 253 253 253 253 253 253 253 253 253 253 253
39490 -250 250 250 214 214 214 198 198 198 190 150 46
39491 -219 162 10 236 178 12 234 174 13 224 166 10
39492 -216 158 10 213 154 11 213 154 11 216 158 10
39493 -226 170 11 239 182 13 246 190 14 246 190 14
39494 -246 190 14 246 190 14 242 186 14 206 162 42
39495 -101 101 101 58 58 58 30 30 30 14 14 14
39496 - 6 6 6 0 0 0 0 0 0 0 0 0
39497 - 0 0 0 0 0 0 0 0 0 0 0 0
39498 - 0 0 0 0 0 0 0 0 0 0 0 0
39499 - 0 0 0 0 0 0 0 0 0 10 10 10
39500 - 30 30 30 74 74 74 174 135 50 216 158 10
39501 -236 178 12 246 190 14 246 190 14 246 190 14
39502 -246 190 14 246 190 14 246 190 14 246 190 14
39503 -246 190 14 246 190 14 246 190 14 246 190 14
39504 -246 190 14 246 190 14 241 196 14 226 184 13
39505 - 61 42 6 2 2 6 2 2 6 2 2 6
39506 - 22 22 22 238 238 238 253 253 253 253 253 253
39507 -253 253 253 253 253 253 253 253 253 253 253 253
39508 -253 253 253 253 253 253 253 253 253 253 253 253
39509 -253 253 253 253 253 253 253 253 253 253 253 253
39510 -253 253 253 226 226 226 187 187 187 180 133 36
39511 -216 158 10 236 178 12 239 182 13 236 178 12
39512 -230 174 11 226 170 11 226 170 11 230 174 11
39513 -236 178 12 242 186 14 246 190 14 246 190 14
39514 -246 190 14 246 190 14 246 186 14 239 182 13
39515 -206 162 42 106 106 106 66 66 66 34 34 34
39516 - 14 14 14 6 6 6 0 0 0 0 0 0
39517 - 0 0 0 0 0 0 0 0 0 0 0 0
39518 - 0 0 0 0 0 0 0 0 0 0 0 0
39519 - 0 0 0 0 0 0 0 0 0 6 6 6
39520 - 26 26 26 70 70 70 163 133 67 213 154 11
39521 -236 178 12 246 190 14 246 190 14 246 190 14
39522 -246 190 14 246 190 14 246 190 14 246 190 14
39523 -246 190 14 246 190 14 246 190 14 246 190 14
39524 -246 190 14 246 190 14 246 190 14 241 196 14
39525 -190 146 13 18 14 6 2 2 6 2 2 6
39526 - 46 46 46 246 246 246 253 253 253 253 253 253
39527 -253 253 253 253 253 253 253 253 253 253 253 253
39528 -253 253 253 253 253 253 253 253 253 253 253 253
39529 -253 253 253 253 253 253 253 253 253 253 253 253
39530 -253 253 253 221 221 221 86 86 86 156 107 11
39531 -216 158 10 236 178 12 242 186 14 246 186 14
39532 -242 186 14 239 182 13 239 182 13 242 186 14
39533 -242 186 14 246 186 14 246 190 14 246 190 14
39534 -246 190 14 246 190 14 246 190 14 246 190 14
39535 -242 186 14 225 175 15 142 122 72 66 66 66
39536 - 30 30 30 10 10 10 0 0 0 0 0 0
39537 - 0 0 0 0 0 0 0 0 0 0 0 0
39538 - 0 0 0 0 0 0 0 0 0 0 0 0
39539 - 0 0 0 0 0 0 0 0 0 6 6 6
39540 - 26 26 26 70 70 70 163 133 67 210 150 10
39541 -236 178 12 246 190 14 246 190 14 246 190 14
39542 -246 190 14 246 190 14 246 190 14 246 190 14
39543 -246 190 14 246 190 14 246 190 14 246 190 14
39544 -246 190 14 246 190 14 246 190 14 246 190 14
39545 -232 195 16 121 92 8 34 34 34 106 106 106
39546 -221 221 221 253 253 253 253 253 253 253 253 253
39547 -253 253 253 253 253 253 253 253 253 253 253 253
39548 -253 253 253 253 253 253 253 253 253 253 253 253
39549 -253 253 253 253 253 253 253 253 253 253 253 253
39550 -242 242 242 82 82 82 18 14 6 163 110 8
39551 -216 158 10 236 178 12 242 186 14 246 190 14
39552 -246 190 14 246 190 14 246 190 14 246 190 14
39553 -246 190 14 246 190 14 246 190 14 246 190 14
39554 -246 190 14 246 190 14 246 190 14 246 190 14
39555 -246 190 14 246 190 14 242 186 14 163 133 67
39556 - 46 46 46 18 18 18 6 6 6 0 0 0
39557 - 0 0 0 0 0 0 0 0 0 0 0 0
39558 - 0 0 0 0 0 0 0 0 0 0 0 0
39559 - 0 0 0 0 0 0 0 0 0 10 10 10
39560 - 30 30 30 78 78 78 163 133 67 210 150 10
39561 -236 178 12 246 186 14 246 190 14 246 190 14
39562 -246 190 14 246 190 14 246 190 14 246 190 14
39563 -246 190 14 246 190 14 246 190 14 246 190 14
39564 -246 190 14 246 190 14 246 190 14 246 190 14
39565 -241 196 14 215 174 15 190 178 144 253 253 253
39566 -253 253 253 253 253 253 253 253 253 253 253 253
39567 -253 253 253 253 253 253 253 253 253 253 253 253
39568 -253 253 253 253 253 253 253 253 253 253 253 253
39569 -253 253 253 253 253 253 253 253 253 218 218 218
39570 - 58 58 58 2 2 6 22 18 6 167 114 7
39571 -216 158 10 236 178 12 246 186 14 246 190 14
39572 -246 190 14 246 190 14 246 190 14 246 190 14
39573 -246 190 14 246 190 14 246 190 14 246 190 14
39574 -246 190 14 246 190 14 246 190 14 246 190 14
39575 -246 190 14 246 186 14 242 186 14 190 150 46
39576 - 54 54 54 22 22 22 6 6 6 0 0 0
39577 - 0 0 0 0 0 0 0 0 0 0 0 0
39578 - 0 0 0 0 0 0 0 0 0 0 0 0
39579 - 0 0 0 0 0 0 0 0 0 14 14 14
39580 - 38 38 38 86 86 86 180 133 36 213 154 11
39581 -236 178 12 246 186 14 246 190 14 246 190 14
39582 -246 190 14 246 190 14 246 190 14 246 190 14
39583 -246 190 14 246 190 14 246 190 14 246 190 14
39584 -246 190 14 246 190 14 246 190 14 246 190 14
39585 -246 190 14 232 195 16 190 146 13 214 214 214
39586 -253 253 253 253 253 253 253 253 253 253 253 253
39587 -253 253 253 253 253 253 253 253 253 253 253 253
39588 -253 253 253 253 253 253 253 253 253 253 253 253
39589 -253 253 253 250 250 250 170 170 170 26 26 26
39590 - 2 2 6 2 2 6 37 26 9 163 110 8
39591 -219 162 10 239 182 13 246 186 14 246 190 14
39592 -246 190 14 246 190 14 246 190 14 246 190 14
39593 -246 190 14 246 190 14 246 190 14 246 190 14
39594 -246 190 14 246 190 14 246 190 14 246 190 14
39595 -246 186 14 236 178 12 224 166 10 142 122 72
39596 - 46 46 46 18 18 18 6 6 6 0 0 0
39597 - 0 0 0 0 0 0 0 0 0 0 0 0
39598 - 0 0 0 0 0 0 0 0 0 0 0 0
39599 - 0 0 0 0 0 0 6 6 6 18 18 18
39600 - 50 50 50 109 106 95 192 133 9 224 166 10
39601 -242 186 14 246 190 14 246 190 14 246 190 14
39602 -246 190 14 246 190 14 246 190 14 246 190 14
39603 -246 190 14 246 190 14 246 190 14 246 190 14
39604 -246 190 14 246 190 14 246 190 14 246 190 14
39605 -242 186 14 226 184 13 210 162 10 142 110 46
39606 -226 226 226 253 253 253 253 253 253 253 253 253
39607 -253 253 253 253 253 253 253 253 253 253 253 253
39608 -253 253 253 253 253 253 253 253 253 253 253 253
39609 -198 198 198 66 66 66 2 2 6 2 2 6
39610 - 2 2 6 2 2 6 50 34 6 156 107 11
39611 -219 162 10 239 182 13 246 186 14 246 190 14
39612 -246 190 14 246 190 14 246 190 14 246 190 14
39613 -246 190 14 246 190 14 246 190 14 246 190 14
39614 -246 190 14 246 190 14 246 190 14 242 186 14
39615 -234 174 13 213 154 11 154 122 46 66 66 66
39616 - 30 30 30 10 10 10 0 0 0 0 0 0
39617 - 0 0 0 0 0 0 0 0 0 0 0 0
39618 - 0 0 0 0 0 0 0 0 0 0 0 0
39619 - 0 0 0 0 0 0 6 6 6 22 22 22
39620 - 58 58 58 154 121 60 206 145 10 234 174 13
39621 -242 186 14 246 186 14 246 190 14 246 190 14
39622 -246 190 14 246 190 14 246 190 14 246 190 14
39623 -246 190 14 246 190 14 246 190 14 246 190 14
39624 -246 190 14 246 190 14 246 190 14 246 190 14
39625 -246 186 14 236 178 12 210 162 10 163 110 8
39626 - 61 42 6 138 138 138 218 218 218 250 250 250
39627 -253 253 253 253 253 253 253 253 253 250 250 250
39628 -242 242 242 210 210 210 144 144 144 66 66 66
39629 - 6 6 6 2 2 6 2 2 6 2 2 6
39630 - 2 2 6 2 2 6 61 42 6 163 110 8
39631 -216 158 10 236 178 12 246 190 14 246 190 14
39632 -246 190 14 246 190 14 246 190 14 246 190 14
39633 -246 190 14 246 190 14 246 190 14 246 190 14
39634 -246 190 14 239 182 13 230 174 11 216 158 10
39635 -190 142 34 124 112 88 70 70 70 38 38 38
39636 - 18 18 18 6 6 6 0 0 0 0 0 0
39637 - 0 0 0 0 0 0 0 0 0 0 0 0
39638 - 0 0 0 0 0 0 0 0 0 0 0 0
39639 - 0 0 0 0 0 0 6 6 6 22 22 22
39640 - 62 62 62 168 124 44 206 145 10 224 166 10
39641 -236 178 12 239 182 13 242 186 14 242 186 14
39642 -246 186 14 246 190 14 246 190 14 246 190 14
39643 -246 190 14 246 190 14 246 190 14 246 190 14
39644 -246 190 14 246 190 14 246 190 14 246 190 14
39645 -246 190 14 236 178 12 216 158 10 175 118 6
39646 - 80 54 7 2 2 6 6 6 6 30 30 30
39647 - 54 54 54 62 62 62 50 50 50 38 38 38
39648 - 14 14 14 2 2 6 2 2 6 2 2 6
39649 - 2 2 6 2 2 6 2 2 6 2 2 6
39650 - 2 2 6 6 6 6 80 54 7 167 114 7
39651 -213 154 11 236 178 12 246 190 14 246 190 14
39652 -246 190 14 246 190 14 246 190 14 246 190 14
39653 -246 190 14 242 186 14 239 182 13 239 182 13
39654 -230 174 11 210 150 10 174 135 50 124 112 88
39655 - 82 82 82 54 54 54 34 34 34 18 18 18
39656 - 6 6 6 0 0 0 0 0 0 0 0 0
39657 - 0 0 0 0 0 0 0 0 0 0 0 0
39658 - 0 0 0 0 0 0 0 0 0 0 0 0
39659 - 0 0 0 0 0 0 6 6 6 18 18 18
39660 - 50 50 50 158 118 36 192 133 9 200 144 11
39661 -216 158 10 219 162 10 224 166 10 226 170 11
39662 -230 174 11 236 178 12 239 182 13 239 182 13
39663 -242 186 14 246 186 14 246 190 14 246 190 14
39664 -246 190 14 246 190 14 246 190 14 246 190 14
39665 -246 186 14 230 174 11 210 150 10 163 110 8
39666 -104 69 6 10 10 10 2 2 6 2 2 6
39667 - 2 2 6 2 2 6 2 2 6 2 2 6
39668 - 2 2 6 2 2 6 2 2 6 2 2 6
39669 - 2 2 6 2 2 6 2 2 6 2 2 6
39670 - 2 2 6 6 6 6 91 60 6 167 114 7
39671 -206 145 10 230 174 11 242 186 14 246 190 14
39672 -246 190 14 246 190 14 246 186 14 242 186 14
39673 -239 182 13 230 174 11 224 166 10 213 154 11
39674 -180 133 36 124 112 88 86 86 86 58 58 58
39675 - 38 38 38 22 22 22 10 10 10 6 6 6
39676 - 0 0 0 0 0 0 0 0 0 0 0 0
39677 - 0 0 0 0 0 0 0 0 0 0 0 0
39678 - 0 0 0 0 0 0 0 0 0 0 0 0
39679 - 0 0 0 0 0 0 0 0 0 14 14 14
39680 - 34 34 34 70 70 70 138 110 50 158 118 36
39681 -167 114 7 180 123 7 192 133 9 197 138 11
39682 -200 144 11 206 145 10 213 154 11 219 162 10
39683 -224 166 10 230 174 11 239 182 13 242 186 14
39684 -246 186 14 246 186 14 246 186 14 246 186 14
39685 -239 182 13 216 158 10 185 133 11 152 99 6
39686 -104 69 6 18 14 6 2 2 6 2 2 6
39687 - 2 2 6 2 2 6 2 2 6 2 2 6
39688 - 2 2 6 2 2 6 2 2 6 2 2 6
39689 - 2 2 6 2 2 6 2 2 6 2 2 6
39690 - 2 2 6 6 6 6 80 54 7 152 99 6
39691 -192 133 9 219 162 10 236 178 12 239 182 13
39692 -246 186 14 242 186 14 239 182 13 236 178 12
39693 -224 166 10 206 145 10 192 133 9 154 121 60
39694 - 94 94 94 62 62 62 42 42 42 22 22 22
39695 - 14 14 14 6 6 6 0 0 0 0 0 0
39696 - 0 0 0 0 0 0 0 0 0 0 0 0
39697 - 0 0 0 0 0 0 0 0 0 0 0 0
39698 - 0 0 0 0 0 0 0 0 0 0 0 0
39699 - 0 0 0 0 0 0 0 0 0 6 6 6
39700 - 18 18 18 34 34 34 58 58 58 78 78 78
39701 -101 98 89 124 112 88 142 110 46 156 107 11
39702 -163 110 8 167 114 7 175 118 6 180 123 7
39703 -185 133 11 197 138 11 210 150 10 219 162 10
39704 -226 170 11 236 178 12 236 178 12 234 174 13
39705 -219 162 10 197 138 11 163 110 8 130 83 6
39706 - 91 60 6 10 10 10 2 2 6 2 2 6
39707 - 18 18 18 38 38 38 38 38 38 38 38 38
39708 - 38 38 38 38 38 38 38 38 38 38 38 38
39709 - 38 38 38 38 38 38 26 26 26 2 2 6
39710 - 2 2 6 6 6 6 70 47 6 137 92 6
39711 -175 118 6 200 144 11 219 162 10 230 174 11
39712 -234 174 13 230 174 11 219 162 10 210 150 10
39713 -192 133 9 163 110 8 124 112 88 82 82 82
39714 - 50 50 50 30 30 30 14 14 14 6 6 6
39715 - 0 0 0 0 0 0 0 0 0 0 0 0
39716 - 0 0 0 0 0 0 0 0 0 0 0 0
39717 - 0 0 0 0 0 0 0 0 0 0 0 0
39718 - 0 0 0 0 0 0 0 0 0 0 0 0
39719 - 0 0 0 0 0 0 0 0 0 0 0 0
39720 - 6 6 6 14 14 14 22 22 22 34 34 34
39721 - 42 42 42 58 58 58 74 74 74 86 86 86
39722 -101 98 89 122 102 70 130 98 46 121 87 25
39723 -137 92 6 152 99 6 163 110 8 180 123 7
39724 -185 133 11 197 138 11 206 145 10 200 144 11
39725 -180 123 7 156 107 11 130 83 6 104 69 6
39726 - 50 34 6 54 54 54 110 110 110 101 98 89
39727 - 86 86 86 82 82 82 78 78 78 78 78 78
39728 - 78 78 78 78 78 78 78 78 78 78 78 78
39729 - 78 78 78 82 82 82 86 86 86 94 94 94
39730 -106 106 106 101 101 101 86 66 34 124 80 6
39731 -156 107 11 180 123 7 192 133 9 200 144 11
39732 -206 145 10 200 144 11 192 133 9 175 118 6
39733 -139 102 15 109 106 95 70 70 70 42 42 42
39734 - 22 22 22 10 10 10 0 0 0 0 0 0
39735 - 0 0 0 0 0 0 0 0 0 0 0 0
39736 - 0 0 0 0 0 0 0 0 0 0 0 0
39737 - 0 0 0 0 0 0 0 0 0 0 0 0
39738 - 0 0 0 0 0 0 0 0 0 0 0 0
39739 - 0 0 0 0 0 0 0 0 0 0 0 0
39740 - 0 0 0 0 0 0 6 6 6 10 10 10
39741 - 14 14 14 22 22 22 30 30 30 38 38 38
39742 - 50 50 50 62 62 62 74 74 74 90 90 90
39743 -101 98 89 112 100 78 121 87 25 124 80 6
39744 -137 92 6 152 99 6 152 99 6 152 99 6
39745 -138 86 6 124 80 6 98 70 6 86 66 30
39746 -101 98 89 82 82 82 58 58 58 46 46 46
39747 - 38 38 38 34 34 34 34 34 34 34 34 34
39748 - 34 34 34 34 34 34 34 34 34 34 34 34
39749 - 34 34 34 34 34 34 38 38 38 42 42 42
39750 - 54 54 54 82 82 82 94 86 76 91 60 6
39751 -134 86 6 156 107 11 167 114 7 175 118 6
39752 -175 118 6 167 114 7 152 99 6 121 87 25
39753 -101 98 89 62 62 62 34 34 34 18 18 18
39754 - 6 6 6 0 0 0 0 0 0 0 0 0
39755 - 0 0 0 0 0 0 0 0 0 0 0 0
39756 - 0 0 0 0 0 0 0 0 0 0 0 0
39757 - 0 0 0 0 0 0 0 0 0 0 0 0
39758 - 0 0 0 0 0 0 0 0 0 0 0 0
39759 - 0 0 0 0 0 0 0 0 0 0 0 0
39760 - 0 0 0 0 0 0 0 0 0 0 0 0
39761 - 0 0 0 6 6 6 6 6 6 10 10 10
39762 - 18 18 18 22 22 22 30 30 30 42 42 42
39763 - 50 50 50 66 66 66 86 86 86 101 98 89
39764 -106 86 58 98 70 6 104 69 6 104 69 6
39765 -104 69 6 91 60 6 82 62 34 90 90 90
39766 - 62 62 62 38 38 38 22 22 22 14 14 14
39767 - 10 10 10 10 10 10 10 10 10 10 10 10
39768 - 10 10 10 10 10 10 6 6 6 10 10 10
39769 - 10 10 10 10 10 10 10 10 10 14 14 14
39770 - 22 22 22 42 42 42 70 70 70 89 81 66
39771 - 80 54 7 104 69 6 124 80 6 137 92 6
39772 -134 86 6 116 81 8 100 82 52 86 86 86
39773 - 58 58 58 30 30 30 14 14 14 6 6 6
39774 - 0 0 0 0 0 0 0 0 0 0 0 0
39775 - 0 0 0 0 0 0 0 0 0 0 0 0
39776 - 0 0 0 0 0 0 0 0 0 0 0 0
39777 - 0 0 0 0 0 0 0 0 0 0 0 0
39778 - 0 0 0 0 0 0 0 0 0 0 0 0
39779 - 0 0 0 0 0 0 0 0 0 0 0 0
39780 - 0 0 0 0 0 0 0 0 0 0 0 0
39781 - 0 0 0 0 0 0 0 0 0 0 0 0
39782 - 0 0 0 6 6 6 10 10 10 14 14 14
39783 - 18 18 18 26 26 26 38 38 38 54 54 54
39784 - 70 70 70 86 86 86 94 86 76 89 81 66
39785 - 89 81 66 86 86 86 74 74 74 50 50 50
39786 - 30 30 30 14 14 14 6 6 6 0 0 0
39787 - 0 0 0 0 0 0 0 0 0 0 0 0
39788 - 0 0 0 0 0 0 0 0 0 0 0 0
39789 - 0 0 0 0 0 0 0 0 0 0 0 0
39790 - 6 6 6 18 18 18 34 34 34 58 58 58
39791 - 82 82 82 89 81 66 89 81 66 89 81 66
39792 - 94 86 66 94 86 76 74 74 74 50 50 50
39793 - 26 26 26 14 14 14 6 6 6 0 0 0
39794 - 0 0 0 0 0 0 0 0 0 0 0 0
39795 - 0 0 0 0 0 0 0 0 0 0 0 0
39796 - 0 0 0 0 0 0 0 0 0 0 0 0
39797 - 0 0 0 0 0 0 0 0 0 0 0 0
39798 - 0 0 0 0 0 0 0 0 0 0 0 0
39799 - 0 0 0 0 0 0 0 0 0 0 0 0
39800 - 0 0 0 0 0 0 0 0 0 0 0 0
39801 - 0 0 0 0 0 0 0 0 0 0 0 0
39802 - 0 0 0 0 0 0 0 0 0 0 0 0
39803 - 6 6 6 6 6 6 14 14 14 18 18 18
39804 - 30 30 30 38 38 38 46 46 46 54 54 54
39805 - 50 50 50 42 42 42 30 30 30 18 18 18
39806 - 10 10 10 0 0 0 0 0 0 0 0 0
39807 - 0 0 0 0 0 0 0 0 0 0 0 0
39808 - 0 0 0 0 0 0 0 0 0 0 0 0
39809 - 0 0 0 0 0 0 0 0 0 0 0 0
39810 - 0 0 0 6 6 6 14 14 14 26 26 26
39811 - 38 38 38 50 50 50 58 58 58 58 58 58
39812 - 54 54 54 42 42 42 30 30 30 18 18 18
39813 - 10 10 10 0 0 0 0 0 0 0 0 0
39814 - 0 0 0 0 0 0 0 0 0 0 0 0
39815 - 0 0 0 0 0 0 0 0 0 0 0 0
39816 - 0 0 0 0 0 0 0 0 0 0 0 0
39817 - 0 0 0 0 0 0 0 0 0 0 0 0
39818 - 0 0 0 0 0 0 0 0 0 0 0 0
39819 - 0 0 0 0 0 0 0 0 0 0 0 0
39820 - 0 0 0 0 0 0 0 0 0 0 0 0
39821 - 0 0 0 0 0 0 0 0 0 0 0 0
39822 - 0 0 0 0 0 0 0 0 0 0 0 0
39823 - 0 0 0 0 0 0 0 0 0 6 6 6
39824 - 6 6 6 10 10 10 14 14 14 18 18 18
39825 - 18 18 18 14 14 14 10 10 10 6 6 6
39826 - 0 0 0 0 0 0 0 0 0 0 0 0
39827 - 0 0 0 0 0 0 0 0 0 0 0 0
39828 - 0 0 0 0 0 0 0 0 0 0 0 0
39829 - 0 0 0 0 0 0 0 0 0 0 0 0
39830 - 0 0 0 0 0 0 0 0 0 6 6 6
39831 - 14 14 14 18 18 18 22 22 22 22 22 22
39832 - 18 18 18 14 14 14 10 10 10 6 6 6
39833 - 0 0 0 0 0 0 0 0 0 0 0 0
39834 - 0 0 0 0 0 0 0 0 0 0 0 0
39835 - 0 0 0 0 0 0 0 0 0 0 0 0
39836 - 0 0 0 0 0 0 0 0 0 0 0 0
39837 - 0 0 0 0 0 0 0 0 0 0 0 0
39838 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39839 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39840 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39850 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39851 +4 4 4 4 4 4
39852 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39853 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39854 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39865 +4 4 4 4 4 4
39866 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39867 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39868 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39879 +4 4 4 4 4 4
39880 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39881 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39882 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39893 +4 4 4 4 4 4
39894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39907 +4 4 4 4 4 4
39908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39921 +4 4 4 4 4 4
39922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39926 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
39927 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
39928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39931 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
39932 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39933 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
39934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39935 +4 4 4 4 4 4
39936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39940 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
39941 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
39942 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39945 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
39946 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
39947 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
39948 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39949 +4 4 4 4 4 4
39950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39954 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
39955 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
39956 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39959 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
39960 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
39961 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
39962 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
39963 +4 4 4 4 4 4
39964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39967 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
39968 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
39969 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
39970 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
39971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39972 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39973 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
39974 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
39975 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
39976 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
39977 +4 4 4 4 4 4
39978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39981 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
39982 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
39983 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
39984 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
39985 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39986 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
39987 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
39988 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
39989 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
39990 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
39991 +4 4 4 4 4 4
39992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39995 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
39996 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
39997 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
39998 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
39999 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40000 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40001 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40002 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40003 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40004 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40005 +4 4 4 4 4 4
40006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40008 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40009 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40010 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40011 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40012 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40013 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40014 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40015 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40016 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40017 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40018 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40019 +4 4 4 4 4 4
40020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40022 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40023 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40024 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40025 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40026 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40027 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40028 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40029 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40030 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40031 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40032 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40033 +4 4 4 4 4 4
40034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40036 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40037 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40038 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40039 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40040 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40041 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40042 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40043 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40044 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40045 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40046 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40047 +4 4 4 4 4 4
40048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40050 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40051 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40052 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40053 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40054 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40055 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40056 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40057 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40058 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40059 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40060 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40061 +4 4 4 4 4 4
40062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40063 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40064 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40065 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40066 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40067 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40068 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40069 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40070 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40071 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40072 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40073 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40074 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40075 +4 4 4 4 4 4
40076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40077 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40078 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40079 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40080 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40081 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40082 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40083 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40084 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40085 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40086 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40087 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40088 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40089 +0 0 0 4 4 4
40090 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40091 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40092 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40093 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40094 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40095 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40096 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40097 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40098 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40099 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40100 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40101 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40102 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40103 +2 0 0 0 0 0
40104 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40105 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40106 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40107 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40108 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40109 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40110 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40111 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40112 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40113 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40114 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40115 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40116 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40117 +37 38 37 0 0 0
40118 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40119 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40120 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40121 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40122 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40123 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40124 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40125 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40126 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40127 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40128 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40129 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40130 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40131 +85 115 134 4 0 0
40132 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40133 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40134 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40135 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40136 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40137 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40138 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40139 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40140 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40141 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40142 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40143 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40144 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40145 +60 73 81 4 0 0
40146 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40147 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40148 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40149 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40150 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40151 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40152 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40153 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40154 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40155 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40156 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40157 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40158 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40159 +16 19 21 4 0 0
40160 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40161 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40162 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40163 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40164 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40165 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40166 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40167 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40168 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40169 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40170 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40171 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40172 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40173 +4 0 0 4 3 3
40174 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40175 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40176 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40178 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40179 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40180 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40181 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40182 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40183 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40184 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40185 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40186 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40187 +3 2 2 4 4 4
40188 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40189 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40190 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40191 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40192 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40193 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40194 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40195 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40196 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40197 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40198 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40199 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40200 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40201 +4 4 4 4 4 4
40202 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40203 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40204 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40205 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40206 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40207 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40208 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40209 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40210 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40211 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40212 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40213 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40214 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40215 +4 4 4 4 4 4
40216 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40217 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40218 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40219 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40220 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40221 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40222 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40223 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40224 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40225 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40226 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40227 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40228 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40229 +5 5 5 5 5 5
40230 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40231 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40232 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40233 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40234 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40235 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40236 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40237 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40238 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40239 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40240 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40241 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40242 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40243 +5 5 5 4 4 4
40244 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40245 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40246 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40247 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40248 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40249 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40250 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40251 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40252 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40253 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40254 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40255 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40257 +4 4 4 4 4 4
40258 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40259 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40260 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40261 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40262 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40263 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40264 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40265 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40266 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40267 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40268 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40269 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40271 +4 4 4 4 4 4
40272 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40273 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40274 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40275 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40276 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40277 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40278 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40279 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40280 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40281 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40282 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40285 +4 4 4 4 4 4
40286 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40287 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40288 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40289 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40290 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40291 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40292 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40293 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40294 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40295 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40296 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40299 +4 4 4 4 4 4
40300 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40301 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40302 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40303 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40304 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40305 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40306 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40307 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40308 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40309 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40310 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40313 +4 4 4 4 4 4
40314 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40315 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40316 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40317 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40318 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40319 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40320 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40321 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40322 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40323 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40324 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40327 +4 4 4 4 4 4
40328 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40329 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40330 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40331 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40332 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40333 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40334 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40335 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40336 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40337 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40338 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40341 +4 4 4 4 4 4
40342 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40343 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40344 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40345 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40346 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40347 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40348 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40349 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40350 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40351 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40352 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40355 +4 4 4 4 4 4
40356 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40357 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40358 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40359 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40360 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40361 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40362 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40363 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40364 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40365 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40366 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40368 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40369 +4 4 4 4 4 4
40370 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40371 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40372 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40373 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40374 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40375 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40376 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40377 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40378 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40379 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40380 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40381 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40382 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40383 +4 4 4 4 4 4
40384 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40385 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40386 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40387 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40388 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40389 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40390 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40391 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40392 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40393 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40394 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40395 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40396 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40397 +4 4 4 4 4 4
40398 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40399 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40400 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40401 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40402 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40403 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40404 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40405 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40406 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40407 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40408 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40409 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40410 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40411 +4 4 4 4 4 4
40412 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40413 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40414 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40415 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40416 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40417 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40418 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40419 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40420 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40421 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40422 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40424 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40425 +4 4 4 4 4 4
40426 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40427 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40428 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40429 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40430 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40431 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40432 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40433 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40434 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40435 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40436 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40438 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40439 +4 4 4 4 4 4
40440 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40441 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40442 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40443 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40444 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40445 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40446 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40447 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40448 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40449 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40450 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40453 +4 4 4 4 4 4
40454 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40455 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40456 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40457 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40458 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40459 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40460 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40461 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40462 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40463 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40464 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40466 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40467 +4 4 4 4 4 4
40468 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40469 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40470 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40471 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40472 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40473 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40474 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40475 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40476 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40477 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40478 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40480 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40481 +4 4 4 4 4 4
40482 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40483 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40484 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40485 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40486 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40487 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40488 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40489 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40490 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40491 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40492 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40494 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40495 +4 4 4 4 4 4
40496 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40497 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40498 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40499 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40500 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40501 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40502 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40503 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40504 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40505 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40506 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40508 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40509 +4 4 4 4 4 4
40510 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40511 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40512 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40513 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40514 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40515 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40516 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40517 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40518 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40519 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40520 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40522 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40523 +4 4 4 4 4 4
40524 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40525 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40526 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40527 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40528 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40529 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40530 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40531 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40532 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40533 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40534 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40535 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40536 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40537 +4 4 4 4 4 4
40538 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40539 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40540 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40541 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40542 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40543 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40544 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40545 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40546 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40547 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40548 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40549 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40550 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40551 +4 4 4 4 4 4
40552 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40553 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40554 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40555 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40556 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40557 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40558 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40559 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40560 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40561 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40562 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40563 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40564 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40565 +4 4 4 4 4 4
40566 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40567 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40568 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40569 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40570 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40571 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40572 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40573 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40574 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40575 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40576 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40577 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40578 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40579 +4 4 4 4 4 4
40580 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40581 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40582 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40583 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40584 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40585 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40586 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40587 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40588 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40589 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40590 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40591 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40592 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40593 +4 4 4 4 4 4
40594 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40595 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40596 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40597 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40598 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40599 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40600 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40601 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40602 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40603 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40604 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40605 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40606 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40607 +4 4 4 4 4 4
40608 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40609 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40610 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40611 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40612 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40613 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40614 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40615 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40616 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40617 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40618 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40619 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40620 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40621 +4 4 4 4 4 4
40622 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40623 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40624 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40625 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40626 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
40627 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
40628 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40629 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
40630 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
40631 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
40632 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40633 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40634 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40635 +4 4 4 4 4 4
40636 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
40637 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
40638 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
40639 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
40640 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
40641 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
40642 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
40643 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
40644 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
40645 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
40646 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40648 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40649 +4 4 4 4 4 4
40650 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
40651 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
40652 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40653 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
40654 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
40655 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
40656 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
40657 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
40658 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
40659 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
40660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40662 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40663 +4 4 4 4 4 4
40664 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40665 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
40666 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
40667 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
40668 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
40669 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
40670 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
40671 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
40672 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
40673 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40674 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40676 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40677 +4 4 4 4 4 4
40678 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
40679 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
40680 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
40681 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
40682 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
40683 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
40684 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
40685 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
40686 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
40687 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40688 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40690 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40691 +4 4 4 4 4 4
40692 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
40693 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
40694 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
40695 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
40696 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
40697 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
40698 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
40699 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
40700 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40701 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40705 +4 4 4 4 4 4
40706 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
40707 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40708 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
40709 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40710 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
40711 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
40712 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
40713 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
40714 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
40715 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40719 +4 4 4 4 4 4
40720 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
40721 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
40722 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
40723 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
40724 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
40725 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
40726 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
40727 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
40728 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
40729 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40733 +4 4 4 4 4 4
40734 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40735 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
40736 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
40737 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
40738 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
40739 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
40740 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
40741 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
40742 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40743 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40747 +4 4 4 4 4 4
40748 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
40749 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
40750 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40751 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
40752 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
40753 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
40754 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
40755 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
40756 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40757 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40761 +4 4 4 4 4 4
40762 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40763 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
40764 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
40765 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
40766 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
40767 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
40768 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
40769 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40770 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40771 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40772 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40774 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40775 +4 4 4 4 4 4
40776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40777 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
40778 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40779 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
40780 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
40781 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
40782 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
40783 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
40784 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40785 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40786 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40788 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40789 +4 4 4 4 4 4
40790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40791 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
40792 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
40793 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
40794 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
40795 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
40796 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
40797 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
40798 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40799 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40800 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40802 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40803 +4 4 4 4 4 4
40804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40805 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40806 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
40807 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40808 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
40809 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
40810 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
40811 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40812 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40813 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40814 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40816 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40817 +4 4 4 4 4 4
40818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40820 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40821 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
40822 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
40823 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
40824 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
40825 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40826 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40827 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40830 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40831 +4 4 4 4 4 4
40832 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40834 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40835 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40836 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
40837 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
40838 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
40839 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40840 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40845 +4 4 4 4 4 4
40846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40849 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40850 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40851 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
40852 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
40853 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40854 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40859 +4 4 4 4 4 4
40860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40863 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40864 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40865 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40866 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
40867 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40868 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40873 +4 4 4 4 4 4
40874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40877 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
40878 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
40879 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
40880 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
40881 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40882 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40887 +4 4 4 4 4 4
40888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40892 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
40893 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40894 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40901 +4 4 4 4 4 4
40902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40906 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
40907 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
40908 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40915 +4 4 4 4 4 4
40916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40920 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
40921 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
40922 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40929 +4 4 4 4 4 4
40930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40934 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
40935 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
40936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40943 +4 4 4 4 4 4
40944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40948 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40949 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
40950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40957 +4 4 4 4 4 4
40958 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
40959 index a159b63..4ab532d 100644
40960 --- a/drivers/video/udlfb.c
40961 +++ b/drivers/video/udlfb.c
40962 @@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
40963 dlfb_urb_completion(urb);
40964
40965 error:
40966 - atomic_add(bytes_sent, &dev->bytes_sent);
40967 - atomic_add(bytes_identical, &dev->bytes_identical);
40968 - atomic_add(width*height*2, &dev->bytes_rendered);
40969 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40970 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40971 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
40972 end_cycles = get_cycles();
40973 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40974 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40975 >> 10)), /* Kcycles */
40976 &dev->cpu_kcycles_used);
40977
40978 @@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
40979 dlfb_urb_completion(urb);
40980
40981 error:
40982 - atomic_add(bytes_sent, &dev->bytes_sent);
40983 - atomic_add(bytes_identical, &dev->bytes_identical);
40984 - atomic_add(bytes_rendered, &dev->bytes_rendered);
40985 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40986 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40987 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
40988 end_cycles = get_cycles();
40989 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40990 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40991 >> 10)), /* Kcycles */
40992 &dev->cpu_kcycles_used);
40993 }
40994 @@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
40995 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40996 struct dlfb_data *dev = fb_info->par;
40997 return snprintf(buf, PAGE_SIZE, "%u\n",
40998 - atomic_read(&dev->bytes_rendered));
40999 + atomic_read_unchecked(&dev->bytes_rendered));
41000 }
41001
41002 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41003 @@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41004 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41005 struct dlfb_data *dev = fb_info->par;
41006 return snprintf(buf, PAGE_SIZE, "%u\n",
41007 - atomic_read(&dev->bytes_identical));
41008 + atomic_read_unchecked(&dev->bytes_identical));
41009 }
41010
41011 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41012 @@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41013 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41014 struct dlfb_data *dev = fb_info->par;
41015 return snprintf(buf, PAGE_SIZE, "%u\n",
41016 - atomic_read(&dev->bytes_sent));
41017 + atomic_read_unchecked(&dev->bytes_sent));
41018 }
41019
41020 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41021 @@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41022 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41023 struct dlfb_data *dev = fb_info->par;
41024 return snprintf(buf, PAGE_SIZE, "%u\n",
41025 - atomic_read(&dev->cpu_kcycles_used));
41026 + atomic_read_unchecked(&dev->cpu_kcycles_used));
41027 }
41028
41029 static ssize_t edid_show(
41030 @@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41031 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41032 struct dlfb_data *dev = fb_info->par;
41033
41034 - atomic_set(&dev->bytes_rendered, 0);
41035 - atomic_set(&dev->bytes_identical, 0);
41036 - atomic_set(&dev->bytes_sent, 0);
41037 - atomic_set(&dev->cpu_kcycles_used, 0);
41038 + atomic_set_unchecked(&dev->bytes_rendered, 0);
41039 + atomic_set_unchecked(&dev->bytes_identical, 0);
41040 + atomic_set_unchecked(&dev->bytes_sent, 0);
41041 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41042
41043 return count;
41044 }
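
The udlfb hunks above convert the driver's performance counters (bytes_sent, bytes_identical, bytes_rendered, cpu_kcycles_used) from atomic_add/atomic_read/atomic_set to the *_unchecked variants that this patch series defines elsewhere: under PaX's overflow protection, ordinary atomic_t arithmetic traps on overflow, so counters that are pure statistics and are allowed to wrap are moved to the unchecked type. Below is a minimal userspace sketch of that distinction, assuming only standard C plus the GCC/Clang __builtin_add_overflow builtin; nothing in it is part of the patch.

/* Illustrative userspace sketch; not part of the patch. It mirrors the
 * idea behind atomic_add() vs atomic_add_unchecked(): reference-count
 * style values are checked for overflow, while pure statistics counters
 * are allowed to wrap silently.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* "checked" add: abort on signed overflow, like the PaX refcount trap */
static int32_t add_checked(int32_t counter, int32_t delta)
{
        int32_t result;

        if (__builtin_add_overflow(counter, delta, &result)) {
                fprintf(stderr, "refcount overflow detected, aborting\n");
                abort();
        }
        return result;
}

/* "unchecked" add: wraparound is acceptable for statistics */
static uint32_t add_unchecked(uint32_t counter, uint32_t delta)
{
        return counter + delta;         /* unsigned wrap is well defined */
}

int main(void)
{
        uint32_t bytes_rendered = UINT32_MAX - 10;
        int32_t refs = INT32_MAX - 1;

        bytes_rendered = add_unchecked(bytes_rendered, 100);   /* wraps */
        printf("bytes_rendered wrapped to %u\n", (unsigned)bytes_rendered);

        refs = add_checked(refs, 1);    /* fine, reaches INT32_MAX */
        refs = add_checked(refs, 1);    /* overflows and aborts */
        printf("never reached: %d\n", (int)refs);
        return 0;
}

The kernel-side split serves the same purpose: reference-count-like values keep the trap, throughput statistics do not.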
41045 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41046 index b0e2a42..e2df3ad 100644
41047 --- a/drivers/video/uvesafb.c
41048 +++ b/drivers/video/uvesafb.c
41049 @@ -19,6 +19,7 @@
41050 #include <linux/io.h>
41051 #include <linux/mutex.h>
41052 #include <linux/slab.h>
41053 +#include <linux/moduleloader.h>
41054 #include <video/edid.h>
41055 #include <video/uvesafb.h>
41056 #ifdef CONFIG_X86
41057 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41058 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41059 par->pmi_setpal = par->ypan = 0;
41060 } else {
41061 +
41062 +#ifdef CONFIG_PAX_KERNEXEC
41063 +#ifdef CONFIG_MODULES
41064 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41065 +#endif
41066 + if (!par->pmi_code) {
41067 + par->pmi_setpal = par->ypan = 0;
41068 + return 0;
41069 + }
41070 +#endif
41071 +
41072 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41073 + task->t.regs.edi);
41074 +
41075 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41076 + pax_open_kernel();
41077 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41078 + pax_close_kernel();
41079 +
41080 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41081 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41082 +#else
41083 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41084 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41085 +#endif
41086 +
41087 printk(KERN_INFO "uvesafb: protected mode interface info at "
41088 "%04x:%04x\n",
41089 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41090 @@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
41091 par->ypan = ypan;
41092
41093 if (par->pmi_setpal || par->ypan) {
41094 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
41095 if (__supported_pte_mask & _PAGE_NX) {
41096 par->pmi_setpal = par->ypan = 0;
41097 printk(KERN_WARNING "uvesafb: NX protection is actively."
41098 "We have better not to use the PMI.\n");
41099 - } else {
41100 + } else
41101 +#endif
41102 uvesafb_vbe_getpmi(task, par);
41103 - }
41104 }
41105 #else
41106 /* The protected mode interface is not available on non-x86. */
41107 @@ -1836,6 +1860,11 @@ out:
41108 if (par->vbe_modes)
41109 kfree(par->vbe_modes);
41110
41111 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41112 + if (par->pmi_code)
41113 + module_free_exec(NULL, par->pmi_code);
41114 +#endif
41115 +
41116 framebuffer_release(info);
41117 return err;
41118 }
41119 @@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
41120 kfree(par->vbe_state_orig);
41121 if (par->vbe_state_saved)
41122 kfree(par->vbe_state_saved);
41123 +
41124 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41125 + if (par->pmi_code)
41126 + module_free_exec(NULL, par->pmi_code);
41127 +#endif
41128 +
41129 }
41130
41131 framebuffer_release(info);
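
In the uvesafb hunks above, the BIOS protected-mode interface (PMI) code is no longer executed out of the data mapping returned by phys_to_virt(); with KERNEXEC enabled it is copied into an executable allocation from module_alloc_exec(), with pax_open_kernel()/pax_close_kernel() bracketing the memcpy (all three helpers come from this patch series, not mainline). A rough userspace analogue of that write-then-execute discipline follows, using only POSIX mmap/mprotect and a placeholder byte in place of real relocated code.

/* Illustrative userspace analogue; not part of the patch. Code that must
 * be copied at run time is staged in a writable mapping and only
 * afterwards made executable. The kernel-side counterparts here are the
 * patch's module_alloc_exec() and pax_open_kernel()/pax_close_kernel().
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        unsigned char blob[] = { 0xc3 };        /* placeholder for copied code */

        /* stage 1: writable, not executable */
        void *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memcpy(buf, blob, sizeof(blob));

        /* stage 2: executable, no longer writable */
        if (mprotect(buf, page, PROT_READ | PROT_EXEC) != 0) {
                perror("mprotect");
                return 1;
        }
        printf("staged %zu byte(s) at %p, now read+execute only\n",
               sizeof(blob), buf);
        munmap(buf, page);
        return 0;
}

On a PaX kernel with MPROTECT enforced for the process, that mprotect() call itself would typically be refused, which is exactly the kind of restriction the kernel-side helpers are designed to work within.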
41132 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41133 index 501b340..86bd4cf 100644
41134 --- a/drivers/video/vesafb.c
41135 +++ b/drivers/video/vesafb.c
41136 @@ -9,6 +9,7 @@
41137 */
41138
41139 #include <linux/module.h>
41140 +#include <linux/moduleloader.h>
41141 #include <linux/kernel.h>
41142 #include <linux/errno.h>
41143 #include <linux/string.h>
41144 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41145 static int vram_total __initdata; /* Set total amount of memory */
41146 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41147 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41148 -static void (*pmi_start)(void) __read_mostly;
41149 -static void (*pmi_pal) (void) __read_mostly;
41150 +static void (*pmi_start)(void) __read_only;
41151 +static void (*pmi_pal) (void) __read_only;
41152 static int depth __read_mostly;
41153 static int vga_compat __read_mostly;
41154 /* --------------------------------------------------------------------- */
41155 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41156 unsigned int size_vmode;
41157 unsigned int size_remap;
41158 unsigned int size_total;
41159 + void *pmi_code = NULL;
41160
41161 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41162 return -ENODEV;
41163 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41164 size_remap = size_total;
41165 vesafb_fix.smem_len = size_remap;
41166
41167 -#ifndef __i386__
41168 - screen_info.vesapm_seg = 0;
41169 -#endif
41170 -
41171 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41172 printk(KERN_WARNING
41173 "vesafb: cannot reserve video memory at 0x%lx\n",
41174 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41175 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41176 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41177
41178 +#ifdef __i386__
41179 +
41180 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41181 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
41182 + if (!pmi_code)
41183 +#elif !defined(CONFIG_PAX_KERNEXEC)
41184 + if (0)
41185 +#endif
41186 +
41187 +#endif
41188 + screen_info.vesapm_seg = 0;
41189 +
41190 if (screen_info.vesapm_seg) {
41191 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41192 - screen_info.vesapm_seg,screen_info.vesapm_off);
41193 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41194 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41195 }
41196
41197 if (screen_info.vesapm_seg < 0xc000)
41198 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41199
41200 if (ypan || pmi_setpal) {
41201 unsigned short *pmi_base;
41202 +
41203 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41204 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41205 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41206 +
41207 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41208 + pax_open_kernel();
41209 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41210 +#else
41211 + pmi_code = pmi_base;
41212 +#endif
41213 +
41214 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41215 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41216 +
41217 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41218 + pmi_start = ktva_ktla(pmi_start);
41219 + pmi_pal = ktva_ktla(pmi_pal);
41220 + pax_close_kernel();
41221 +#endif
41222 +
41223 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41224 if (pmi_base[3]) {
41225 printk(KERN_INFO "vesafb: pmi: ports = ");
41226 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41227 info->node, info->fix.id);
41228 return 0;
41229 err:
41230 +
41231 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41232 + module_free_exec(NULL, pmi_code);
41233 +#endif
41234 +
41235 if (info->screen_base)
41236 iounmap(info->screen_base);
41237 framebuffer_release(info);
41238 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41239 index 88714ae..16c2e11 100644
41240 --- a/drivers/video/via/via_clock.h
41241 +++ b/drivers/video/via/via_clock.h
41242 @@ -56,7 +56,7 @@ struct via_clock {
41243
41244 void (*set_engine_pll_state)(u8 state);
41245 void (*set_engine_pll)(struct via_pll_config config);
41246 -};
41247 +} __no_const;
41248
41249
41250 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41251 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41252 index e56c934..fc22f4b 100644
41253 --- a/drivers/xen/xen-pciback/conf_space.h
41254 +++ b/drivers/xen/xen-pciback/conf_space.h
41255 @@ -44,15 +44,15 @@ struct config_field {
41256 struct {
41257 conf_dword_write write;
41258 conf_dword_read read;
41259 - } dw;
41260 + } __no_const dw;
41261 struct {
41262 conf_word_write write;
41263 conf_word_read read;
41264 - } w;
41265 + } __no_const w;
41266 struct {
41267 conf_byte_write write;
41268 conf_byte_read read;
41269 - } b;
41270 + } __no_const b;
41271 } u;
41272 struct list_head list;
41273 };
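
The via_clock.h and xen-pciback hunks above tag structures made up of function pointers with __no_const. In this patch series the constify plugin turns such ops-style structures read-only by default; __no_const opts a structure out because its members are assigned at probe or run time rather than in a static initializer. The plain-C sketch below shows the trade-off; the clock_ops names are invented for the example and are not from the patch.

/* Illustrative sketch; not part of the patch, names invented. Most
 * function-pointer "ops" structures can be const because they are set
 * once at build time, while ones filled in at run time, like via_clock
 * or the xen-pciback accessors above, must stay writable and are
 * therefore tagged __no_const.
 */
#include <stdio.h>

struct clock_ops {
        void (*set_pll)(unsigned int khz);
};

static void set_pll_hw_a(unsigned int khz) { printf("A: %u kHz\n", khz); }
static void set_pll_hw_b(unsigned int khz) { printf("B: %u kHz\n", khz); }

/* Known at build time: can live in read-only memory. */
static const struct clock_ops fixed_ops = { .set_pll = set_pll_hw_a };

/* Chosen at probe time: must remain writable. */
static struct clock_ops runtime_ops;

int main(int argc, char **argv)
{
        runtime_ops.set_pll = (argc > 1) ? set_pll_hw_b : set_pll_hw_a;

        fixed_ops.set_pll(100000);
        runtime_ops.set_pll(200000);
        return 0;
}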
41274 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41275 index 014c8dd..6f3dfe6 100644
41276 --- a/fs/9p/vfs_inode.c
41277 +++ b/fs/9p/vfs_inode.c
41278 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41279 void
41280 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41281 {
41282 - char *s = nd_get_link(nd);
41283 + const char *s = nd_get_link(nd);
41284
41285 p9_debug(P9_DEBUG_VFS, " %s %s\n",
41286 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
41287 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41288 index e95d1b6..3454244 100644
41289 --- a/fs/Kconfig.binfmt
41290 +++ b/fs/Kconfig.binfmt
41291 @@ -89,7 +89,7 @@ config HAVE_AOUT
41292
41293 config BINFMT_AOUT
41294 tristate "Kernel support for a.out and ECOFF binaries"
41295 - depends on HAVE_AOUT
41296 + depends on HAVE_AOUT && BROKEN
41297 ---help---
41298 A.out (Assembler.OUTput) is a set of formats for libraries and
41299 executables used in the earliest versions of UNIX. Linux used
41300 diff --git a/fs/aio.c b/fs/aio.c
41301 index e7f2fad..15ad8a4 100644
41302 --- a/fs/aio.c
41303 +++ b/fs/aio.c
41304 @@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41305 size += sizeof(struct io_event) * nr_events;
41306 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41307
41308 - if (nr_pages < 0)
41309 + if (nr_pages <= 0)
41310 return -EINVAL;
41311
41312 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41313 @@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41314 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41315 {
41316 ssize_t ret;
41317 + struct iovec iovstack;
41318
41319 #ifdef CONFIG_COMPAT
41320 if (compat)
41321 ret = compat_rw_copy_check_uvector(type,
41322 (struct compat_iovec __user *)kiocb->ki_buf,
41323 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41324 + kiocb->ki_nbytes, 1, &iovstack,
41325 &kiocb->ki_iovec, 1);
41326 else
41327 #endif
41328 ret = rw_copy_check_uvector(type,
41329 (struct iovec __user *)kiocb->ki_buf,
41330 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41331 + kiocb->ki_nbytes, 1, &iovstack,
41332 &kiocb->ki_iovec, 1);
41333 if (ret < 0)
41334 goto out;
41335 @@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41336 if (ret < 0)
41337 goto out;
41338
41339 + if (kiocb->ki_iovec == &iovstack) {
41340 + kiocb->ki_inline_vec = iovstack;
41341 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41342 + }
41343 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41344 kiocb->ki_cur_seg = 0;
41345 /* ki_nbytes/left now reflect bytes instead of segs */
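
The aio hunk above stops handing &kiocb->ki_inline_vec straight to the uvector checks; the result is validated into a local struct iovec (iovstack) first and only copied into the kiocb when that stack vector is what the helper actually used. The general shape, validate into a temporary and commit to the long-lived object only on success, is sketched below in self-contained userspace C with invented names.

/* Illustrative sketch; not part of the patch, and the names are invented.
 * Same shape as the aio hunk above: parse and validate into a stack
 * temporary first, and commit the result into the long-lived object only
 * once validation has succeeded.
 */
#include <stdio.h>
#include <string.h>

struct request {
        char inline_buf[32];            /* long-lived storage in the object */
};

static int parse_into(struct request *req, const char *input)
{
        char stackbuf[32] = "";         /* temporary, like iovstack above */

        if (strlen(input) >= sizeof(stackbuf))
                return -1;              /* reject before touching *req */

        strcpy(stackbuf, input);

        /* validation passed: commit to the object in one step */
        memcpy(req->inline_buf, stackbuf, sizeof(stackbuf));
        return 0;
}

int main(void)
{
        struct request req = { .inline_buf = "untouched" };

        if (parse_into(&req, "this input is far too long to be accepted") < 0)
                printf("rejected, object still holds: %s\n", req.inline_buf);

        if (parse_into(&req, "ok") == 0)
                printf("accepted, object now holds: %s\n", req.inline_buf);
        return 0;
}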
41346 diff --git a/fs/attr.c b/fs/attr.c
41347 index 73f69a6..cc501ba 100644
41348 --- a/fs/attr.c
41349 +++ b/fs/attr.c
41350 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41351 unsigned long limit;
41352
41353 limit = rlimit(RLIMIT_FSIZE);
41354 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41355 if (limit != RLIM_INFINITY && offset > limit)
41356 goto out_sig;
41357 if (offset > inode->i_sb->s_maxbytes)
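
The one-line fs/attr.c change calls gr_learn_resource(), the hook this patch series uses to feed its RBAC learning mode, just before inode_newsize_ok() compares the requested size against RLIMIT_FSIZE. Purely for orientation, this is the same limit a process can inspect from userspace with the standard getrlimit() interface; the sketch below is not part of the patch.

/* Illustrative userspace sketch; not part of the patch. It only reads the
 * RLIMIT_FSIZE limit that inode_newsize_ok() compares against, and that
 * the patch's gr_learn_resource() hook records for learning mode.
 */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_FSIZE, &rl) != 0) {
                perror("getrlimit");
                return 1;
        }

        if (rl.rlim_cur == RLIM_INFINITY)
                printf("RLIMIT_FSIZE: unlimited\n");
        else
                printf("RLIMIT_FSIZE: %llu bytes\n",
                       (unsigned long long)rl.rlim_cur);
        return 0;
}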
41358 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41359 index da8876d..9f3e6d8 100644
41360 --- a/fs/autofs4/waitq.c
41361 +++ b/fs/autofs4/waitq.c
41362 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
41363 {
41364 unsigned long sigpipe, flags;
41365 mm_segment_t fs;
41366 - const char *data = (const char *)addr;
41367 + const char __user *data = (const char __force_user *)addr;
41368 ssize_t wr = 0;
41369
41370 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
41371 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41372 index e18da23..affc30e 100644
41373 --- a/fs/befs/linuxvfs.c
41374 +++ b/fs/befs/linuxvfs.c
41375 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41376 {
41377 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41378 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41379 - char *link = nd_get_link(nd);
41380 + const char *link = nd_get_link(nd);
41381 if (!IS_ERR(link))
41382 kfree(link);
41383 }
41384 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41385 index d146e18..12d1bd1 100644
41386 --- a/fs/binfmt_aout.c
41387 +++ b/fs/binfmt_aout.c
41388 @@ -16,6 +16,7 @@
41389 #include <linux/string.h>
41390 #include <linux/fs.h>
41391 #include <linux/file.h>
41392 +#include <linux/security.h>
41393 #include <linux/stat.h>
41394 #include <linux/fcntl.h>
41395 #include <linux/ptrace.h>
41396 @@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41397 #endif
41398 # define START_STACK(u) ((void __user *)u.start_stack)
41399
41400 + memset(&dump, 0, sizeof(dump));
41401 +
41402 fs = get_fs();
41403 set_fs(KERNEL_DS);
41404 has_dumped = 1;
41405 @@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41406
41407 /* If the size of the dump file exceeds the rlimit, then see what would happen
41408 if we wrote the stack, but not the data area. */
41409 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41410 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41411 dump.u_dsize = 0;
41412
41413 /* Make sure we have enough room to write the stack and data areas. */
41414 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41415 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41416 dump.u_ssize = 0;
41417
41418 @@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41419 rlim = rlimit(RLIMIT_DATA);
41420 if (rlim >= RLIM_INFINITY)
41421 rlim = ~0;
41422 +
41423 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41424 if (ex.a_data + ex.a_bss > rlim)
41425 return -ENOMEM;
41426
41427 @@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41428
41429 install_exec_creds(bprm);
41430
41431 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41432 + current->mm->pax_flags = 0UL;
41433 +#endif
41434 +
41435 +#ifdef CONFIG_PAX_PAGEEXEC
41436 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41437 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41438 +
41439 +#ifdef CONFIG_PAX_EMUTRAMP
41440 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41441 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41442 +#endif
41443 +
41444 +#ifdef CONFIG_PAX_MPROTECT
41445 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41446 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41447 +#endif
41448 +
41449 + }
41450 +#endif
41451 +
41452 if (N_MAGIC(ex) == OMAGIC) {
41453 unsigned long text_addr, map_size;
41454 loff_t pos;
41455 @@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41456 }
41457
41458 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41459 - PROT_READ | PROT_WRITE | PROT_EXEC,
41460 + PROT_READ | PROT_WRITE,
41461 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41462 fd_offset + ex.a_text);
41463 if (error != N_DATADDR(ex)) {
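
The binfmt_aout.c hunks above do three things: zero the on-stack dump header before it is populated and written out, report the projected core and data sizes through gr_learn_resource(), and drop PROT_EXEC from the data-segment mapping. The memset() is the most broadly applicable of the three, since any structure copied out wholesale can otherwise leak stale stack bytes through padding or fields the code never writes. A small sketch with an invented structure:

/* Illustrative sketch; not part of the patch, structure and names are
 * invented. Same idea as the memset(&dump, 0, sizeof(dump)) added above:
 * a structure that will be copied out wholesale (to a core file, to
 * userspace, over the network) is zeroed first so that padding and
 * never-written fields cannot leak stale stack contents.
 */
#include <stdio.h>
#include <string.h>

struct dump_header {
        unsigned long start_stack;
        unsigned short magic;           /* followed by invisible padding */
        unsigned long text_size;
};

static void fill_header(struct dump_header *hdr)
{
        memset(hdr, 0, sizeof(*hdr));   /* padding + unset fields become 0 */
        hdr->magic = 0x10b;
        hdr->text_size = 4096;
        /* start_stack intentionally left alone: it is now 0, not junk */
}

int main(void)
{
        struct dump_header hdr;

        fill_header(&hdr);
        printf("magic=%#x text=%lu stack=%lu\n",
               (unsigned)hdr.magic, hdr.text_size, hdr.start_stack);
        return 0;
}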
41464 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41465 index 16f7354..185d8dc 100644
41466 --- a/fs/binfmt_elf.c
41467 +++ b/fs/binfmt_elf.c
41468 @@ -32,6 +32,7 @@
41469 #include <linux/elf.h>
41470 #include <linux/utsname.h>
41471 #include <linux/coredump.h>
41472 +#include <linux/xattr.h>
41473 #include <asm/uaccess.h>
41474 #include <asm/param.h>
41475 #include <asm/page.h>
41476 @@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41477 #define elf_core_dump NULL
41478 #endif
41479
41480 +#ifdef CONFIG_PAX_MPROTECT
41481 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41482 +#endif
41483 +
41484 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41485 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41486 #else
41487 @@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
41488 .load_binary = load_elf_binary,
41489 .load_shlib = load_elf_library,
41490 .core_dump = elf_core_dump,
41491 +
41492 +#ifdef CONFIG_PAX_MPROTECT
41493 + .handle_mprotect= elf_handle_mprotect,
41494 +#endif
41495 +
41496 .min_coredump = ELF_EXEC_PAGESIZE,
41497 };
41498
41499 @@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
41500
41501 static int set_brk(unsigned long start, unsigned long end)
41502 {
41503 + unsigned long e = end;
41504 +
41505 start = ELF_PAGEALIGN(start);
41506 end = ELF_PAGEALIGN(end);
41507 if (end > start) {
41508 @@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41509 if (BAD_ADDR(addr))
41510 return addr;
41511 }
41512 - current->mm->start_brk = current->mm->brk = end;
41513 + current->mm->start_brk = current->mm->brk = e;
41514 return 0;
41515 }
41516
41517 @@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41518 elf_addr_t __user *u_rand_bytes;
41519 const char *k_platform = ELF_PLATFORM;
41520 const char *k_base_platform = ELF_BASE_PLATFORM;
41521 - unsigned char k_rand_bytes[16];
41522 + u32 k_rand_bytes[4];
41523 int items;
41524 elf_addr_t *elf_info;
41525 int ei_index = 0;
41526 const struct cred *cred = current_cred();
41527 struct vm_area_struct *vma;
41528 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41529
41530 /*
41531 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41532 @@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41533 * Generate 16 random bytes for userspace PRNG seeding.
41534 */
41535 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41536 - u_rand_bytes = (elf_addr_t __user *)
41537 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41538 + srandom32(k_rand_bytes[0] ^ random32());
41539 + srandom32(k_rand_bytes[1] ^ random32());
41540 + srandom32(k_rand_bytes[2] ^ random32());
41541 + srandom32(k_rand_bytes[3] ^ random32());
41542 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
41543 + u_rand_bytes = (elf_addr_t __user *) p;
41544 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41545 return -EFAULT;
41546
41547 @@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41548 return -EFAULT;
41549 current->mm->env_end = p;
41550
41551 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41552 +
41553 /* Put the elf_info on the stack in the right place. */
41554 sp = (elf_addr_t __user *)envp + 1;
41555 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41556 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41557 return -EFAULT;
41558 return 0;
41559 }
41560 @@ -380,10 +399,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41561 {
41562 struct elf_phdr *elf_phdata;
41563 struct elf_phdr *eppnt;
41564 - unsigned long load_addr = 0;
41565 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41566 int load_addr_set = 0;
41567 unsigned long last_bss = 0, elf_bss = 0;
41568 - unsigned long error = ~0UL;
41569 + unsigned long error = -EINVAL;
41570 unsigned long total_size;
41571 int retval, i, size;
41572
41573 @@ -429,6 +448,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41574 goto out_close;
41575 }
41576
41577 +#ifdef CONFIG_PAX_SEGMEXEC
41578 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41579 + pax_task_size = SEGMEXEC_TASK_SIZE;
41580 +#endif
41581 +
41582 eppnt = elf_phdata;
41583 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41584 if (eppnt->p_type == PT_LOAD) {
41585 @@ -472,8 +496,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41586 k = load_addr + eppnt->p_vaddr;
41587 if (BAD_ADDR(k) ||
41588 eppnt->p_filesz > eppnt->p_memsz ||
41589 - eppnt->p_memsz > TASK_SIZE ||
41590 - TASK_SIZE - eppnt->p_memsz < k) {
41591 + eppnt->p_memsz > pax_task_size ||
41592 + pax_task_size - eppnt->p_memsz < k) {
41593 error = -ENOMEM;
41594 goto out_close;
41595 }
41596 @@ -525,6 +549,351 @@ out:
41597 return error;
41598 }
41599
41600 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41601 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
41602 +{
41603 + unsigned long pax_flags = 0UL;
41604 +
41605 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41606 +
41607 +#ifdef CONFIG_PAX_PAGEEXEC
41608 + if (elf_phdata->p_flags & PF_PAGEEXEC)
41609 + pax_flags |= MF_PAX_PAGEEXEC;
41610 +#endif
41611 +
41612 +#ifdef CONFIG_PAX_SEGMEXEC
41613 + if (elf_phdata->p_flags & PF_SEGMEXEC)
41614 + pax_flags |= MF_PAX_SEGMEXEC;
41615 +#endif
41616 +
41617 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41618 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41619 + if ((__supported_pte_mask & _PAGE_NX))
41620 + pax_flags &= ~MF_PAX_SEGMEXEC;
41621 + else
41622 + pax_flags &= ~MF_PAX_PAGEEXEC;
41623 + }
41624 +#endif
41625 +
41626 +#ifdef CONFIG_PAX_EMUTRAMP
41627 + if (elf_phdata->p_flags & PF_EMUTRAMP)
41628 + pax_flags |= MF_PAX_EMUTRAMP;
41629 +#endif
41630 +
41631 +#ifdef CONFIG_PAX_MPROTECT
41632 + if (elf_phdata->p_flags & PF_MPROTECT)
41633 + pax_flags |= MF_PAX_MPROTECT;
41634 +#endif
41635 +
41636 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41637 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
41638 + pax_flags |= MF_PAX_RANDMMAP;
41639 +#endif
41640 +
41641 +#endif
41642 +
41643 + return pax_flags;
41644 +}
41645 +
41646 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
41647 +{
41648 + unsigned long pax_flags = 0UL;
41649 +
41650 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41651 +
41652 +#ifdef CONFIG_PAX_PAGEEXEC
41653 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
41654 + pax_flags |= MF_PAX_PAGEEXEC;
41655 +#endif
41656 +
41657 +#ifdef CONFIG_PAX_SEGMEXEC
41658 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
41659 + pax_flags |= MF_PAX_SEGMEXEC;
41660 +#endif
41661 +
41662 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41663 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41664 + if ((__supported_pte_mask & _PAGE_NX))
41665 + pax_flags &= ~MF_PAX_SEGMEXEC;
41666 + else
41667 + pax_flags &= ~MF_PAX_PAGEEXEC;
41668 + }
41669 +#endif
41670 +
41671 +#ifdef CONFIG_PAX_EMUTRAMP
41672 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
41673 + pax_flags |= MF_PAX_EMUTRAMP;
41674 +#endif
41675 +
41676 +#ifdef CONFIG_PAX_MPROTECT
41677 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
41678 + pax_flags |= MF_PAX_MPROTECT;
41679 +#endif
41680 +
41681 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41682 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
41683 + pax_flags |= MF_PAX_RANDMMAP;
41684 +#endif
41685 +
41686 +#endif
41687 +
41688 + return pax_flags;
41689 +}
41690 +
41691 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
41692 +{
41693 + unsigned long pax_flags = 0UL;
41694 +
41695 +#ifdef CONFIG_PAX_EI_PAX
41696 +
41697 +#ifdef CONFIG_PAX_PAGEEXEC
41698 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
41699 + pax_flags |= MF_PAX_PAGEEXEC;
41700 +#endif
41701 +
41702 +#ifdef CONFIG_PAX_SEGMEXEC
41703 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
41704 + pax_flags |= MF_PAX_SEGMEXEC;
41705 +#endif
41706 +
41707 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41708 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41709 + if ((__supported_pte_mask & _PAGE_NX))
41710 + pax_flags &= ~MF_PAX_SEGMEXEC;
41711 + else
41712 + pax_flags &= ~MF_PAX_PAGEEXEC;
41713 + }
41714 +#endif
41715 +
41716 +#ifdef CONFIG_PAX_EMUTRAMP
41717 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
41718 + pax_flags |= MF_PAX_EMUTRAMP;
41719 +#endif
41720 +
41721 +#ifdef CONFIG_PAX_MPROTECT
41722 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
41723 + pax_flags |= MF_PAX_MPROTECT;
41724 +#endif
41725 +
41726 +#ifdef CONFIG_PAX_ASLR
41727 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
41728 + pax_flags |= MF_PAX_RANDMMAP;
41729 +#endif
41730 +
41731 +#else
41732 +
41733 +#ifdef CONFIG_PAX_PAGEEXEC
41734 + pax_flags |= MF_PAX_PAGEEXEC;
41735 +#endif
41736 +
41737 +#ifdef CONFIG_PAX_MPROTECT
41738 + pax_flags |= MF_PAX_MPROTECT;
41739 +#endif
41740 +
41741 +#ifdef CONFIG_PAX_RANDMMAP
41742 + pax_flags |= MF_PAX_RANDMMAP;
41743 +#endif
41744 +
41745 +#ifdef CONFIG_PAX_SEGMEXEC
41746 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
41747 + pax_flags &= ~MF_PAX_PAGEEXEC;
41748 + pax_flags |= MF_PAX_SEGMEXEC;
41749 + }
41750 +#endif
41751 +
41752 +#endif
41753 +
41754 + return pax_flags;
41755 +}
41756 +
41757 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
41758 +{
41759 +
41760 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41761 + unsigned long i;
41762 +
41763 + for (i = 0UL; i < elf_ex->e_phnum; i++)
41764 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
41765 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
41766 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
41767 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
41768 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
41769 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
41770 + return ~0UL;
41771 +
41772 +#ifdef CONFIG_PAX_SOFTMODE
41773 + if (pax_softmode)
41774 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
41775 + else
41776 +#endif
41777 +
41778 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
41779 + break;
41780 + }
41781 +#endif
41782 +
41783 + return ~0UL;
41784 +}
41785 +
41786 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41787 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
41788 +{
41789 + unsigned long pax_flags = 0UL;
41790 +
41791 +#ifdef CONFIG_PAX_PAGEEXEC
41792 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
41793 + pax_flags |= MF_PAX_PAGEEXEC;
41794 +#endif
41795 +
41796 +#ifdef CONFIG_PAX_SEGMEXEC
41797 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
41798 + pax_flags |= MF_PAX_SEGMEXEC;
41799 +#endif
41800 +
41801 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41802 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41803 + if ((__supported_pte_mask & _PAGE_NX))
41804 + pax_flags &= ~MF_PAX_SEGMEXEC;
41805 + else
41806 + pax_flags &= ~MF_PAX_PAGEEXEC;
41807 + }
41808 +#endif
41809 +
41810 +#ifdef CONFIG_PAX_EMUTRAMP
41811 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
41812 + pax_flags |= MF_PAX_EMUTRAMP;
41813 +#endif
41814 +
41815 +#ifdef CONFIG_PAX_MPROTECT
41816 + if (pax_flags_softmode & MF_PAX_MPROTECT)
41817 + pax_flags |= MF_PAX_MPROTECT;
41818 +#endif
41819 +
41820 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41821 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
41822 + pax_flags |= MF_PAX_RANDMMAP;
41823 +#endif
41824 +
41825 + return pax_flags;
41826 +}
41827 +
41828 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
41829 +{
41830 + unsigned long pax_flags = 0UL;
41831 +
41832 +#ifdef CONFIG_PAX_PAGEEXEC
41833 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
41834 + pax_flags |= MF_PAX_PAGEEXEC;
41835 +#endif
41836 +
41837 +#ifdef CONFIG_PAX_SEGMEXEC
41838 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
41839 + pax_flags |= MF_PAX_SEGMEXEC;
41840 +#endif
41841 +
41842 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41843 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41844 + if ((__supported_pte_mask & _PAGE_NX))
41845 + pax_flags &= ~MF_PAX_SEGMEXEC;
41846 + else
41847 + pax_flags &= ~MF_PAX_PAGEEXEC;
41848 + }
41849 +#endif
41850 +
41851 +#ifdef CONFIG_PAX_EMUTRAMP
41852 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
41853 + pax_flags |= MF_PAX_EMUTRAMP;
41854 +#endif
41855 +
41856 +#ifdef CONFIG_PAX_MPROTECT
41857 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
41858 + pax_flags |= MF_PAX_MPROTECT;
41859 +#endif
41860 +
41861 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41862 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
41863 + pax_flags |= MF_PAX_RANDMMAP;
41864 +#endif
41865 +
41866 + return pax_flags;
41867 +}
41868 +#endif
41869 +
41870 +static unsigned long pax_parse_xattr_pax(struct file * const file)
41871 +{
41872 +
41873 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41874 + ssize_t xattr_size, i;
41875 + unsigned char xattr_value[5];
41876 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
41877 +
41878 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
41879 + if (xattr_size <= 0)
41880 + return ~0UL;
41881 +
41882 + for (i = 0; i < xattr_size; i++)
41883 + switch (xattr_value[i]) {
41884 + default:
41885 + return ~0UL;
41886 +
41887 +#define parse_flag(option1, option2, flag) \
41888 + case option1: \
41889 + pax_flags_hardmode |= MF_PAX_##flag; \
41890 + break; \
41891 + case option2: \
41892 + pax_flags_softmode |= MF_PAX_##flag; \
41893 + break;
41894 +
41895 + parse_flag('p', 'P', PAGEEXEC);
41896 + parse_flag('e', 'E', EMUTRAMP);
41897 + parse_flag('m', 'M', MPROTECT);
41898 + parse_flag('r', 'R', RANDMMAP);
41899 + parse_flag('s', 'S', SEGMEXEC);
41900 +
41901 +#undef parse_flag
41902 + }
41903 +
41904 + if (pax_flags_hardmode & pax_flags_softmode)
41905 + return ~0UL;
41906 +
41907 +#ifdef CONFIG_PAX_SOFTMODE
41908 + if (pax_softmode)
41909 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
41910 + else
41911 +#endif
41912 +
41913 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
41914 +#else
41915 + return ~0UL;
41916 +#endif
41917 +
41918 +}
41919 +
41920 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
41921 +{
41922 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
41923 +
41924 + pax_flags = pax_parse_ei_pax(elf_ex);
41925 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
41926 + xattr_pax_flags = pax_parse_xattr_pax(file);
41927 +
41928 + if (pt_pax_flags == ~0UL)
41929 + pt_pax_flags = xattr_pax_flags;
41930 + else if (xattr_pax_flags == ~0UL)
41931 + xattr_pax_flags = pt_pax_flags;
41932 + if (pt_pax_flags != xattr_pax_flags)
41933 + return -EINVAL;
41934 + if (pt_pax_flags != ~0UL)
41935 + pax_flags = pt_pax_flags;
41936 +
41937 + if (0 > pax_check_flags(&pax_flags))
41938 + return -EINVAL;
41939 +
41940 + current->mm->pax_flags = pax_flags;
41941 + return 0;
41942 +}
41943 +#endif
41944 +
41945 /*
41946 * These are the functions used to load ELF style executables and shared
41947 * libraries. There is no binary dependent code anywhere else.
41948 @@ -541,6 +910,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
41949 {
41950 unsigned int random_variable = 0;
41951
41952 +#ifdef CONFIG_PAX_RANDUSTACK
41953 + if (randomize_va_space)
41954 + return stack_top - current->mm->delta_stack;
41955 +#endif
41956 +
41957 if ((current->flags & PF_RANDOMIZE) &&
41958 !(current->personality & ADDR_NO_RANDOMIZE)) {
41959 random_variable = get_random_int() & STACK_RND_MASK;
41960 @@ -559,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41961 unsigned long load_addr = 0, load_bias = 0;
41962 int load_addr_set = 0;
41963 char * elf_interpreter = NULL;
41964 - unsigned long error;
41965 + unsigned long error = 0;
41966 struct elf_phdr *elf_ppnt, *elf_phdata;
41967 unsigned long elf_bss, elf_brk;
41968 int retval, i;
41969 @@ -569,11 +943,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41970 unsigned long start_code, end_code, start_data, end_data;
41971 unsigned long reloc_func_desc __maybe_unused = 0;
41972 int executable_stack = EXSTACK_DEFAULT;
41973 - unsigned long def_flags = 0;
41974 struct {
41975 struct elfhdr elf_ex;
41976 struct elfhdr interp_elf_ex;
41977 } *loc;
41978 + unsigned long pax_task_size = TASK_SIZE;
41979
41980 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
41981 if (!loc) {
41982 @@ -709,11 +1083,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41983 goto out_free_dentry;
41984
41985 /* OK, This is the point of no return */
41986 - current->mm->def_flags = def_flags;
41987 +
41988 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41989 + current->mm->pax_flags = 0UL;
41990 +#endif
41991 +
41992 +#ifdef CONFIG_PAX_DLRESOLVE
41993 + current->mm->call_dl_resolve = 0UL;
41994 +#endif
41995 +
41996 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
41997 + current->mm->call_syscall = 0UL;
41998 +#endif
41999 +
42000 +#ifdef CONFIG_PAX_ASLR
42001 + current->mm->delta_mmap = 0UL;
42002 + current->mm->delta_stack = 0UL;
42003 +#endif
42004 +
42005 + current->mm->def_flags = 0;
42006 +
42007 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42008 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42009 + send_sig(SIGKILL, current, 0);
42010 + goto out_free_dentry;
42011 + }
42012 +#endif
42013 +
42014 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42015 + pax_set_initial_flags(bprm);
42016 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42017 + if (pax_set_initial_flags_func)
42018 + (pax_set_initial_flags_func)(bprm);
42019 +#endif
42020 +
42021 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42022 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42023 + current->mm->context.user_cs_limit = PAGE_SIZE;
42024 + current->mm->def_flags |= VM_PAGEEXEC;
42025 + }
42026 +#endif
42027 +
42028 +#ifdef CONFIG_PAX_SEGMEXEC
42029 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42030 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42031 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42032 + pax_task_size = SEGMEXEC_TASK_SIZE;
42033 + current->mm->def_flags |= VM_NOHUGEPAGE;
42034 + }
42035 +#endif
42036 +
42037 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42038 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42039 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42040 + put_cpu();
42041 + }
42042 +#endif
42043
42044 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42045 may depend on the personality. */
42046 SET_PERSONALITY(loc->elf_ex);
42047 +
42048 +#ifdef CONFIG_PAX_ASLR
42049 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42050 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42051 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42052 + }
42053 +#endif
42054 +
42055 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42056 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42057 + executable_stack = EXSTACK_DISABLE_X;
42058 + current->personality &= ~READ_IMPLIES_EXEC;
42059 + } else
42060 +#endif
42061 +
42062 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42063 current->personality |= READ_IMPLIES_EXEC;
42064
42065 @@ -804,6 +1248,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42066 #else
42067 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42068 #endif
42069 +
42070 +#ifdef CONFIG_PAX_RANDMMAP
42071 + /* PaX: randomize base address at the default exe base if requested */
42072 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42073 +#ifdef CONFIG_SPARC64
42074 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42075 +#else
42076 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42077 +#endif
42078 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42079 + elf_flags |= MAP_FIXED;
42080 + }
42081 +#endif
42082 +
42083 }
42084
42085 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42086 @@ -836,9 +1294,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42087 * allowed task size. Note that p_filesz must always be
42088 * <= p_memsz so it is only necessary to check p_memsz.
42089 */
42090 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42091 - elf_ppnt->p_memsz > TASK_SIZE ||
42092 - TASK_SIZE - elf_ppnt->p_memsz < k) {
42093 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42094 + elf_ppnt->p_memsz > pax_task_size ||
42095 + pax_task_size - elf_ppnt->p_memsz < k) {
42096 /* set_brk can never work. Avoid overflows. */
42097 send_sig(SIGKILL, current, 0);
42098 retval = -EINVAL;
42099 @@ -877,11 +1335,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42100 goto out_free_dentry;
42101 }
42102 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42103 - send_sig(SIGSEGV, current, 0);
42104 - retval = -EFAULT; /* Nobody gets to see this, but.. */
42105 - goto out_free_dentry;
42106 + /*
42107 + * This bss-zeroing can fail if the ELF
42108 + * file specifies odd protections. So
42109 + * we don't check the return value
42110 + */
42111 }
42112
42113 +#ifdef CONFIG_PAX_RANDMMAP
42114 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42115 + unsigned long start, size;
42116 +
42117 + start = ELF_PAGEALIGN(elf_brk);
42118 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42119 + down_write(&current->mm->mmap_sem);
42120 + retval = -ENOMEM;
42121 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42122 + unsigned long prot = PROT_NONE;
42123 +
42124 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42125 +// if (current->personality & ADDR_NO_RANDOMIZE)
42126 +// prot = PROT_READ;
42127 + start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42128 + retval = IS_ERR_VALUE(start) ? start : 0;
42129 + }
42130 + up_write(&current->mm->mmap_sem);
42131 + if (retval == 0)
42132 + retval = set_brk(start + size, start + size + PAGE_SIZE);
42133 + if (retval < 0) {
42134 + send_sig(SIGKILL, current, 0);
42135 + goto out_free_dentry;
42136 + }
42137 + }
42138 +#endif
42139 +
42140 if (elf_interpreter) {
42141 unsigned long uninitialized_var(interp_map_addr);
42142
42143 @@ -1109,7 +1596,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
42144 * Decide what to dump of a segment, part, all or none.
42145 */
42146 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42147 - unsigned long mm_flags)
42148 + unsigned long mm_flags, long signr)
42149 {
42150 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42151
42152 @@ -1146,7 +1633,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42153 if (vma->vm_file == NULL)
42154 return 0;
42155
42156 - if (FILTER(MAPPED_PRIVATE))
42157 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42158 goto whole;
42159
42160 /*
42161 @@ -1368,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42162 {
42163 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42164 int i = 0;
42165 - do
42166 + do {
42167 i += 2;
42168 - while (auxv[i - 2] != AT_NULL);
42169 + } while (auxv[i - 2] != AT_NULL);
42170 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42171 }
42172
42173 @@ -1892,14 +2379,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42174 }
42175
42176 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42177 - unsigned long mm_flags)
42178 + struct coredump_params *cprm)
42179 {
42180 struct vm_area_struct *vma;
42181 size_t size = 0;
42182
42183 for (vma = first_vma(current, gate_vma); vma != NULL;
42184 vma = next_vma(vma, gate_vma))
42185 - size += vma_dump_size(vma, mm_flags);
42186 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42187 return size;
42188 }
42189
42190 @@ -1993,7 +2480,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42191
42192 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42193
42194 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42195 + offset += elf_core_vma_data_size(gate_vma, cprm);
42196 offset += elf_core_extra_data_size();
42197 e_shoff = offset;
42198
42199 @@ -2007,10 +2494,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42200 offset = dataoff;
42201
42202 size += sizeof(*elf);
42203 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42204 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42205 goto end_coredump;
42206
42207 size += sizeof(*phdr4note);
42208 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42209 if (size > cprm->limit
42210 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42211 goto end_coredump;
42212 @@ -2024,7 +2513,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42213 phdr.p_offset = offset;
42214 phdr.p_vaddr = vma->vm_start;
42215 phdr.p_paddr = 0;
42216 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42217 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42218 phdr.p_memsz = vma->vm_end - vma->vm_start;
42219 offset += phdr.p_filesz;
42220 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42221 @@ -2035,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42222 phdr.p_align = ELF_EXEC_PAGESIZE;
42223
42224 size += sizeof(phdr);
42225 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42226 if (size > cprm->limit
42227 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42228 goto end_coredump;
42229 @@ -2059,7 +2549,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42230 unsigned long addr;
42231 unsigned long end;
42232
42233 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42234 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42235
42236 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42237 struct page *page;
42238 @@ -2068,6 +2558,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42239 page = get_dump_page(addr);
42240 if (page) {
42241 void *kaddr = kmap(page);
42242 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42243 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42244 !dump_write(cprm->file, kaddr,
42245 PAGE_SIZE);
42246 @@ -2085,6 +2576,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42247
42248 if (e_phnum == PN_XNUM) {
42249 size += sizeof(*shdr4extnum);
42250 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42251 if (size > cprm->limit
42252 || !dump_write(cprm->file, shdr4extnum,
42253 sizeof(*shdr4extnum)))
42254 @@ -2105,6 +2597,97 @@ out:
42255
42256 #endif /* CONFIG_ELF_CORE */
42257
42258 +#ifdef CONFIG_PAX_MPROTECT
42259 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
42260 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42261 + * we'll remove VM_MAYWRITE for good on RELRO segments.
42262 + *
42263 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42264 + * basis because we want to allow the common case and not the special ones.
42265 + */
42266 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42267 +{
42268 + struct elfhdr elf_h;
42269 + struct elf_phdr elf_p;
42270 + unsigned long i;
42271 + unsigned long oldflags;
42272 + bool is_textrel_rw, is_textrel_rx, is_relro;
42273 +
42274 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42275 + return;
42276 +
42277 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42278 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42279 +
42280 +#ifdef CONFIG_PAX_ELFRELOCS
42281 + /* possible TEXTREL */
42282 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42283 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42284 +#else
42285 + is_textrel_rw = false;
42286 + is_textrel_rx = false;
42287 +#endif
42288 +
42289 + /* possible RELRO */
42290 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42291 +
42292 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42293 + return;
42294 +
42295 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42296 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42297 +
42298 +#ifdef CONFIG_PAX_ETEXECRELOCS
42299 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42300 +#else
42301 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42302 +#endif
42303 +
42304 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42305 + !elf_check_arch(&elf_h) ||
42306 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42307 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42308 + return;
42309 +
42310 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42311 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42312 + return;
42313 + switch (elf_p.p_type) {
42314 + case PT_DYNAMIC:
42315 + if (!is_textrel_rw && !is_textrel_rx)
42316 + continue;
42317 + i = 0UL;
42318 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42319 + elf_dyn dyn;
42320 +
42321 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42322 + return;
42323 + if (dyn.d_tag == DT_NULL)
42324 + return;
42325 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42326 + gr_log_textrel(vma);
42327 + if (is_textrel_rw)
42328 + vma->vm_flags |= VM_MAYWRITE;
42329 + else
42330 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42331 + vma->vm_flags &= ~VM_MAYWRITE;
42332 + return;
42333 + }
42334 + i++;
42335 + }
42336 + return;
42337 +
42338 + case PT_GNU_RELRO:
42339 + if (!is_relro)
42340 + continue;
42341 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42342 + vma->vm_flags &= ~VM_MAYWRITE;
42343 + return;
42344 + }
42345 + }
42346 +}
42347 +#endif
42348 +
42349 static int __init init_elf_binfmt(void)
42350 {
42351 register_binfmt(&elf_format);
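
The large binfmt_elf.c addition above merges PaX flags from up to three sources: the legacy EI_PAX bits in e_ident, a PT_PAX_FLAGS program header, and an extended attribute parsed by pax_parse_xattr_pax(), where lowercase letters populate a hardmode mask, uppercase letters a softmode mask, and a flag appearing in both is rejected. The sketch below re-implements only that character-level parsing as ordinary userspace C; the flag macros and function names are invented, and only the p/P, e/E, m/M, r/R, s/S markers come from the patch.

/* Illustrative userspace sketch; not part of the patch. It re-implements
 * just the character parsing done by the patch's pax_parse_xattr_pax():
 * lowercase letters set a flag in the "hardmode" mask, uppercase letters
 * set it in the "softmode" mask, and a flag present in both masks is
 * rejected as contradictory.
 */
#include <stdio.h>

#define MF_PAGEEXEC (1u << 0)
#define MF_EMUTRAMP (1u << 1)
#define MF_MPROTECT (1u << 2)
#define MF_RANDMMAP (1u << 3)
#define MF_SEGMEXEC (1u << 4)

static int parse_pax_string(const char *s, unsigned *hard, unsigned *soft)
{
        *hard = *soft = 0;

        for (; *s; s++) {
                switch (*s) {
#define parse_flag(lo, up, bit) \
                case lo: *hard |= (bit); break; \
                case up: *soft |= (bit); break;

                parse_flag('p', 'P', MF_PAGEEXEC)
                parse_flag('e', 'E', MF_EMUTRAMP)
                parse_flag('m', 'M', MF_MPROTECT)
                parse_flag('r', 'R', MF_RANDMMAP)
                parse_flag('s', 'S', MF_SEGMEXEC)
#undef parse_flag
                default:
                        return -1;              /* unknown marker */
                }
        }
        return (*hard & *soft) ? -1 : 0;        /* contradiction check */
}

int main(void)
{
        unsigned hard, soft;

        if (parse_pax_string("pm", &hard, &soft) == 0)
                printf("\"pm\": hard=%#x soft=%#x\n", hard, soft);
        if (parse_pax_string("pP", &hard, &soft) < 0)
                printf("\"pP\": rejected as contradictory\n");
        return 0;
}

In the patch itself, pax_parse_pax_flags() then cross-checks the PT_PAX and xattr results and refuses the binary if the two disagree, which the sketch does not attempt to model.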
42352 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42353 index 6b2daf9..a70dccb 100644
42354 --- a/fs/binfmt_flat.c
42355 +++ b/fs/binfmt_flat.c
42356 @@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42357 realdatastart = (unsigned long) -ENOMEM;
42358 printk("Unable to allocate RAM for process data, errno %d\n",
42359 (int)-realdatastart);
42360 + down_write(&current->mm->mmap_sem);
42361 do_munmap(current->mm, textpos, text_len);
42362 + up_write(&current->mm->mmap_sem);
42363 ret = realdatastart;
42364 goto err;
42365 }
42366 @@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42367 }
42368 if (IS_ERR_VALUE(result)) {
42369 printk("Unable to read data+bss, errno %d\n", (int)-result);
42370 + down_write(&current->mm->mmap_sem);
42371 do_munmap(current->mm, textpos, text_len);
42372 do_munmap(current->mm, realdatastart, len);
42373 + up_write(&current->mm->mmap_sem);
42374 ret = result;
42375 goto err;
42376 }
42377 @@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42378 }
42379 if (IS_ERR_VALUE(result)) {
42380 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42381 + down_write(&current->mm->mmap_sem);
42382 do_munmap(current->mm, textpos, text_len + data_len + extra +
42383 MAX_SHARED_LIBS * sizeof(unsigned long));
42384 + up_write(&current->mm->mmap_sem);
42385 ret = result;
42386 goto err;
42387 }
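
The binfmt_flat.c error paths above now take down_write(&current->mm->mmap_sem) around their do_munmap() calls, since unmapping modifies the process address space and the map semaphore must be held for writing. A userspace analogue of the same rule, using a POSIX rwlock around an invented shared mapping counter:

/* Illustrative userspace analogue; not part of the patch, structures
 * invented. do_munmap() must run under the mm's map semaphore held for
 * writing, which is what the added down_write()/up_write() pairs provide
 * on the error paths above.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static int nr_mappings = 3;             /* stand-in for the shared VMA list */

static void unmap_one(void)
{
        pthread_rwlock_wrlock(&map_lock);   /* like down_write(&mmap_sem) */
        nr_mappings--;                      /* mutate the shared state */
        pthread_rwlock_unlock(&map_lock);   /* like up_write(&mmap_sem) */
}

int main(void)
{
        unmap_one();
        printf("mappings left: %d\n", nr_mappings);
        return 0;
}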
42388 diff --git a/fs/bio.c b/fs/bio.c
42389 index 84da885..2149cd9 100644
42390 --- a/fs/bio.c
42391 +++ b/fs/bio.c
42392 @@ -838,7 +838,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42393 /*
42394 * Overflow, abort
42395 */
42396 - if (end < start)
42397 + if (end < start || end - start > INT_MAX - nr_pages)
42398 return ERR_PTR(-EINVAL);
42399
42400 nr_pages += end - start;
42401 @@ -1234,7 +1234,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42402 const int read = bio_data_dir(bio) == READ;
42403 struct bio_map_data *bmd = bio->bi_private;
42404 int i;
42405 - char *p = bmd->sgvecs[0].iov_base;
42406 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42407
42408 __bio_for_each_segment(bvec, bio, i, 0) {
42409 char *addr = page_address(bvec->bv_page);
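
The bio_copy_user_iov() hunk above tightens the overflow check on the page count: besides end < start, it now rejects requests where adding end - start to nr_pages would exceed INT_MAX, and it does so by comparing against INT_MAX - nr_pages instead of performing the addition first. A short worked sketch of that rearrangement, assuming a non-negative running count as in the original code:

/* Illustrative sketch; not part of the patch. Shows the overflow-safe
 * form used in the bio hunk above: to guard "nr_pages += delta" against
 * exceeding INT_MAX, test "delta > INT_MAX - nr_pages" instead of
 * computing the sum first.
 */
#include <limits.h>
#include <stdio.h>

static int add_pages(int nr_pages, unsigned long delta)
{
        if (delta > (unsigned long)(INT_MAX - nr_pages)) {
                fprintf(stderr, "rejected: %lu pages would overflow\n", delta);
                return -1;
        }
        return nr_pages + (int)delta;
}

int main(void)
{
        int nr_pages = 100;

        printf("adding 10 -> %d\n", add_pages(nr_pages, 10));
        printf("adding huge -> %d\n",
               add_pages(nr_pages, (unsigned long)INT_MAX));
        return 0;
}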
42410 diff --git a/fs/block_dev.c b/fs/block_dev.c
42411 index ba11c30..623d736 100644
42412 --- a/fs/block_dev.c
42413 +++ b/fs/block_dev.c
42414 @@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42415 else if (bdev->bd_contains == bdev)
42416 return true; /* is a whole device which isn't held */
42417
42418 - else if (whole->bd_holder == bd_may_claim)
42419 + else if (whole->bd_holder == (void *)bd_may_claim)
42420 return true; /* is a partition of a device that is being partitioned */
42421 else if (whole->bd_holder != NULL)
42422 return false; /* is a partition of a held device */
42423 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
42424 index c053e90..e5f1afc 100644
42425 --- a/fs/btrfs/check-integrity.c
42426 +++ b/fs/btrfs/check-integrity.c
42427 @@ -156,7 +156,7 @@ struct btrfsic_block {
42428 union {
42429 bio_end_io_t *bio;
42430 bh_end_io_t *bh;
42431 - } orig_bio_bh_end_io;
42432 + } __no_const orig_bio_bh_end_io;
42433 int submit_bio_bh_rw;
42434 u64 flush_gen; /* only valid if !never_written */
42435 };
42436 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42437 index 4106264..8157ede 100644
42438 --- a/fs/btrfs/ctree.c
42439 +++ b/fs/btrfs/ctree.c
42440 @@ -513,9 +513,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42441 free_extent_buffer(buf);
42442 add_root_to_dirty_list(root);
42443 } else {
42444 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42445 - parent_start = parent->start;
42446 - else
42447 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42448 + if (parent)
42449 + parent_start = parent->start;
42450 + else
42451 + parent_start = 0;
42452 + } else
42453 parent_start = 0;
42454
42455 WARN_ON(trans->transid != btrfs_header_generation(parent));
42456 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42457 index 61b16c6..b492c09 100644
42458 --- a/fs/btrfs/inode.c
42459 +++ b/fs/btrfs/inode.c
42460 @@ -7071,7 +7071,7 @@ fail:
42461 return -ENOMEM;
42462 }
42463
42464 -static int btrfs_getattr(struct vfsmount *mnt,
42465 +int btrfs_getattr(struct vfsmount *mnt,
42466 struct dentry *dentry, struct kstat *stat)
42467 {
42468 struct inode *inode = dentry->d_inode;
42469 @@ -7085,6 +7085,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42470 return 0;
42471 }
42472
42473 +EXPORT_SYMBOL(btrfs_getattr);
42474 +
42475 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42476 +{
42477 + return BTRFS_I(inode)->root->anon_dev;
42478 +}
42479 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42480 +
42481 /*
42482 * If a file is moved, it will inherit the cow and compression flags of the new
42483 * directory.
42484 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42485 index 14f8e1f..ab8d81f 100644
42486 --- a/fs/btrfs/ioctl.c
42487 +++ b/fs/btrfs/ioctl.c
42488 @@ -2882,9 +2882,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42489 for (i = 0; i < num_types; i++) {
42490 struct btrfs_space_info *tmp;
42491
42492 + /* Don't copy in more than we allocated */
42493 if (!slot_count)
42494 break;
42495
42496 + slot_count--;
42497 +
42498 info = NULL;
42499 rcu_read_lock();
42500 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42501 @@ -2906,15 +2909,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42502 memcpy(dest, &space, sizeof(space));
42503 dest++;
42504 space_args.total_spaces++;
42505 - slot_count--;
42506 }
42507 - if (!slot_count)
42508 - break;
42509 }
42510 up_read(&info->groups_sem);
42511 }
42512
42513 - user_dest = (struct btrfs_ioctl_space_info *)
42514 + user_dest = (struct btrfs_ioctl_space_info __user *)
42515 (arg + sizeof(struct btrfs_ioctl_space_args));
42516
42517 if (copy_to_user(user_dest, dest_orig, alloc_size))
42518 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42519 index 646ee21..f020f87 100644
42520 --- a/fs/btrfs/relocation.c
42521 +++ b/fs/btrfs/relocation.c
42522 @@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42523 }
42524 spin_unlock(&rc->reloc_root_tree.lock);
42525
42526 - BUG_ON((struct btrfs_root *)node->data != root);
42527 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42528
42529 if (!del) {
42530 spin_lock(&rc->reloc_root_tree.lock);
42531 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42532 index 622f469..e8d2d55 100644
42533 --- a/fs/cachefiles/bind.c
42534 +++ b/fs/cachefiles/bind.c
42535 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42536 args);
42537
42538 /* start by checking things over */
42539 - ASSERT(cache->fstop_percent >= 0 &&
42540 - cache->fstop_percent < cache->fcull_percent &&
42541 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42542 cache->fcull_percent < cache->frun_percent &&
42543 cache->frun_percent < 100);
42544
42545 - ASSERT(cache->bstop_percent >= 0 &&
42546 - cache->bstop_percent < cache->bcull_percent &&
42547 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42548 cache->bcull_percent < cache->brun_percent &&
42549 cache->brun_percent < 100);
42550
42551 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42552 index 0a1467b..6a53245 100644
42553 --- a/fs/cachefiles/daemon.c
42554 +++ b/fs/cachefiles/daemon.c
42555 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42556 if (n > buflen)
42557 return -EMSGSIZE;
42558
42559 - if (copy_to_user(_buffer, buffer, n) != 0)
42560 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42561 return -EFAULT;
42562
42563 return n;
42564 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42565 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42566 return -EIO;
42567
42568 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42569 + if (datalen > PAGE_SIZE - 1)
42570 return -EOPNOTSUPP;
42571
42572 /* drag the command string into the kernel so we can parse it */
42573 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42574 if (args[0] != '%' || args[1] != '\0')
42575 return -EINVAL;
42576
42577 - if (fstop < 0 || fstop >= cache->fcull_percent)
42578 + if (fstop >= cache->fcull_percent)
42579 return cachefiles_daemon_range_error(cache, args);
42580
42581 cache->fstop_percent = fstop;
42582 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42583 if (args[0] != '%' || args[1] != '\0')
42584 return -EINVAL;
42585
42586 - if (bstop < 0 || bstop >= cache->bcull_percent)
42587 + if (bstop >= cache->bcull_percent)
42588 return cachefiles_daemon_range_error(cache, args);
42589
42590 cache->bstop_percent = bstop;
42591 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42592 index bd6bc1b..b627b53 100644
42593 --- a/fs/cachefiles/internal.h
42594 +++ b/fs/cachefiles/internal.h
42595 @@ -57,7 +57,7 @@ struct cachefiles_cache {
42596 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42597 struct rb_root active_nodes; /* active nodes (can't be culled) */
42598 rwlock_t active_lock; /* lock for active_nodes */
42599 - atomic_t gravecounter; /* graveyard uniquifier */
42600 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42601 unsigned frun_percent; /* when to stop culling (% files) */
42602 unsigned fcull_percent; /* when to start culling (% files) */
42603 unsigned fstop_percent; /* when to stop allocating (% files) */
42604 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42605 * proc.c
42606 */
42607 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42608 -extern atomic_t cachefiles_lookup_histogram[HZ];
42609 -extern atomic_t cachefiles_mkdir_histogram[HZ];
42610 -extern atomic_t cachefiles_create_histogram[HZ];
42611 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42612 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42613 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42614
42615 extern int __init cachefiles_proc_init(void);
42616 extern void cachefiles_proc_cleanup(void);
42617 static inline
42618 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42619 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42620 {
42621 unsigned long jif = jiffies - start_jif;
42622 if (jif >= HZ)
42623 jif = HZ - 1;
42624 - atomic_inc(&histogram[jif]);
42625 + atomic_inc_unchecked(&histogram[jif]);
42626 }
42627
42628 #else
42629 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42630 index 7f0771d..87d4f36 100644
42631 --- a/fs/cachefiles/namei.c
42632 +++ b/fs/cachefiles/namei.c
42633 @@ -318,7 +318,7 @@ try_again:
42634 /* first step is to make up a grave dentry in the graveyard */
42635 sprintf(nbuffer, "%08x%08x",
42636 (uint32_t) get_seconds(),
42637 - (uint32_t) atomic_inc_return(&cache->gravecounter));
42638 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42639
42640 /* do the multiway lock magic */
42641 trap = lock_rename(cache->graveyard, dir);
42642 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42643 index eccd339..4c1d995 100644
42644 --- a/fs/cachefiles/proc.c
42645 +++ b/fs/cachefiles/proc.c
42646 @@ -14,9 +14,9 @@
42647 #include <linux/seq_file.h>
42648 #include "internal.h"
42649
42650 -atomic_t cachefiles_lookup_histogram[HZ];
42651 -atomic_t cachefiles_mkdir_histogram[HZ];
42652 -atomic_t cachefiles_create_histogram[HZ];
42653 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42654 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42655 +atomic_unchecked_t cachefiles_create_histogram[HZ];
42656
42657 /*
42658 * display the latency histogram
42659 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42660 return 0;
42661 default:
42662 index = (unsigned long) v - 3;
42663 - x = atomic_read(&cachefiles_lookup_histogram[index]);
42664 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
42665 - z = atomic_read(&cachefiles_create_histogram[index]);
42666 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42667 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42668 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42669 if (x == 0 && y == 0 && z == 0)
42670 return 0;
42671
42672 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42673 index 0e3c092..818480e 100644
42674 --- a/fs/cachefiles/rdwr.c
42675 +++ b/fs/cachefiles/rdwr.c
42676 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42677 old_fs = get_fs();
42678 set_fs(KERNEL_DS);
42679 ret = file->f_op->write(
42680 - file, (const void __user *) data, len, &pos);
42681 + file, (const void __force_user *) data, len, &pos);
42682 set_fs(old_fs);
42683 kunmap(page);
42684 if (ret != len)
42685 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42686 index 3e8094b..cb3ff3d 100644
42687 --- a/fs/ceph/dir.c
42688 +++ b/fs/ceph/dir.c
42689 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42690 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42691 struct ceph_mds_client *mdsc = fsc->mdsc;
42692 unsigned frag = fpos_frag(filp->f_pos);
42693 - int off = fpos_off(filp->f_pos);
42694 + unsigned int off = fpos_off(filp->f_pos);
42695 int err;
42696 u32 ftype;
42697 struct ceph_mds_reply_info_parsed *rinfo;
42698 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
42699 if (nd &&
42700 (nd->flags & LOOKUP_OPEN) &&
42701 !(nd->intent.open.flags & O_CREAT)) {
42702 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
42703 + int mode = nd->intent.open.create_mode & ~current_umask();
42704 return ceph_lookup_open(dir, dentry, nd, mode, 1);
42705 }
42706
42707 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42708 index 2704646..c581c91 100644
42709 --- a/fs/cifs/cifs_debug.c
42710 +++ b/fs/cifs/cifs_debug.c
42711 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42712
42713 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42714 #ifdef CONFIG_CIFS_STATS2
42715 - atomic_set(&totBufAllocCount, 0);
42716 - atomic_set(&totSmBufAllocCount, 0);
42717 + atomic_set_unchecked(&totBufAllocCount, 0);
42718 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42719 #endif /* CONFIG_CIFS_STATS2 */
42720 spin_lock(&cifs_tcp_ses_lock);
42721 list_for_each(tmp1, &cifs_tcp_ses_list) {
42722 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42723 tcon = list_entry(tmp3,
42724 struct cifs_tcon,
42725 tcon_list);
42726 - atomic_set(&tcon->num_smbs_sent, 0);
42727 - atomic_set(&tcon->num_writes, 0);
42728 - atomic_set(&tcon->num_reads, 0);
42729 - atomic_set(&tcon->num_oplock_brks, 0);
42730 - atomic_set(&tcon->num_opens, 0);
42731 - atomic_set(&tcon->num_posixopens, 0);
42732 - atomic_set(&tcon->num_posixmkdirs, 0);
42733 - atomic_set(&tcon->num_closes, 0);
42734 - atomic_set(&tcon->num_deletes, 0);
42735 - atomic_set(&tcon->num_mkdirs, 0);
42736 - atomic_set(&tcon->num_rmdirs, 0);
42737 - atomic_set(&tcon->num_renames, 0);
42738 - atomic_set(&tcon->num_t2renames, 0);
42739 - atomic_set(&tcon->num_ffirst, 0);
42740 - atomic_set(&tcon->num_fnext, 0);
42741 - atomic_set(&tcon->num_fclose, 0);
42742 - atomic_set(&tcon->num_hardlinks, 0);
42743 - atomic_set(&tcon->num_symlinks, 0);
42744 - atomic_set(&tcon->num_locks, 0);
42745 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42746 + atomic_set_unchecked(&tcon->num_writes, 0);
42747 + atomic_set_unchecked(&tcon->num_reads, 0);
42748 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42749 + atomic_set_unchecked(&tcon->num_opens, 0);
42750 + atomic_set_unchecked(&tcon->num_posixopens, 0);
42751 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42752 + atomic_set_unchecked(&tcon->num_closes, 0);
42753 + atomic_set_unchecked(&tcon->num_deletes, 0);
42754 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
42755 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
42756 + atomic_set_unchecked(&tcon->num_renames, 0);
42757 + atomic_set_unchecked(&tcon->num_t2renames, 0);
42758 + atomic_set_unchecked(&tcon->num_ffirst, 0);
42759 + atomic_set_unchecked(&tcon->num_fnext, 0);
42760 + atomic_set_unchecked(&tcon->num_fclose, 0);
42761 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
42762 + atomic_set_unchecked(&tcon->num_symlinks, 0);
42763 + atomic_set_unchecked(&tcon->num_locks, 0);
42764 }
42765 }
42766 }
42767 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42768 smBufAllocCount.counter, cifs_min_small);
42769 #ifdef CONFIG_CIFS_STATS2
42770 seq_printf(m, "Total Large %d Small %d Allocations\n",
42771 - atomic_read(&totBufAllocCount),
42772 - atomic_read(&totSmBufAllocCount));
42773 + atomic_read_unchecked(&totBufAllocCount),
42774 + atomic_read_unchecked(&totSmBufAllocCount));
42775 #endif /* CONFIG_CIFS_STATS2 */
42776
42777 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42778 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42779 if (tcon->need_reconnect)
42780 seq_puts(m, "\tDISCONNECTED ");
42781 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42782 - atomic_read(&tcon->num_smbs_sent),
42783 - atomic_read(&tcon->num_oplock_brks));
42784 + atomic_read_unchecked(&tcon->num_smbs_sent),
42785 + atomic_read_unchecked(&tcon->num_oplock_brks));
42786 seq_printf(m, "\nReads: %d Bytes: %lld",
42787 - atomic_read(&tcon->num_reads),
42788 + atomic_read_unchecked(&tcon->num_reads),
42789 (long long)(tcon->bytes_read));
42790 seq_printf(m, "\nWrites: %d Bytes: %lld",
42791 - atomic_read(&tcon->num_writes),
42792 + atomic_read_unchecked(&tcon->num_writes),
42793 (long long)(tcon->bytes_written));
42794 seq_printf(m, "\nFlushes: %d",
42795 - atomic_read(&tcon->num_flushes));
42796 + atomic_read_unchecked(&tcon->num_flushes));
42797 seq_printf(m, "\nLocks: %d HardLinks: %d "
42798 "Symlinks: %d",
42799 - atomic_read(&tcon->num_locks),
42800 - atomic_read(&tcon->num_hardlinks),
42801 - atomic_read(&tcon->num_symlinks));
42802 + atomic_read_unchecked(&tcon->num_locks),
42803 + atomic_read_unchecked(&tcon->num_hardlinks),
42804 + atomic_read_unchecked(&tcon->num_symlinks));
42805 seq_printf(m, "\nOpens: %d Closes: %d "
42806 "Deletes: %d",
42807 - atomic_read(&tcon->num_opens),
42808 - atomic_read(&tcon->num_closes),
42809 - atomic_read(&tcon->num_deletes));
42810 + atomic_read_unchecked(&tcon->num_opens),
42811 + atomic_read_unchecked(&tcon->num_closes),
42812 + atomic_read_unchecked(&tcon->num_deletes));
42813 seq_printf(m, "\nPosix Opens: %d "
42814 "Posix Mkdirs: %d",
42815 - atomic_read(&tcon->num_posixopens),
42816 - atomic_read(&tcon->num_posixmkdirs));
42817 + atomic_read_unchecked(&tcon->num_posixopens),
42818 + atomic_read_unchecked(&tcon->num_posixmkdirs));
42819 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42820 - atomic_read(&tcon->num_mkdirs),
42821 - atomic_read(&tcon->num_rmdirs));
42822 + atomic_read_unchecked(&tcon->num_mkdirs),
42823 + atomic_read_unchecked(&tcon->num_rmdirs));
42824 seq_printf(m, "\nRenames: %d T2 Renames %d",
42825 - atomic_read(&tcon->num_renames),
42826 - atomic_read(&tcon->num_t2renames));
42827 + atomic_read_unchecked(&tcon->num_renames),
42828 + atomic_read_unchecked(&tcon->num_t2renames));
42829 seq_printf(m, "\nFindFirst: %d FNext %d "
42830 "FClose %d",
42831 - atomic_read(&tcon->num_ffirst),
42832 - atomic_read(&tcon->num_fnext),
42833 - atomic_read(&tcon->num_fclose));
42834 + atomic_read_unchecked(&tcon->num_ffirst),
42835 + atomic_read_unchecked(&tcon->num_fnext),
42836 + atomic_read_unchecked(&tcon->num_fclose));
42837 }
42838 }
42839 }
42840 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
42841 index 541ef81..a78deb8 100644
42842 --- a/fs/cifs/cifsfs.c
42843 +++ b/fs/cifs/cifsfs.c
42844 @@ -985,7 +985,7 @@ cifs_init_request_bufs(void)
42845 cifs_req_cachep = kmem_cache_create("cifs_request",
42846 CIFSMaxBufSize +
42847 MAX_CIFS_HDR_SIZE, 0,
42848 - SLAB_HWCACHE_ALIGN, NULL);
42849 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
42850 if (cifs_req_cachep == NULL)
42851 return -ENOMEM;
42852
42853 @@ -1012,7 +1012,7 @@ cifs_init_request_bufs(void)
42854 efficient to alloc 1 per page off the slab compared to 17K (5page)
42855 alloc of large cifs buffers even when page debugging is on */
42856 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
42857 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
42858 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
42859 NULL);
42860 if (cifs_sm_req_cachep == NULL) {
42861 mempool_destroy(cifs_req_poolp);
42862 @@ -1097,8 +1097,8 @@ init_cifs(void)
42863 atomic_set(&bufAllocCount, 0);
42864 atomic_set(&smBufAllocCount, 0);
42865 #ifdef CONFIG_CIFS_STATS2
42866 - atomic_set(&totBufAllocCount, 0);
42867 - atomic_set(&totSmBufAllocCount, 0);
42868 + atomic_set_unchecked(&totBufAllocCount, 0);
42869 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42870 #endif /* CONFIG_CIFS_STATS2 */
42871
42872 atomic_set(&midCount, 0);
42873 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
42874 index 4ff6313..815d7fc 100644
42875 --- a/fs/cifs/cifsglob.h
42876 +++ b/fs/cifs/cifsglob.h
42877 @@ -438,28 +438,28 @@ struct cifs_tcon {
42878 __u16 Flags; /* optional support bits */
42879 enum statusEnum tidStatus;
42880 #ifdef CONFIG_CIFS_STATS
42881 - atomic_t num_smbs_sent;
42882 - atomic_t num_writes;
42883 - atomic_t num_reads;
42884 - atomic_t num_flushes;
42885 - atomic_t num_oplock_brks;
42886 - atomic_t num_opens;
42887 - atomic_t num_closes;
42888 - atomic_t num_deletes;
42889 - atomic_t num_mkdirs;
42890 - atomic_t num_posixopens;
42891 - atomic_t num_posixmkdirs;
42892 - atomic_t num_rmdirs;
42893 - atomic_t num_renames;
42894 - atomic_t num_t2renames;
42895 - atomic_t num_ffirst;
42896 - atomic_t num_fnext;
42897 - atomic_t num_fclose;
42898 - atomic_t num_hardlinks;
42899 - atomic_t num_symlinks;
42900 - atomic_t num_locks;
42901 - atomic_t num_acl_get;
42902 - atomic_t num_acl_set;
42903 + atomic_unchecked_t num_smbs_sent;
42904 + atomic_unchecked_t num_writes;
42905 + atomic_unchecked_t num_reads;
42906 + atomic_unchecked_t num_flushes;
42907 + atomic_unchecked_t num_oplock_brks;
42908 + atomic_unchecked_t num_opens;
42909 + atomic_unchecked_t num_closes;
42910 + atomic_unchecked_t num_deletes;
42911 + atomic_unchecked_t num_mkdirs;
42912 + atomic_unchecked_t num_posixopens;
42913 + atomic_unchecked_t num_posixmkdirs;
42914 + atomic_unchecked_t num_rmdirs;
42915 + atomic_unchecked_t num_renames;
42916 + atomic_unchecked_t num_t2renames;
42917 + atomic_unchecked_t num_ffirst;
42918 + atomic_unchecked_t num_fnext;
42919 + atomic_unchecked_t num_fclose;
42920 + atomic_unchecked_t num_hardlinks;
42921 + atomic_unchecked_t num_symlinks;
42922 + atomic_unchecked_t num_locks;
42923 + atomic_unchecked_t num_acl_get;
42924 + atomic_unchecked_t num_acl_set;
42925 #ifdef CONFIG_CIFS_STATS2
42926 unsigned long long time_writes;
42927 unsigned long long time_reads;
42928 @@ -676,7 +676,7 @@ convert_delimiter(char *path, char delim)
42929 }
42930
42931 #ifdef CONFIG_CIFS_STATS
42932 -#define cifs_stats_inc atomic_inc
42933 +#define cifs_stats_inc atomic_inc_unchecked
42934
42935 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
42936 unsigned int bytes)
42937 @@ -1035,8 +1035,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
42938 /* Various Debug counters */
42939 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
42940 #ifdef CONFIG_CIFS_STATS2
42941 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
42942 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
42943 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
42944 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
42945 #endif
42946 GLOBAL_EXTERN atomic_t smBufAllocCount;
42947 GLOBAL_EXTERN atomic_t midCount;
42948 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
42949 index 6b0e064..94e6c3c 100644
42950 --- a/fs/cifs/link.c
42951 +++ b/fs/cifs/link.c
42952 @@ -600,7 +600,7 @@ symlink_exit:
42953
42954 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
42955 {
42956 - char *p = nd_get_link(nd);
42957 + const char *p = nd_get_link(nd);
42958 if (!IS_ERR(p))
42959 kfree(p);
42960 }
42961 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
42962 index c29d1aa..58018da 100644
42963 --- a/fs/cifs/misc.c
42964 +++ b/fs/cifs/misc.c
42965 @@ -156,7 +156,7 @@ cifs_buf_get(void)
42966 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
42967 atomic_inc(&bufAllocCount);
42968 #ifdef CONFIG_CIFS_STATS2
42969 - atomic_inc(&totBufAllocCount);
42970 + atomic_inc_unchecked(&totBufAllocCount);
42971 #endif /* CONFIG_CIFS_STATS2 */
42972 }
42973
42974 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
42975 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
42976 atomic_inc(&smBufAllocCount);
42977 #ifdef CONFIG_CIFS_STATS2
42978 - atomic_inc(&totSmBufAllocCount);
42979 + atomic_inc_unchecked(&totSmBufAllocCount);
42980 #endif /* CONFIG_CIFS_STATS2 */
42981
42982 }
42983 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
42984 index 6901578..d402eb5 100644
42985 --- a/fs/coda/cache.c
42986 +++ b/fs/coda/cache.c
42987 @@ -24,7 +24,7 @@
42988 #include "coda_linux.h"
42989 #include "coda_cache.h"
42990
42991 -static atomic_t permission_epoch = ATOMIC_INIT(0);
42992 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
42993
42994 /* replace or extend an acl cache hit */
42995 void coda_cache_enter(struct inode *inode, int mask)
42996 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
42997 struct coda_inode_info *cii = ITOC(inode);
42998
42999 spin_lock(&cii->c_lock);
43000 - cii->c_cached_epoch = atomic_read(&permission_epoch);
43001 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43002 if (cii->c_uid != current_fsuid()) {
43003 cii->c_uid = current_fsuid();
43004 cii->c_cached_perm = mask;
43005 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43006 {
43007 struct coda_inode_info *cii = ITOC(inode);
43008 spin_lock(&cii->c_lock);
43009 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43010 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43011 spin_unlock(&cii->c_lock);
43012 }
43013
43014 /* remove all acl caches */
43015 void coda_cache_clear_all(struct super_block *sb)
43016 {
43017 - atomic_inc(&permission_epoch);
43018 + atomic_inc_unchecked(&permission_epoch);
43019 }
43020
43021
43022 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43023 spin_lock(&cii->c_lock);
43024 hit = (mask & cii->c_cached_perm) == mask &&
43025 cii->c_uid == current_fsuid() &&
43026 - cii->c_cached_epoch == atomic_read(&permission_epoch);
43027 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43028 spin_unlock(&cii->c_lock);
43029
43030 return hit;
43031 diff --git a/fs/compat.c b/fs/compat.c
43032 index f2944ac..62845d2 100644
43033 --- a/fs/compat.c
43034 +++ b/fs/compat.c
43035 @@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43036
43037 set_fs(KERNEL_DS);
43038 /* The __user pointer cast is valid because of the set_fs() */
43039 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43040 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43041 set_fs(oldfs);
43042 /* truncating is ok because it's a user address */
43043 if (!ret)
43044 @@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43045 goto out;
43046
43047 ret = -EINVAL;
43048 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43049 + if (nr_segs > UIO_MAXIOV)
43050 goto out;
43051 if (nr_segs > fast_segs) {
43052 ret = -ENOMEM;
43053 @@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
43054
43055 struct compat_readdir_callback {
43056 struct compat_old_linux_dirent __user *dirent;
43057 + struct file * file;
43058 int result;
43059 };
43060
43061 @@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43062 buf->result = -EOVERFLOW;
43063 return -EOVERFLOW;
43064 }
43065 +
43066 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43067 + return 0;
43068 +
43069 buf->result++;
43070 dirent = buf->dirent;
43071 if (!access_ok(VERIFY_WRITE, dirent,
43072 @@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43073
43074 buf.result = 0;
43075 buf.dirent = dirent;
43076 + buf.file = file;
43077
43078 error = vfs_readdir(file, compat_fillonedir, &buf);
43079 if (buf.result)
43080 @@ -900,6 +906,7 @@ struct compat_linux_dirent {
43081 struct compat_getdents_callback {
43082 struct compat_linux_dirent __user *current_dir;
43083 struct compat_linux_dirent __user *previous;
43084 + struct file * file;
43085 int count;
43086 int error;
43087 };
43088 @@ -921,6 +928,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43089 buf->error = -EOVERFLOW;
43090 return -EOVERFLOW;
43091 }
43092 +
43093 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43094 + return 0;
43095 +
43096 dirent = buf->previous;
43097 if (dirent) {
43098 if (__put_user(offset, &dirent->d_off))
43099 @@ -968,6 +979,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43100 buf.previous = NULL;
43101 buf.count = count;
43102 buf.error = 0;
43103 + buf.file = file;
43104
43105 error = vfs_readdir(file, compat_filldir, &buf);
43106 if (error >= 0)
43107 @@ -989,6 +1001,7 @@ out:
43108 struct compat_getdents_callback64 {
43109 struct linux_dirent64 __user *current_dir;
43110 struct linux_dirent64 __user *previous;
43111 + struct file * file;
43112 int count;
43113 int error;
43114 };
43115 @@ -1005,6 +1018,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43116 buf->error = -EINVAL; /* only used if we fail.. */
43117 if (reclen > buf->count)
43118 return -EINVAL;
43119 +
43120 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43121 + return 0;
43122 +
43123 dirent = buf->previous;
43124
43125 if (dirent) {
43126 @@ -1056,13 +1073,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43127 buf.previous = NULL;
43128 buf.count = count;
43129 buf.error = 0;
43130 + buf.file = file;
43131
43132 error = vfs_readdir(file, compat_filldir64, &buf);
43133 if (error >= 0)
43134 error = buf.error;
43135 lastdirent = buf.previous;
43136 if (lastdirent) {
43137 - typeof(lastdirent->d_off) d_off = file->f_pos;
43138 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43139 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43140 error = -EFAULT;
43141 else
43142 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43143 index 112e45a..b59845b 100644
43144 --- a/fs/compat_binfmt_elf.c
43145 +++ b/fs/compat_binfmt_elf.c
43146 @@ -30,11 +30,13 @@
43147 #undef elf_phdr
43148 #undef elf_shdr
43149 #undef elf_note
43150 +#undef elf_dyn
43151 #undef elf_addr_t
43152 #define elfhdr elf32_hdr
43153 #define elf_phdr elf32_phdr
43154 #define elf_shdr elf32_shdr
43155 #define elf_note elf32_note
43156 +#define elf_dyn Elf32_Dyn
43157 #define elf_addr_t Elf32_Addr
43158
43159 /*
43160 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43161 index debdfe0..75d31d4 100644
43162 --- a/fs/compat_ioctl.c
43163 +++ b/fs/compat_ioctl.c
43164 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43165
43166 err = get_user(palp, &up->palette);
43167 err |= get_user(length, &up->length);
43168 + if (err)
43169 + return -EFAULT;
43170
43171 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43172 err = put_user(compat_ptr(palp), &up_native->palette);
43173 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43174 return -EFAULT;
43175 if (__get_user(udata, &ss32->iomem_base))
43176 return -EFAULT;
43177 - ss.iomem_base = compat_ptr(udata);
43178 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43179 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43180 __get_user(ss.port_high, &ss32->port_high))
43181 return -EFAULT;
43182 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43183 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43184 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43185 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43186 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43187 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43188 return -EFAULT;
43189
43190 return ioctl_preallocate(file, p);
43191 @@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43192 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43193 {
43194 unsigned int a, b;
43195 - a = *(unsigned int *)p;
43196 - b = *(unsigned int *)q;
43197 + a = *(const unsigned int *)p;
43198 + b = *(const unsigned int *)q;
43199 if (a > b)
43200 return 1;
43201 if (a < b)
43202 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43203 index 7e6c52d..94bc756 100644
43204 --- a/fs/configfs/dir.c
43205 +++ b/fs/configfs/dir.c
43206 @@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43207 }
43208 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43209 struct configfs_dirent *next;
43210 - const char * name;
43211 + const unsigned char * name;
43212 + char d_name[sizeof(next->s_dentry->d_iname)];
43213 int len;
43214 struct inode *inode = NULL;
43215
43216 @@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43217 continue;
43218
43219 name = configfs_get_name(next);
43220 - len = strlen(name);
43221 + if (next->s_dentry && name == next->s_dentry->d_iname) {
43222 + len = next->s_dentry->d_name.len;
43223 + memcpy(d_name, name, len);
43224 + name = d_name;
43225 + } else
43226 + len = strlen(name);
43227
43228 /*
43229 * We'll have a dentry and an inode for
43230 diff --git a/fs/dcache.c b/fs/dcache.c
43231 index b80531c..8ca7e2d 100644
43232 --- a/fs/dcache.c
43233 +++ b/fs/dcache.c
43234 @@ -3084,7 +3084,7 @@ void __init vfs_caches_init(unsigned long mempages)
43235 mempages -= reserve;
43236
43237 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43238 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43239 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43240
43241 dcache_init();
43242 inode_init();
43243 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43244 index b80bc84..0d46d1a 100644
43245 --- a/fs/debugfs/inode.c
43246 +++ b/fs/debugfs/inode.c
43247 @@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43248 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43249 {
43250 return debugfs_create_file(name,
43251 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43252 + S_IFDIR | S_IRWXU,
43253 +#else
43254 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43255 +#endif
43256 parent, NULL, NULL);
43257 }
43258 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43259 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43260 index ab35b11..b30af66 100644
43261 --- a/fs/ecryptfs/inode.c
43262 +++ b/fs/ecryptfs/inode.c
43263 @@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43264 old_fs = get_fs();
43265 set_fs(get_ds());
43266 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43267 - (char __user *)lower_buf,
43268 + (char __force_user *)lower_buf,
43269 lower_bufsiz);
43270 set_fs(old_fs);
43271 if (rc < 0)
43272 @@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43273 }
43274 old_fs = get_fs();
43275 set_fs(get_ds());
43276 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43277 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43278 set_fs(old_fs);
43279 if (rc < 0) {
43280 kfree(buf);
43281 @@ -733,7 +733,7 @@ out:
43282 static void
43283 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43284 {
43285 - char *buf = nd_get_link(nd);
43286 + const char *buf = nd_get_link(nd);
43287 if (!IS_ERR(buf)) {
43288 /* Free the char* */
43289 kfree(buf);
43290 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43291 index 3a06f40..f7af544 100644
43292 --- a/fs/ecryptfs/miscdev.c
43293 +++ b/fs/ecryptfs/miscdev.c
43294 @@ -345,7 +345,7 @@ check_list:
43295 goto out_unlock_msg_ctx;
43296 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43297 if (msg_ctx->msg) {
43298 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
43299 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43300 goto out_unlock_msg_ctx;
43301 i += packet_length_size;
43302 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43303 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43304 index b2a34a1..162fa69 100644
43305 --- a/fs/ecryptfs/read_write.c
43306 +++ b/fs/ecryptfs/read_write.c
43307 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43308 return -EIO;
43309 fs_save = get_fs();
43310 set_fs(get_ds());
43311 - rc = vfs_write(lower_file, data, size, &offset);
43312 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43313 set_fs(fs_save);
43314 mark_inode_dirty_sync(ecryptfs_inode);
43315 return rc;
43316 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43317 return -EIO;
43318 fs_save = get_fs();
43319 set_fs(get_ds());
43320 - rc = vfs_read(lower_file, data, size, &offset);
43321 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43322 set_fs(fs_save);
43323 return rc;
43324 }
43325 diff --git a/fs/exec.c b/fs/exec.c
43326 index b1fd202..9d037f3 100644
43327 --- a/fs/exec.c
43328 +++ b/fs/exec.c
43329 @@ -55,6 +55,13 @@
43330 #include <linux/pipe_fs_i.h>
43331 #include <linux/oom.h>
43332 #include <linux/compat.h>
43333 +#include <linux/random.h>
43334 +#include <linux/seq_file.h>
43335 +
43336 +#ifdef CONFIG_PAX_REFCOUNT
43337 +#include <linux/kallsyms.h>
43338 +#include <linux/kdebug.h>
43339 +#endif
43340
43341 #include <asm/uaccess.h>
43342 #include <asm/mmu_context.h>
43343 @@ -66,6 +73,15 @@
43344
43345 #include <trace/events/sched.h>
43346
43347 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
43348 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
43349 +#endif
43350 +
43351 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43352 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43353 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43354 +#endif
43355 +
43356 int core_uses_pid;
43357 char core_pattern[CORENAME_MAX_SIZE] = "core";
43358 unsigned int core_pipe_limit;
43359 @@ -75,7 +91,7 @@ struct core_name {
43360 char *corename;
43361 int used, size;
43362 };
43363 -static atomic_t call_count = ATOMIC_INIT(1);
43364 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43365
43366 /* The maximal length of core_pattern is also specified in sysctl.c */
43367
43368 @@ -191,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43369 int write)
43370 {
43371 struct page *page;
43372 - int ret;
43373
43374 -#ifdef CONFIG_STACK_GROWSUP
43375 - if (write) {
43376 - ret = expand_downwards(bprm->vma, pos);
43377 - if (ret < 0)
43378 - return NULL;
43379 - }
43380 -#endif
43381 - ret = get_user_pages(current, bprm->mm, pos,
43382 - 1, write, 1, &page, NULL);
43383 - if (ret <= 0)
43384 + if (0 > expand_downwards(bprm->vma, pos))
43385 + return NULL;
43386 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43387 return NULL;
43388
43389 if (write) {
43390 @@ -218,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43391 if (size <= ARG_MAX)
43392 return page;
43393
43394 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43395 + // only allow 512KB for argv+env on suid/sgid binaries
43396 + // to prevent easy ASLR exhaustion
43397 + if (((bprm->cred->euid != current_euid()) ||
43398 + (bprm->cred->egid != current_egid())) &&
43399 + (size > (512 * 1024))) {
43400 + put_page(page);
43401 + return NULL;
43402 + }
43403 +#endif
43404 +
43405 /*
43406 * Limit to 1/4-th the stack size for the argv+env strings.
43407 * This ensures that:
43408 @@ -277,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43409 vma->vm_end = STACK_TOP_MAX;
43410 vma->vm_start = vma->vm_end - PAGE_SIZE;
43411 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43412 +
43413 +#ifdef CONFIG_PAX_SEGMEXEC
43414 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43415 +#endif
43416 +
43417 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43418 INIT_LIST_HEAD(&vma->anon_vma_chain);
43419
43420 @@ -291,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43421 mm->stack_vm = mm->total_vm = 1;
43422 up_write(&mm->mmap_sem);
43423 bprm->p = vma->vm_end - sizeof(void *);
43424 +
43425 +#ifdef CONFIG_PAX_RANDUSTACK
43426 + if (randomize_va_space)
43427 + bprm->p ^= random32() & ~PAGE_MASK;
43428 +#endif
43429 +
43430 return 0;
43431 err:
43432 up_write(&mm->mmap_sem);
43433 @@ -399,19 +429,7 @@ err:
43434 return err;
43435 }
43436
43437 -struct user_arg_ptr {
43438 -#ifdef CONFIG_COMPAT
43439 - bool is_compat;
43440 -#endif
43441 - union {
43442 - const char __user *const __user *native;
43443 -#ifdef CONFIG_COMPAT
43444 - compat_uptr_t __user *compat;
43445 -#endif
43446 - } ptr;
43447 -};
43448 -
43449 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43450 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43451 {
43452 const char __user *native;
43453
43454 @@ -420,14 +438,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43455 compat_uptr_t compat;
43456
43457 if (get_user(compat, argv.ptr.compat + nr))
43458 - return ERR_PTR(-EFAULT);
43459 + return (const char __force_user *)ERR_PTR(-EFAULT);
43460
43461 return compat_ptr(compat);
43462 }
43463 #endif
43464
43465 if (get_user(native, argv.ptr.native + nr))
43466 - return ERR_PTR(-EFAULT);
43467 + return (const char __force_user *)ERR_PTR(-EFAULT);
43468
43469 return native;
43470 }
43471 @@ -446,7 +464,7 @@ static int count(struct user_arg_ptr argv, int max)
43472 if (!p)
43473 break;
43474
43475 - if (IS_ERR(p))
43476 + if (IS_ERR((const char __force_kernel *)p))
43477 return -EFAULT;
43478
43479 if (i++ >= max)
43480 @@ -480,7 +498,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43481
43482 ret = -EFAULT;
43483 str = get_user_arg_ptr(argv, argc);
43484 - if (IS_ERR(str))
43485 + if (IS_ERR((const char __force_kernel *)str))
43486 goto out;
43487
43488 len = strnlen_user(str, MAX_ARG_STRLEN);
43489 @@ -562,7 +580,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43490 int r;
43491 mm_segment_t oldfs = get_fs();
43492 struct user_arg_ptr argv = {
43493 - .ptr.native = (const char __user *const __user *)__argv,
43494 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43495 };
43496
43497 set_fs(KERNEL_DS);
43498 @@ -597,7 +615,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43499 unsigned long new_end = old_end - shift;
43500 struct mmu_gather tlb;
43501
43502 - BUG_ON(new_start > new_end);
43503 + if (new_start >= new_end || new_start < mmap_min_addr)
43504 + return -ENOMEM;
43505
43506 /*
43507 * ensure there are no vmas between where we want to go
43508 @@ -606,6 +625,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43509 if (vma != find_vma(mm, new_start))
43510 return -EFAULT;
43511
43512 +#ifdef CONFIG_PAX_SEGMEXEC
43513 + BUG_ON(pax_find_mirror_vma(vma));
43514 +#endif
43515 +
43516 /*
43517 * cover the whole range: [new_start, old_end)
43518 */
43519 @@ -686,10 +709,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43520 stack_top = arch_align_stack(stack_top);
43521 stack_top = PAGE_ALIGN(stack_top);
43522
43523 - if (unlikely(stack_top < mmap_min_addr) ||
43524 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43525 - return -ENOMEM;
43526 -
43527 stack_shift = vma->vm_end - stack_top;
43528
43529 bprm->p -= stack_shift;
43530 @@ -701,8 +720,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43531 bprm->exec -= stack_shift;
43532
43533 down_write(&mm->mmap_sem);
43534 +
43535 + /* Move stack pages down in memory. */
43536 + if (stack_shift) {
43537 + ret = shift_arg_pages(vma, stack_shift);
43538 + if (ret)
43539 + goto out_unlock;
43540 + }
43541 +
43542 vm_flags = VM_STACK_FLAGS;
43543
43544 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43545 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43546 + vm_flags &= ~VM_EXEC;
43547 +
43548 +#ifdef CONFIG_PAX_MPROTECT
43549 + if (mm->pax_flags & MF_PAX_MPROTECT)
43550 + vm_flags &= ~VM_MAYEXEC;
43551 +#endif
43552 +
43553 + }
43554 +#endif
43555 +
43556 /*
43557 * Adjust stack execute permissions; explicitly enable for
43558 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43559 @@ -721,13 +760,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43560 goto out_unlock;
43561 BUG_ON(prev != vma);
43562
43563 - /* Move stack pages down in memory. */
43564 - if (stack_shift) {
43565 - ret = shift_arg_pages(vma, stack_shift);
43566 - if (ret)
43567 - goto out_unlock;
43568 - }
43569 -
43570 /* mprotect_fixup is overkill to remove the temporary stack flags */
43571 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43572
43573 @@ -808,7 +840,7 @@ int kernel_read(struct file *file, loff_t offset,
43574 old_fs = get_fs();
43575 set_fs(get_ds());
43576 /* The cast to a user pointer is valid due to the set_fs() */
43577 - result = vfs_read(file, (void __user *)addr, count, &pos);
43578 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43579 set_fs(old_fs);
43580 return result;
43581 }
43582 @@ -1254,7 +1286,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43583 }
43584 rcu_read_unlock();
43585
43586 - if (p->fs->users > n_fs) {
43587 + if (atomic_read(&p->fs->users) > n_fs) {
43588 bprm->unsafe |= LSM_UNSAFE_SHARE;
43589 } else {
43590 res = -EAGAIN;
43591 @@ -1451,6 +1483,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43592
43593 EXPORT_SYMBOL(search_binary_handler);
43594
43595 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43596 +static DEFINE_PER_CPU(u64, exec_counter);
43597 +static int __init init_exec_counters(void)
43598 +{
43599 + unsigned int cpu;
43600 +
43601 + for_each_possible_cpu(cpu) {
43602 + per_cpu(exec_counter, cpu) = (u64)cpu;
43603 + }
43604 +
43605 + return 0;
43606 +}
43607 +early_initcall(init_exec_counters);
43608 +static inline void increment_exec_counter(void)
43609 +{
43610 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
43611 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
43612 +}
43613 +#else
43614 +static inline void increment_exec_counter(void) {}
43615 +#endif
43616 +
43617 /*
43618 * sys_execve() executes a new program.
43619 */
43620 @@ -1459,6 +1513,11 @@ static int do_execve_common(const char *filename,
43621 struct user_arg_ptr envp,
43622 struct pt_regs *regs)
43623 {
43624 +#ifdef CONFIG_GRKERNSEC
43625 + struct file *old_exec_file;
43626 + struct acl_subject_label *old_acl;
43627 + struct rlimit old_rlim[RLIM_NLIMITS];
43628 +#endif
43629 struct linux_binprm *bprm;
43630 struct file *file;
43631 struct files_struct *displaced;
43632 @@ -1466,6 +1525,8 @@ static int do_execve_common(const char *filename,
43633 int retval;
43634 const struct cred *cred = current_cred();
43635
43636 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43637 +
43638 /*
43639 * We move the actual failure in case of RLIMIT_NPROC excess from
43640 * set*uid() to execve() because too many poorly written programs
43641 @@ -1506,12 +1567,27 @@ static int do_execve_common(const char *filename,
43642 if (IS_ERR(file))
43643 goto out_unmark;
43644
43645 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
43646 + retval = -EPERM;
43647 + goto out_file;
43648 + }
43649 +
43650 sched_exec();
43651
43652 bprm->file = file;
43653 bprm->filename = filename;
43654 bprm->interp = filename;
43655
43656 + if (gr_process_user_ban()) {
43657 + retval = -EPERM;
43658 + goto out_file;
43659 + }
43660 +
43661 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43662 + retval = -EACCES;
43663 + goto out_file;
43664 + }
43665 +
43666 retval = bprm_mm_init(bprm);
43667 if (retval)
43668 goto out_file;
43669 @@ -1528,24 +1604,65 @@ static int do_execve_common(const char *filename,
43670 if (retval < 0)
43671 goto out;
43672
43673 +#ifdef CONFIG_GRKERNSEC
43674 + old_acl = current->acl;
43675 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43676 + old_exec_file = current->exec_file;
43677 + get_file(file);
43678 + current->exec_file = file;
43679 +#endif
43680 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43681 + /* limit suid stack to 8MB
43682 + we saved the old limits above and will restore them if this exec fails
43683 + */
43684 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
43685 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
43686 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
43687 +#endif
43688 +
43689 + if (!gr_tpe_allow(file)) {
43690 + retval = -EACCES;
43691 + goto out_fail;
43692 + }
43693 +
43694 + if (gr_check_crash_exec(file)) {
43695 + retval = -EACCES;
43696 + goto out_fail;
43697 + }
43698 +
43699 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43700 + bprm->unsafe);
43701 + if (retval < 0)
43702 + goto out_fail;
43703 +
43704 retval = copy_strings_kernel(1, &bprm->filename, bprm);
43705 if (retval < 0)
43706 - goto out;
43707 + goto out_fail;
43708
43709 bprm->exec = bprm->p;
43710 retval = copy_strings(bprm->envc, envp, bprm);
43711 if (retval < 0)
43712 - goto out;
43713 + goto out_fail;
43714
43715 retval = copy_strings(bprm->argc, argv, bprm);
43716 if (retval < 0)
43717 - goto out;
43718 + goto out_fail;
43719 +
43720 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43721 +
43722 + gr_handle_exec_args(bprm, argv);
43723
43724 retval = search_binary_handler(bprm,regs);
43725 if (retval < 0)
43726 - goto out;
43727 + goto out_fail;
43728 +#ifdef CONFIG_GRKERNSEC
43729 + if (old_exec_file)
43730 + fput(old_exec_file);
43731 +#endif
43732
43733 /* execve succeeded */
43734 +
43735 + increment_exec_counter();
43736 current->fs->in_exec = 0;
43737 current->in_execve = 0;
43738 acct_update_integrals(current);
43739 @@ -1554,6 +1671,14 @@ static int do_execve_common(const char *filename,
43740 put_files_struct(displaced);
43741 return retval;
43742
43743 +out_fail:
43744 +#ifdef CONFIG_GRKERNSEC
43745 + current->acl = old_acl;
43746 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43747 + fput(current->exec_file);
43748 + current->exec_file = old_exec_file;
43749 +#endif
43750 +
43751 out:
43752 if (bprm->mm) {
43753 acct_arg_size(bprm, 0);
43754 @@ -1627,7 +1752,7 @@ static int expand_corename(struct core_name *cn)
43755 {
43756 char *old_corename = cn->corename;
43757
43758 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43759 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43760 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43761
43762 if (!cn->corename) {
43763 @@ -1724,7 +1849,7 @@ static int format_corename(struct core_name *cn, long signr)
43764 int pid_in_pattern = 0;
43765 int err = 0;
43766
43767 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43768 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43769 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43770 cn->used = 0;
43771
43772 @@ -1821,6 +1946,228 @@ out:
43773 return ispipe;
43774 }
43775
43776 +int pax_check_flags(unsigned long *flags)
43777 +{
43778 + int retval = 0;
43779 +
43780 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43781 + if (*flags & MF_PAX_SEGMEXEC)
43782 + {
43783 + *flags &= ~MF_PAX_SEGMEXEC;
43784 + retval = -EINVAL;
43785 + }
43786 +#endif
43787 +
43788 + if ((*flags & MF_PAX_PAGEEXEC)
43789 +
43790 +#ifdef CONFIG_PAX_PAGEEXEC
43791 + && (*flags & MF_PAX_SEGMEXEC)
43792 +#endif
43793 +
43794 + )
43795 + {
43796 + *flags &= ~MF_PAX_PAGEEXEC;
43797 + retval = -EINVAL;
43798 + }
43799 +
43800 + if ((*flags & MF_PAX_MPROTECT)
43801 +
43802 +#ifdef CONFIG_PAX_MPROTECT
43803 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43804 +#endif
43805 +
43806 + )
43807 + {
43808 + *flags &= ~MF_PAX_MPROTECT;
43809 + retval = -EINVAL;
43810 + }
43811 +
43812 + if ((*flags & MF_PAX_EMUTRAMP)
43813 +
43814 +#ifdef CONFIG_PAX_EMUTRAMP
43815 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43816 +#endif
43817 +
43818 + )
43819 + {
43820 + *flags &= ~MF_PAX_EMUTRAMP;
43821 + retval = -EINVAL;
43822 + }
43823 +
43824 + return retval;
43825 +}
43826 +
43827 +EXPORT_SYMBOL(pax_check_flags);
43828 +
43829 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43830 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43831 +{
43832 + struct task_struct *tsk = current;
43833 + struct mm_struct *mm = current->mm;
43834 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43835 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43836 + char *path_exec = NULL;
43837 + char *path_fault = NULL;
43838 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
43839 +
43840 + if (buffer_exec && buffer_fault) {
43841 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43842 +
43843 + down_read(&mm->mmap_sem);
43844 + vma = mm->mmap;
43845 + while (vma && (!vma_exec || !vma_fault)) {
43846 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43847 + vma_exec = vma;
43848 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43849 + vma_fault = vma;
43850 + vma = vma->vm_next;
43851 + }
43852 + if (vma_exec) {
43853 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43854 + if (IS_ERR(path_exec))
43855 + path_exec = "<path too long>";
43856 + else {
43857 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43858 + if (path_exec) {
43859 + *path_exec = 0;
43860 + path_exec = buffer_exec;
43861 + } else
43862 + path_exec = "<path too long>";
43863 + }
43864 + }
43865 + if (vma_fault) {
43866 + start = vma_fault->vm_start;
43867 + end = vma_fault->vm_end;
43868 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43869 + if (vma_fault->vm_file) {
43870 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43871 + if (IS_ERR(path_fault))
43872 + path_fault = "<path too long>";
43873 + else {
43874 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43875 + if (path_fault) {
43876 + *path_fault = 0;
43877 + path_fault = buffer_fault;
43878 + } else
43879 + path_fault = "<path too long>";
43880 + }
43881 + } else
43882 + path_fault = "<anonymous mapping>";
43883 + }
43884 + up_read(&mm->mmap_sem);
43885 + }
43886 + if (tsk->signal->curr_ip)
43887 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43888 + else
43889 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43890 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43891 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43892 + task_uid(tsk), task_euid(tsk), pc, sp);
43893 + free_page((unsigned long)buffer_exec);
43894 + free_page((unsigned long)buffer_fault);
43895 + pax_report_insns(regs, pc, sp);
43896 + do_coredump(SIGKILL, SIGKILL, regs);
43897 +}
43898 +#endif
43899 +
43900 +#ifdef CONFIG_PAX_REFCOUNT
43901 +void pax_report_refcount_overflow(struct pt_regs *regs)
43902 +{
43903 + if (current->signal->curr_ip)
43904 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43905 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
43906 + else
43907 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43908 + current->comm, task_pid_nr(current), current_uid(), current_euid());
43909 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
43910 + show_regs(regs);
43911 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
43912 +}
43913 +#endif
43914 +
43915 +#ifdef CONFIG_PAX_USERCOPY
43916 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
43917 +int object_is_on_stack(const void *obj, unsigned long len)
43918 +{
43919 + const void * const stack = task_stack_page(current);
43920 + const void * const stackend = stack + THREAD_SIZE;
43921 +
43922 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43923 + const void *frame = NULL;
43924 + const void *oldframe;
43925 +#endif
43926 +
43927 + if (obj + len < obj)
43928 + return -1;
43929 +
43930 + if (obj + len <= stack || stackend <= obj)
43931 + return 0;
43932 +
43933 + if (obj < stack || stackend < obj + len)
43934 + return -1;
43935 +
43936 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43937 + oldframe = __builtin_frame_address(1);
43938 + if (oldframe)
43939 + frame = __builtin_frame_address(2);
43940 + /*
43941 + low ----------------------------------------------> high
43942 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
43943 + ^----------------^
43944 + allow copies only within here
43945 + */
43946 + while (stack <= frame && frame < stackend) {
43947 + /* if obj + len extends past the last frame, this
43948 + check won't pass and the next frame will be 0,
43949 + causing us to bail out and correctly report
43950 + the copy as invalid
43951 + */
43952 + if (obj + len <= frame)
43953 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
43954 + oldframe = frame;
43955 + frame = *(const void * const *)frame;
43956 + }
43957 + return -1;
43958 +#else
43959 + return 1;
43960 +#endif
43961 +}
43962 +
43963 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
43964 +{
43965 + if (current->signal->curr_ip)
43966 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43967 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43968 + else
43969 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43970 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43971 + dump_stack();
43972 + gr_handle_kernel_exploit();
43973 + do_group_exit(SIGKILL);
43974 +}
43975 +#endif
43976 +
43977 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
43978 +void pax_track_stack(void)
43979 +{
43980 + unsigned long sp = (unsigned long)&sp;
43981 + if (sp < current_thread_info()->lowest_stack &&
43982 + sp > (unsigned long)task_stack_page(current))
43983 + current_thread_info()->lowest_stack = sp;
43984 +}
43985 +EXPORT_SYMBOL(pax_track_stack);
43986 +#endif
43987 +
43988 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
43989 +void report_size_overflow(const char *file, unsigned int line, const char *func)
43990 +{
43991 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
43992 + dump_stack();
43993 + do_group_exit(SIGKILL);
43994 +}
43995 +EXPORT_SYMBOL(report_size_overflow);
43996 +#endif
43997 +
43998 static int zap_process(struct task_struct *start, int exit_code)
43999 {
44000 struct task_struct *t;
44001 @@ -2018,17 +2365,17 @@ static void wait_for_dump_helpers(struct file *file)
44002 pipe = file->f_path.dentry->d_inode->i_pipe;
44003
44004 pipe_lock(pipe);
44005 - pipe->readers++;
44006 - pipe->writers--;
44007 + atomic_inc(&pipe->readers);
44008 + atomic_dec(&pipe->writers);
44009
44010 - while ((pipe->readers > 1) && (!signal_pending(current))) {
44011 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44012 wake_up_interruptible_sync(&pipe->wait);
44013 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44014 pipe_wait(pipe);
44015 }
44016
44017 - pipe->readers--;
44018 - pipe->writers++;
44019 + atomic_dec(&pipe->readers);
44020 + atomic_inc(&pipe->writers);
44021 pipe_unlock(pipe);
44022
44023 }
44024 @@ -2089,7 +2436,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44025 int retval = 0;
44026 int flag = 0;
44027 int ispipe;
44028 - static atomic_t core_dump_count = ATOMIC_INIT(0);
44029 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44030 struct coredump_params cprm = {
44031 .signr = signr,
44032 .regs = regs,
44033 @@ -2104,6 +2451,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44034
44035 audit_core_dumps(signr);
44036
44037 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44038 + gr_handle_brute_attach(current, cprm.mm_flags);
44039 +
44040 binfmt = mm->binfmt;
44041 if (!binfmt || !binfmt->core_dump)
44042 goto fail;
44043 @@ -2171,7 +2521,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44044 }
44045 cprm.limit = RLIM_INFINITY;
44046
44047 - dump_count = atomic_inc_return(&core_dump_count);
44048 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
44049 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44050 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44051 task_tgid_vnr(current), current->comm);
44052 @@ -2198,6 +2548,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44053 } else {
44054 struct inode *inode;
44055
44056 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44057 +
44058 if (cprm.limit < binfmt->min_coredump)
44059 goto fail_unlock;
44060
44061 @@ -2241,7 +2593,7 @@ close_fail:
44062 filp_close(cprm.file, NULL);
44063 fail_dropcount:
44064 if (ispipe)
44065 - atomic_dec(&core_dump_count);
44066 + atomic_dec_unchecked(&core_dump_count);
44067 fail_unlock:
44068 kfree(cn.corename);
44069 fail_corename:
44070 @@ -2260,7 +2612,7 @@ fail:
44071 */
44072 int dump_write(struct file *file, const void *addr, int nr)
44073 {
44074 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44075 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44076 }
44077 EXPORT_SYMBOL(dump_write);
44078
44079 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44080 index a8cbe1b..fed04cb 100644
44081 --- a/fs/ext2/balloc.c
44082 +++ b/fs/ext2/balloc.c
44083 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44084
44085 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44086 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44087 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44088 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44089 sbi->s_resuid != current_fsuid() &&
44090 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44091 return 0;
44092 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44093 index baac1b1..1499b62 100644
44094 --- a/fs/ext3/balloc.c
44095 +++ b/fs/ext3/balloc.c
44096 @@ -1438,9 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44097
44098 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44099 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44100 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44101 + if (free_blocks < root_blocks + 1 &&
44102 !use_reservation && sbi->s_resuid != current_fsuid() &&
44103 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44104 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44105 + !capable_nolog(CAP_SYS_RESOURCE)) {
44106 return 0;
44107 }
44108 return 1;
44109 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44110 index 4bbd07a..a37bee6 100644
44111 --- a/fs/ext4/balloc.c
44112 +++ b/fs/ext4/balloc.c
44113 @@ -463,8 +463,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
44114 /* Hm, nope. Are (enough) root reserved clusters available? */
44115 if (sbi->s_resuid == current_fsuid() ||
44116 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44117 - capable(CAP_SYS_RESOURCE) ||
44118 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44119 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44120 + capable_nolog(CAP_SYS_RESOURCE)) {
44121
44122 if (free_clusters >= (nclusters + dirty_clusters))
44123 return 1;
44124 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44125 index 0e01e90..ae2bd5e 100644
44126 --- a/fs/ext4/ext4.h
44127 +++ b/fs/ext4/ext4.h
44128 @@ -1225,19 +1225,19 @@ struct ext4_sb_info {
44129 unsigned long s_mb_last_start;
44130
44131 /* stats for buddy allocator */
44132 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44133 - atomic_t s_bal_success; /* we found long enough chunks */
44134 - atomic_t s_bal_allocated; /* in blocks */
44135 - atomic_t s_bal_ex_scanned; /* total extents scanned */
44136 - atomic_t s_bal_goals; /* goal hits */
44137 - atomic_t s_bal_breaks; /* too long searches */
44138 - atomic_t s_bal_2orders; /* 2^order hits */
44139 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44140 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44141 + atomic_unchecked_t s_bal_allocated; /* in blocks */
44142 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44143 + atomic_unchecked_t s_bal_goals; /* goal hits */
44144 + atomic_unchecked_t s_bal_breaks; /* too long searches */
44145 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44146 spinlock_t s_bal_lock;
44147 unsigned long s_mb_buddies_generated;
44148 unsigned long long s_mb_generation_time;
44149 - atomic_t s_mb_lost_chunks;
44150 - atomic_t s_mb_preallocated;
44151 - atomic_t s_mb_discarded;
44152 + atomic_unchecked_t s_mb_lost_chunks;
44153 + atomic_unchecked_t s_mb_preallocated;
44154 + atomic_unchecked_t s_mb_discarded;
44155 atomic_t s_lock_busy;
44156
44157 /* locality groups */
44158 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44159 index 99ab428..7b65ba1 100644
44160 --- a/fs/ext4/mballoc.c
44161 +++ b/fs/ext4/mballoc.c
44162 @@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44163 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44164
44165 if (EXT4_SB(sb)->s_mb_stats)
44166 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44167 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44168
44169 break;
44170 }
44171 @@ -2041,7 +2041,7 @@ repeat:
44172 ac->ac_status = AC_STATUS_CONTINUE;
44173 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44174 cr = 3;
44175 - atomic_inc(&sbi->s_mb_lost_chunks);
44176 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44177 goto repeat;
44178 }
44179 }
44180 @@ -2542,25 +2542,25 @@ int ext4_mb_release(struct super_block *sb)
44181 if (sbi->s_mb_stats) {
44182 ext4_msg(sb, KERN_INFO,
44183 "mballoc: %u blocks %u reqs (%u success)",
44184 - atomic_read(&sbi->s_bal_allocated),
44185 - atomic_read(&sbi->s_bal_reqs),
44186 - atomic_read(&sbi->s_bal_success));
44187 + atomic_read_unchecked(&sbi->s_bal_allocated),
44188 + atomic_read_unchecked(&sbi->s_bal_reqs),
44189 + atomic_read_unchecked(&sbi->s_bal_success));
44190 ext4_msg(sb, KERN_INFO,
44191 "mballoc: %u extents scanned, %u goal hits, "
44192 "%u 2^N hits, %u breaks, %u lost",
44193 - atomic_read(&sbi->s_bal_ex_scanned),
44194 - atomic_read(&sbi->s_bal_goals),
44195 - atomic_read(&sbi->s_bal_2orders),
44196 - atomic_read(&sbi->s_bal_breaks),
44197 - atomic_read(&sbi->s_mb_lost_chunks));
44198 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44199 + atomic_read_unchecked(&sbi->s_bal_goals),
44200 + atomic_read_unchecked(&sbi->s_bal_2orders),
44201 + atomic_read_unchecked(&sbi->s_bal_breaks),
44202 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44203 ext4_msg(sb, KERN_INFO,
44204 "mballoc: %lu generated and it took %Lu",
44205 sbi->s_mb_buddies_generated,
44206 sbi->s_mb_generation_time);
44207 ext4_msg(sb, KERN_INFO,
44208 "mballoc: %u preallocated, %u discarded",
44209 - atomic_read(&sbi->s_mb_preallocated),
44210 - atomic_read(&sbi->s_mb_discarded));
44211 + atomic_read_unchecked(&sbi->s_mb_preallocated),
44212 + atomic_read_unchecked(&sbi->s_mb_discarded));
44213 }
44214
44215 free_percpu(sbi->s_locality_groups);
44216 @@ -3044,16 +3044,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44217 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44218
44219 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44220 - atomic_inc(&sbi->s_bal_reqs);
44221 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44222 + atomic_inc_unchecked(&sbi->s_bal_reqs);
44223 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44224 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44225 - atomic_inc(&sbi->s_bal_success);
44226 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44227 + atomic_inc_unchecked(&sbi->s_bal_success);
44228 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44229 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44230 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44231 - atomic_inc(&sbi->s_bal_goals);
44232 + atomic_inc_unchecked(&sbi->s_bal_goals);
44233 if (ac->ac_found > sbi->s_mb_max_to_scan)
44234 - atomic_inc(&sbi->s_bal_breaks);
44235 + atomic_inc_unchecked(&sbi->s_bal_breaks);
44236 }
44237
44238 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44239 @@ -3457,7 +3457,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44240 trace_ext4_mb_new_inode_pa(ac, pa);
44241
44242 ext4_mb_use_inode_pa(ac, pa);
44243 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44244 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44245
44246 ei = EXT4_I(ac->ac_inode);
44247 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44248 @@ -3517,7 +3517,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44249 trace_ext4_mb_new_group_pa(ac, pa);
44250
44251 ext4_mb_use_group_pa(ac, pa);
44252 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44253 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44254
44255 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44256 lg = ac->ac_lg;
44257 @@ -3606,7 +3606,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44258 * from the bitmap and continue.
44259 */
44260 }
44261 - atomic_add(free, &sbi->s_mb_discarded);
44262 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44263
44264 return err;
44265 }
44266 @@ -3624,7 +3624,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44267 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44268 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44269 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44270 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44271 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44272 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44273
44274 return 0;
44275 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
44276 index 59fa0be..53589ff 100644
44277 --- a/fs/ext4/resize.c
44278 +++ b/fs/ext4/resize.c
44279 @@ -161,6 +161,8 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
44280 if (flex_gd == NULL)
44281 goto out3;
44282
44283 + if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
44284 + goto out2;
44285 flex_gd->count = flexbg_size;
44286
44287 flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
44288 diff --git a/fs/fcntl.c b/fs/fcntl.c
44289 index 75e7c1f..1eb3e4d 100644
44290 --- a/fs/fcntl.c
44291 +++ b/fs/fcntl.c
44292 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44293 if (err)
44294 return err;
44295
44296 + if (gr_handle_chroot_fowner(pid, type))
44297 + return -ENOENT;
44298 + if (gr_check_protected_task_fowner(pid, type))
44299 + return -EACCES;
44300 +
44301 f_modown(filp, pid, type, force);
44302 return 0;
44303 }
44304 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44305
44306 static int f_setown_ex(struct file *filp, unsigned long arg)
44307 {
44308 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44309 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44310 struct f_owner_ex owner;
44311 struct pid *pid;
44312 int type;
44313 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44314
44315 static int f_getown_ex(struct file *filp, unsigned long arg)
44316 {
44317 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44318 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44319 struct f_owner_ex owner;
44320 int ret = 0;
44321
44322 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44323 switch (cmd) {
44324 case F_DUPFD:
44325 case F_DUPFD_CLOEXEC:
44326 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44327 if (arg >= rlimit(RLIMIT_NOFILE))
44328 break;
44329 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44330 diff --git a/fs/fifo.c b/fs/fifo.c
44331 index b1a524d..4ee270e 100644
44332 --- a/fs/fifo.c
44333 +++ b/fs/fifo.c
44334 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44335 */
44336 filp->f_op = &read_pipefifo_fops;
44337 pipe->r_counter++;
44338 - if (pipe->readers++ == 0)
44339 + if (atomic_inc_return(&pipe->readers) == 1)
44340 wake_up_partner(inode);
44341
44342 - if (!pipe->writers) {
44343 + if (!atomic_read(&pipe->writers)) {
44344 if ((filp->f_flags & O_NONBLOCK)) {
44345 /* suppress POLLHUP until we have
44346 * seen a writer */
44347 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44348 * errno=ENXIO when there is no process reading the FIFO.
44349 */
44350 ret = -ENXIO;
44351 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44352 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44353 goto err;
44354
44355 filp->f_op = &write_pipefifo_fops;
44356 pipe->w_counter++;
44357 - if (!pipe->writers++)
44358 + if (atomic_inc_return(&pipe->writers) == 1)
44359 wake_up_partner(inode);
44360
44361 - if (!pipe->readers) {
44362 + if (!atomic_read(&pipe->readers)) {
44363 wait_for_partner(inode, &pipe->r_counter);
44364 if (signal_pending(current))
44365 goto err_wr;
44366 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44367 */
44368 filp->f_op = &rdwr_pipefifo_fops;
44369
44370 - pipe->readers++;
44371 - pipe->writers++;
44372 + atomic_inc(&pipe->readers);
44373 + atomic_inc(&pipe->writers);
44374 pipe->r_counter++;
44375 pipe->w_counter++;
44376 - if (pipe->readers == 1 || pipe->writers == 1)
44377 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44378 wake_up_partner(inode);
44379 break;
44380
44381 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44382 return 0;
44383
44384 err_rd:
44385 - if (!--pipe->readers)
44386 + if (atomic_dec_and_test(&pipe->readers))
44387 wake_up_interruptible(&pipe->wait);
44388 ret = -ERESTARTSYS;
44389 goto err;
44390
44391 err_wr:
44392 - if (!--pipe->writers)
44393 + if (atomic_dec_and_test(&pipe->writers))
44394 wake_up_interruptible(&pipe->wait);
44395 ret = -ERESTARTSYS;
44396 goto err;
44397
44398 err:
44399 - if (!pipe->readers && !pipe->writers)
44400 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44401 free_pipe_info(inode);
44402
44403 err_nocleanup:
44404 diff --git a/fs/file.c b/fs/file.c
44405 index ba3f605..fade102 100644
44406 --- a/fs/file.c
44407 +++ b/fs/file.c
44408 @@ -15,6 +15,7 @@
44409 #include <linux/slab.h>
44410 #include <linux/vmalloc.h>
44411 #include <linux/file.h>
44412 +#include <linux/security.h>
44413 #include <linux/fdtable.h>
44414 #include <linux/bitops.h>
44415 #include <linux/interrupt.h>
44416 @@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
44417 * N.B. For clone tasks sharing a files structure, this test
44418 * will limit the total number of files that can be opened.
44419 */
44420 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44421 if (nr >= rlimit(RLIMIT_NOFILE))
44422 return -EMFILE;
44423
44424 diff --git a/fs/filesystems.c b/fs/filesystems.c
44425 index 96f2428..f5eeb8e 100644
44426 --- a/fs/filesystems.c
44427 +++ b/fs/filesystems.c
44428 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
44429 int len = dot ? dot - name : strlen(name);
44430
44431 fs = __get_fs_type(name, len);
44432 +
44433 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44434 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44435 +#else
44436 if (!fs && (request_module("%.*s", len, name) == 0))
44437 +#endif
44438 fs = __get_fs_type(name, len);
44439
44440 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44441 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44442 index e159e68..e7d2a6f 100644
44443 --- a/fs/fs_struct.c
44444 +++ b/fs/fs_struct.c
44445 @@ -4,6 +4,7 @@
44446 #include <linux/path.h>
44447 #include <linux/slab.h>
44448 #include <linux/fs_struct.h>
44449 +#include <linux/grsecurity.h>
44450 #include "internal.h"
44451
44452 static inline void path_get_longterm(struct path *path)
44453 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44454 write_seqcount_begin(&fs->seq);
44455 old_root = fs->root;
44456 fs->root = *path;
44457 + gr_set_chroot_entries(current, path);
44458 write_seqcount_end(&fs->seq);
44459 spin_unlock(&fs->lock);
44460 if (old_root.dentry)
44461 @@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
44462 return 1;
44463 }
44464
44465 +static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
44466 +{
44467 + if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
44468 + return 0;
44469 + *p = *new;
44470 +
44471 + gr_set_chroot_entries(task, new);
44472 +
44473 + return 1;
44474 +}
44475 +
44476 void chroot_fs_refs(struct path *old_root, struct path *new_root)
44477 {
44478 struct task_struct *g, *p;
44479 @@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44480 int hits = 0;
44481 spin_lock(&fs->lock);
44482 write_seqcount_begin(&fs->seq);
44483 - hits += replace_path(&fs->root, old_root, new_root);
44484 + hits += replace_root_path(p, &fs->root, old_root, new_root);
44485 hits += replace_path(&fs->pwd, old_root, new_root);
44486 write_seqcount_end(&fs->seq);
44487 while (hits--) {
44488 @@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
44489 task_lock(tsk);
44490 spin_lock(&fs->lock);
44491 tsk->fs = NULL;
44492 - kill = !--fs->users;
44493 + gr_clear_chroot_entries(tsk);
44494 + kill = !atomic_dec_return(&fs->users);
44495 spin_unlock(&fs->lock);
44496 task_unlock(tsk);
44497 if (kill)
44498 @@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44499 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44500 /* We don't need to lock fs - think why ;-) */
44501 if (fs) {
44502 - fs->users = 1;
44503 + atomic_set(&fs->users, 1);
44504 fs->in_exec = 0;
44505 spin_lock_init(&fs->lock);
44506 seqcount_init(&fs->seq);
44507 @@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44508 spin_lock(&old->lock);
44509 fs->root = old->root;
44510 path_get_longterm(&fs->root);
44511 + /* instead of calling gr_set_chroot_entries here,
44512 + we call it from every caller of this function
44513 + */
44514 fs->pwd = old->pwd;
44515 path_get_longterm(&fs->pwd);
44516 spin_unlock(&old->lock);
44517 @@ -151,8 +168,9 @@ int unshare_fs_struct(void)
44518
44519 task_lock(current);
44520 spin_lock(&fs->lock);
44521 - kill = !--fs->users;
44522 + kill = !atomic_dec_return(&fs->users);
44523 current->fs = new_fs;
44524 + gr_set_chroot_entries(current, &new_fs->root);
44525 spin_unlock(&fs->lock);
44526 task_unlock(current);
44527
44528 @@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44529
44530 int current_umask(void)
44531 {
44532 - return current->fs->umask;
44533 + return current->fs->umask | gr_acl_umask();
44534 }
44535 EXPORT_SYMBOL(current_umask);
44536
44537 /* to be mentioned only in INIT_TASK */
44538 struct fs_struct init_fs = {
44539 - .users = 1,
44540 + .users = ATOMIC_INIT(1),
44541 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44542 .seq = SEQCNT_ZERO,
44543 .umask = 0022,
44544 @@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
44545 task_lock(current);
44546
44547 spin_lock(&init_fs.lock);
44548 - init_fs.users++;
44549 + atomic_inc(&init_fs.users);
44550 spin_unlock(&init_fs.lock);
44551
44552 spin_lock(&fs->lock);
44553 current->fs = &init_fs;
44554 - kill = !--fs->users;
44555 + gr_set_chroot_entries(current, &current->fs->root);
44556 + kill = !atomic_dec_return(&fs->users);
44557 spin_unlock(&fs->lock);
44558
44559 task_unlock(current);
44560 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44561 index 9905350..02eaec4 100644
44562 --- a/fs/fscache/cookie.c
44563 +++ b/fs/fscache/cookie.c
44564 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44565 parent ? (char *) parent->def->name : "<no-parent>",
44566 def->name, netfs_data);
44567
44568 - fscache_stat(&fscache_n_acquires);
44569 + fscache_stat_unchecked(&fscache_n_acquires);
44570
44571 /* if there's no parent cookie, then we don't create one here either */
44572 if (!parent) {
44573 - fscache_stat(&fscache_n_acquires_null);
44574 + fscache_stat_unchecked(&fscache_n_acquires_null);
44575 _leave(" [no parent]");
44576 return NULL;
44577 }
44578 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44579 /* allocate and initialise a cookie */
44580 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44581 if (!cookie) {
44582 - fscache_stat(&fscache_n_acquires_oom);
44583 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44584 _leave(" [ENOMEM]");
44585 return NULL;
44586 }
44587 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44588
44589 switch (cookie->def->type) {
44590 case FSCACHE_COOKIE_TYPE_INDEX:
44591 - fscache_stat(&fscache_n_cookie_index);
44592 + fscache_stat_unchecked(&fscache_n_cookie_index);
44593 break;
44594 case FSCACHE_COOKIE_TYPE_DATAFILE:
44595 - fscache_stat(&fscache_n_cookie_data);
44596 + fscache_stat_unchecked(&fscache_n_cookie_data);
44597 break;
44598 default:
44599 - fscache_stat(&fscache_n_cookie_special);
44600 + fscache_stat_unchecked(&fscache_n_cookie_special);
44601 break;
44602 }
44603
44604 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44605 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44606 atomic_dec(&parent->n_children);
44607 __fscache_cookie_put(cookie);
44608 - fscache_stat(&fscache_n_acquires_nobufs);
44609 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44610 _leave(" = NULL");
44611 return NULL;
44612 }
44613 }
44614
44615 - fscache_stat(&fscache_n_acquires_ok);
44616 + fscache_stat_unchecked(&fscache_n_acquires_ok);
44617 _leave(" = %p", cookie);
44618 return cookie;
44619 }
44620 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44621 cache = fscache_select_cache_for_object(cookie->parent);
44622 if (!cache) {
44623 up_read(&fscache_addremove_sem);
44624 - fscache_stat(&fscache_n_acquires_no_cache);
44625 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44626 _leave(" = -ENOMEDIUM [no cache]");
44627 return -ENOMEDIUM;
44628 }
44629 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44630 object = cache->ops->alloc_object(cache, cookie);
44631 fscache_stat_d(&fscache_n_cop_alloc_object);
44632 if (IS_ERR(object)) {
44633 - fscache_stat(&fscache_n_object_no_alloc);
44634 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
44635 ret = PTR_ERR(object);
44636 goto error;
44637 }
44638
44639 - fscache_stat(&fscache_n_object_alloc);
44640 + fscache_stat_unchecked(&fscache_n_object_alloc);
44641
44642 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44643
44644 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44645 struct fscache_object *object;
44646 struct hlist_node *_p;
44647
44648 - fscache_stat(&fscache_n_updates);
44649 + fscache_stat_unchecked(&fscache_n_updates);
44650
44651 if (!cookie) {
44652 - fscache_stat(&fscache_n_updates_null);
44653 + fscache_stat_unchecked(&fscache_n_updates_null);
44654 _leave(" [no cookie]");
44655 return;
44656 }
44657 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44658 struct fscache_object *object;
44659 unsigned long event;
44660
44661 - fscache_stat(&fscache_n_relinquishes);
44662 + fscache_stat_unchecked(&fscache_n_relinquishes);
44663 if (retire)
44664 - fscache_stat(&fscache_n_relinquishes_retire);
44665 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44666
44667 if (!cookie) {
44668 - fscache_stat(&fscache_n_relinquishes_null);
44669 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
44670 _leave(" [no cookie]");
44671 return;
44672 }
44673 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44674
44675 /* wait for the cookie to finish being instantiated (or to fail) */
44676 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44677 - fscache_stat(&fscache_n_relinquishes_waitcrt);
44678 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44679 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44680 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44681 }
44682 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44683 index f6aad48..88dcf26 100644
44684 --- a/fs/fscache/internal.h
44685 +++ b/fs/fscache/internal.h
44686 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44687 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44688 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44689
44690 -extern atomic_t fscache_n_op_pend;
44691 -extern atomic_t fscache_n_op_run;
44692 -extern atomic_t fscache_n_op_enqueue;
44693 -extern atomic_t fscache_n_op_deferred_release;
44694 -extern atomic_t fscache_n_op_release;
44695 -extern atomic_t fscache_n_op_gc;
44696 -extern atomic_t fscache_n_op_cancelled;
44697 -extern atomic_t fscache_n_op_rejected;
44698 +extern atomic_unchecked_t fscache_n_op_pend;
44699 +extern atomic_unchecked_t fscache_n_op_run;
44700 +extern atomic_unchecked_t fscache_n_op_enqueue;
44701 +extern atomic_unchecked_t fscache_n_op_deferred_release;
44702 +extern atomic_unchecked_t fscache_n_op_release;
44703 +extern atomic_unchecked_t fscache_n_op_gc;
44704 +extern atomic_unchecked_t fscache_n_op_cancelled;
44705 +extern atomic_unchecked_t fscache_n_op_rejected;
44706
44707 -extern atomic_t fscache_n_attr_changed;
44708 -extern atomic_t fscache_n_attr_changed_ok;
44709 -extern atomic_t fscache_n_attr_changed_nobufs;
44710 -extern atomic_t fscache_n_attr_changed_nomem;
44711 -extern atomic_t fscache_n_attr_changed_calls;
44712 +extern atomic_unchecked_t fscache_n_attr_changed;
44713 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
44714 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44715 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44716 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
44717
44718 -extern atomic_t fscache_n_allocs;
44719 -extern atomic_t fscache_n_allocs_ok;
44720 -extern atomic_t fscache_n_allocs_wait;
44721 -extern atomic_t fscache_n_allocs_nobufs;
44722 -extern atomic_t fscache_n_allocs_intr;
44723 -extern atomic_t fscache_n_allocs_object_dead;
44724 -extern atomic_t fscache_n_alloc_ops;
44725 -extern atomic_t fscache_n_alloc_op_waits;
44726 +extern atomic_unchecked_t fscache_n_allocs;
44727 +extern atomic_unchecked_t fscache_n_allocs_ok;
44728 +extern atomic_unchecked_t fscache_n_allocs_wait;
44729 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
44730 +extern atomic_unchecked_t fscache_n_allocs_intr;
44731 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
44732 +extern atomic_unchecked_t fscache_n_alloc_ops;
44733 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
44734
44735 -extern atomic_t fscache_n_retrievals;
44736 -extern atomic_t fscache_n_retrievals_ok;
44737 -extern atomic_t fscache_n_retrievals_wait;
44738 -extern atomic_t fscache_n_retrievals_nodata;
44739 -extern atomic_t fscache_n_retrievals_nobufs;
44740 -extern atomic_t fscache_n_retrievals_intr;
44741 -extern atomic_t fscache_n_retrievals_nomem;
44742 -extern atomic_t fscache_n_retrievals_object_dead;
44743 -extern atomic_t fscache_n_retrieval_ops;
44744 -extern atomic_t fscache_n_retrieval_op_waits;
44745 +extern atomic_unchecked_t fscache_n_retrievals;
44746 +extern atomic_unchecked_t fscache_n_retrievals_ok;
44747 +extern atomic_unchecked_t fscache_n_retrievals_wait;
44748 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
44749 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44750 +extern atomic_unchecked_t fscache_n_retrievals_intr;
44751 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
44752 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44753 +extern atomic_unchecked_t fscache_n_retrieval_ops;
44754 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44755
44756 -extern atomic_t fscache_n_stores;
44757 -extern atomic_t fscache_n_stores_ok;
44758 -extern atomic_t fscache_n_stores_again;
44759 -extern atomic_t fscache_n_stores_nobufs;
44760 -extern atomic_t fscache_n_stores_oom;
44761 -extern atomic_t fscache_n_store_ops;
44762 -extern atomic_t fscache_n_store_calls;
44763 -extern atomic_t fscache_n_store_pages;
44764 -extern atomic_t fscache_n_store_radix_deletes;
44765 -extern atomic_t fscache_n_store_pages_over_limit;
44766 +extern atomic_unchecked_t fscache_n_stores;
44767 +extern atomic_unchecked_t fscache_n_stores_ok;
44768 +extern atomic_unchecked_t fscache_n_stores_again;
44769 +extern atomic_unchecked_t fscache_n_stores_nobufs;
44770 +extern atomic_unchecked_t fscache_n_stores_oom;
44771 +extern atomic_unchecked_t fscache_n_store_ops;
44772 +extern atomic_unchecked_t fscache_n_store_calls;
44773 +extern atomic_unchecked_t fscache_n_store_pages;
44774 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
44775 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44776
44777 -extern atomic_t fscache_n_store_vmscan_not_storing;
44778 -extern atomic_t fscache_n_store_vmscan_gone;
44779 -extern atomic_t fscache_n_store_vmscan_busy;
44780 -extern atomic_t fscache_n_store_vmscan_cancelled;
44781 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44782 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44783 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44784 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44785
44786 -extern atomic_t fscache_n_marks;
44787 -extern atomic_t fscache_n_uncaches;
44788 +extern atomic_unchecked_t fscache_n_marks;
44789 +extern atomic_unchecked_t fscache_n_uncaches;
44790
44791 -extern atomic_t fscache_n_acquires;
44792 -extern atomic_t fscache_n_acquires_null;
44793 -extern atomic_t fscache_n_acquires_no_cache;
44794 -extern atomic_t fscache_n_acquires_ok;
44795 -extern atomic_t fscache_n_acquires_nobufs;
44796 -extern atomic_t fscache_n_acquires_oom;
44797 +extern atomic_unchecked_t fscache_n_acquires;
44798 +extern atomic_unchecked_t fscache_n_acquires_null;
44799 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
44800 +extern atomic_unchecked_t fscache_n_acquires_ok;
44801 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
44802 +extern atomic_unchecked_t fscache_n_acquires_oom;
44803
44804 -extern atomic_t fscache_n_updates;
44805 -extern atomic_t fscache_n_updates_null;
44806 -extern atomic_t fscache_n_updates_run;
44807 +extern atomic_unchecked_t fscache_n_updates;
44808 +extern atomic_unchecked_t fscache_n_updates_null;
44809 +extern atomic_unchecked_t fscache_n_updates_run;
44810
44811 -extern atomic_t fscache_n_relinquishes;
44812 -extern atomic_t fscache_n_relinquishes_null;
44813 -extern atomic_t fscache_n_relinquishes_waitcrt;
44814 -extern atomic_t fscache_n_relinquishes_retire;
44815 +extern atomic_unchecked_t fscache_n_relinquishes;
44816 +extern atomic_unchecked_t fscache_n_relinquishes_null;
44817 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44818 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
44819
44820 -extern atomic_t fscache_n_cookie_index;
44821 -extern atomic_t fscache_n_cookie_data;
44822 -extern atomic_t fscache_n_cookie_special;
44823 +extern atomic_unchecked_t fscache_n_cookie_index;
44824 +extern atomic_unchecked_t fscache_n_cookie_data;
44825 +extern atomic_unchecked_t fscache_n_cookie_special;
44826
44827 -extern atomic_t fscache_n_object_alloc;
44828 -extern atomic_t fscache_n_object_no_alloc;
44829 -extern atomic_t fscache_n_object_lookups;
44830 -extern atomic_t fscache_n_object_lookups_negative;
44831 -extern atomic_t fscache_n_object_lookups_positive;
44832 -extern atomic_t fscache_n_object_lookups_timed_out;
44833 -extern atomic_t fscache_n_object_created;
44834 -extern atomic_t fscache_n_object_avail;
44835 -extern atomic_t fscache_n_object_dead;
44836 +extern atomic_unchecked_t fscache_n_object_alloc;
44837 +extern atomic_unchecked_t fscache_n_object_no_alloc;
44838 +extern atomic_unchecked_t fscache_n_object_lookups;
44839 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
44840 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
44841 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44842 +extern atomic_unchecked_t fscache_n_object_created;
44843 +extern atomic_unchecked_t fscache_n_object_avail;
44844 +extern atomic_unchecked_t fscache_n_object_dead;
44845
44846 -extern atomic_t fscache_n_checkaux_none;
44847 -extern atomic_t fscache_n_checkaux_okay;
44848 -extern atomic_t fscache_n_checkaux_update;
44849 -extern atomic_t fscache_n_checkaux_obsolete;
44850 +extern atomic_unchecked_t fscache_n_checkaux_none;
44851 +extern atomic_unchecked_t fscache_n_checkaux_okay;
44852 +extern atomic_unchecked_t fscache_n_checkaux_update;
44853 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44854
44855 extern atomic_t fscache_n_cop_alloc_object;
44856 extern atomic_t fscache_n_cop_lookup_object;
44857 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44858 atomic_inc(stat);
44859 }
44860
44861 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44862 +{
44863 + atomic_inc_unchecked(stat);
44864 +}
44865 +
44866 static inline void fscache_stat_d(atomic_t *stat)
44867 {
44868 atomic_dec(stat);
44869 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44870
44871 #define __fscache_stat(stat) (NULL)
44872 #define fscache_stat(stat) do {} while (0)
44873 +#define fscache_stat_unchecked(stat) do {} while (0)
44874 #define fscache_stat_d(stat) do {} while (0)
44875 #endif
44876
44877 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44878 index b6b897c..0ffff9c 100644
44879 --- a/fs/fscache/object.c
44880 +++ b/fs/fscache/object.c
44881 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44882 /* update the object metadata on disk */
44883 case FSCACHE_OBJECT_UPDATING:
44884 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44885 - fscache_stat(&fscache_n_updates_run);
44886 + fscache_stat_unchecked(&fscache_n_updates_run);
44887 fscache_stat(&fscache_n_cop_update_object);
44888 object->cache->ops->update_object(object);
44889 fscache_stat_d(&fscache_n_cop_update_object);
44890 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44891 spin_lock(&object->lock);
44892 object->state = FSCACHE_OBJECT_DEAD;
44893 spin_unlock(&object->lock);
44894 - fscache_stat(&fscache_n_object_dead);
44895 + fscache_stat_unchecked(&fscache_n_object_dead);
44896 goto terminal_transit;
44897
44898 /* handle the parent cache of this object being withdrawn from
44899 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44900 spin_lock(&object->lock);
44901 object->state = FSCACHE_OBJECT_DEAD;
44902 spin_unlock(&object->lock);
44903 - fscache_stat(&fscache_n_object_dead);
44904 + fscache_stat_unchecked(&fscache_n_object_dead);
44905 goto terminal_transit;
44906
44907 /* complain about the object being woken up once it is
44908 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44909 parent->cookie->def->name, cookie->def->name,
44910 object->cache->tag->name);
44911
44912 - fscache_stat(&fscache_n_object_lookups);
44913 + fscache_stat_unchecked(&fscache_n_object_lookups);
44914 fscache_stat(&fscache_n_cop_lookup_object);
44915 ret = object->cache->ops->lookup_object(object);
44916 fscache_stat_d(&fscache_n_cop_lookup_object);
44917 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44918 if (ret == -ETIMEDOUT) {
44919 /* probably stuck behind another object, so move this one to
44920 * the back of the queue */
44921 - fscache_stat(&fscache_n_object_lookups_timed_out);
44922 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
44923 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44924 }
44925
44926 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
44927
44928 spin_lock(&object->lock);
44929 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44930 - fscache_stat(&fscache_n_object_lookups_negative);
44931 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
44932
44933 /* transit here to allow write requests to begin stacking up
44934 * and read requests to begin returning ENODATA */
44935 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
44936 * result, in which case there may be data available */
44937 spin_lock(&object->lock);
44938 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44939 - fscache_stat(&fscache_n_object_lookups_positive);
44940 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
44941
44942 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
44943
44944 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
44945 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44946 } else {
44947 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
44948 - fscache_stat(&fscache_n_object_created);
44949 + fscache_stat_unchecked(&fscache_n_object_created);
44950
44951 object->state = FSCACHE_OBJECT_AVAILABLE;
44952 spin_unlock(&object->lock);
44953 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
44954 fscache_enqueue_dependents(object);
44955
44956 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
44957 - fscache_stat(&fscache_n_object_avail);
44958 + fscache_stat_unchecked(&fscache_n_object_avail);
44959
44960 _leave("");
44961 }
44962 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44963 enum fscache_checkaux result;
44964
44965 if (!object->cookie->def->check_aux) {
44966 - fscache_stat(&fscache_n_checkaux_none);
44967 + fscache_stat_unchecked(&fscache_n_checkaux_none);
44968 return FSCACHE_CHECKAUX_OKAY;
44969 }
44970
44971 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44972 switch (result) {
44973 /* entry okay as is */
44974 case FSCACHE_CHECKAUX_OKAY:
44975 - fscache_stat(&fscache_n_checkaux_okay);
44976 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
44977 break;
44978
44979 /* entry requires update */
44980 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
44981 - fscache_stat(&fscache_n_checkaux_update);
44982 + fscache_stat_unchecked(&fscache_n_checkaux_update);
44983 break;
44984
44985 /* entry requires deletion */
44986 case FSCACHE_CHECKAUX_OBSOLETE:
44987 - fscache_stat(&fscache_n_checkaux_obsolete);
44988 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
44989 break;
44990
44991 default:
44992 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
44993 index 30afdfa..2256596 100644
44994 --- a/fs/fscache/operation.c
44995 +++ b/fs/fscache/operation.c
44996 @@ -17,7 +17,7 @@
44997 #include <linux/slab.h>
44998 #include "internal.h"
44999
45000 -atomic_t fscache_op_debug_id;
45001 +atomic_unchecked_t fscache_op_debug_id;
45002 EXPORT_SYMBOL(fscache_op_debug_id);
45003
45004 /**
45005 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45006 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45007 ASSERTCMP(atomic_read(&op->usage), >, 0);
45008
45009 - fscache_stat(&fscache_n_op_enqueue);
45010 + fscache_stat_unchecked(&fscache_n_op_enqueue);
45011 switch (op->flags & FSCACHE_OP_TYPE) {
45012 case FSCACHE_OP_ASYNC:
45013 _debug("queue async");
45014 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45015 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45016 if (op->processor)
45017 fscache_enqueue_operation(op);
45018 - fscache_stat(&fscache_n_op_run);
45019 + fscache_stat_unchecked(&fscache_n_op_run);
45020 }
45021
45022 /*
45023 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45024 if (object->n_ops > 1) {
45025 atomic_inc(&op->usage);
45026 list_add_tail(&op->pend_link, &object->pending_ops);
45027 - fscache_stat(&fscache_n_op_pend);
45028 + fscache_stat_unchecked(&fscache_n_op_pend);
45029 } else if (!list_empty(&object->pending_ops)) {
45030 atomic_inc(&op->usage);
45031 list_add_tail(&op->pend_link, &object->pending_ops);
45032 - fscache_stat(&fscache_n_op_pend);
45033 + fscache_stat_unchecked(&fscache_n_op_pend);
45034 fscache_start_operations(object);
45035 } else {
45036 ASSERTCMP(object->n_in_progress, ==, 0);
45037 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45038 object->n_exclusive++; /* reads and writes must wait */
45039 atomic_inc(&op->usage);
45040 list_add_tail(&op->pend_link, &object->pending_ops);
45041 - fscache_stat(&fscache_n_op_pend);
45042 + fscache_stat_unchecked(&fscache_n_op_pend);
45043 ret = 0;
45044 } else {
45045 /* not allowed to submit ops in any other state */
45046 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45047 if (object->n_exclusive > 0) {
45048 atomic_inc(&op->usage);
45049 list_add_tail(&op->pend_link, &object->pending_ops);
45050 - fscache_stat(&fscache_n_op_pend);
45051 + fscache_stat_unchecked(&fscache_n_op_pend);
45052 } else if (!list_empty(&object->pending_ops)) {
45053 atomic_inc(&op->usage);
45054 list_add_tail(&op->pend_link, &object->pending_ops);
45055 - fscache_stat(&fscache_n_op_pend);
45056 + fscache_stat_unchecked(&fscache_n_op_pend);
45057 fscache_start_operations(object);
45058 } else {
45059 ASSERTCMP(object->n_exclusive, ==, 0);
45060 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45061 object->n_ops++;
45062 atomic_inc(&op->usage);
45063 list_add_tail(&op->pend_link, &object->pending_ops);
45064 - fscache_stat(&fscache_n_op_pend);
45065 + fscache_stat_unchecked(&fscache_n_op_pend);
45066 ret = 0;
45067 } else if (object->state == FSCACHE_OBJECT_DYING ||
45068 object->state == FSCACHE_OBJECT_LC_DYING ||
45069 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45070 - fscache_stat(&fscache_n_op_rejected);
45071 + fscache_stat_unchecked(&fscache_n_op_rejected);
45072 ret = -ENOBUFS;
45073 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45074 fscache_report_unexpected_submission(object, op, ostate);
45075 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45076
45077 ret = -EBUSY;
45078 if (!list_empty(&op->pend_link)) {
45079 - fscache_stat(&fscache_n_op_cancelled);
45080 + fscache_stat_unchecked(&fscache_n_op_cancelled);
45081 list_del_init(&op->pend_link);
45082 object->n_ops--;
45083 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45084 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45085 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45086 BUG();
45087
45088 - fscache_stat(&fscache_n_op_release);
45089 + fscache_stat_unchecked(&fscache_n_op_release);
45090
45091 if (op->release) {
45092 op->release(op);
45093 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45094 * lock, and defer it otherwise */
45095 if (!spin_trylock(&object->lock)) {
45096 _debug("defer put");
45097 - fscache_stat(&fscache_n_op_deferred_release);
45098 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
45099
45100 cache = object->cache;
45101 spin_lock(&cache->op_gc_list_lock);
45102 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45103
45104 _debug("GC DEFERRED REL OBJ%x OP%x",
45105 object->debug_id, op->debug_id);
45106 - fscache_stat(&fscache_n_op_gc);
45107 + fscache_stat_unchecked(&fscache_n_op_gc);
45108
45109 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45110
45111 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45112 index 3f7a59b..cf196cc 100644
45113 --- a/fs/fscache/page.c
45114 +++ b/fs/fscache/page.c
45115 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45116 val = radix_tree_lookup(&cookie->stores, page->index);
45117 if (!val) {
45118 rcu_read_unlock();
45119 - fscache_stat(&fscache_n_store_vmscan_not_storing);
45120 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45121 __fscache_uncache_page(cookie, page);
45122 return true;
45123 }
45124 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45125 spin_unlock(&cookie->stores_lock);
45126
45127 if (xpage) {
45128 - fscache_stat(&fscache_n_store_vmscan_cancelled);
45129 - fscache_stat(&fscache_n_store_radix_deletes);
45130 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45131 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45132 ASSERTCMP(xpage, ==, page);
45133 } else {
45134 - fscache_stat(&fscache_n_store_vmscan_gone);
45135 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45136 }
45137
45138 wake_up_bit(&cookie->flags, 0);
45139 @@ -107,7 +107,7 @@ page_busy:
45140 /* we might want to wait here, but that could deadlock the allocator as
45141 * the work threads writing to the cache may all end up sleeping
45142 * on memory allocation */
45143 - fscache_stat(&fscache_n_store_vmscan_busy);
45144 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45145 return false;
45146 }
45147 EXPORT_SYMBOL(__fscache_maybe_release_page);
45148 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45149 FSCACHE_COOKIE_STORING_TAG);
45150 if (!radix_tree_tag_get(&cookie->stores, page->index,
45151 FSCACHE_COOKIE_PENDING_TAG)) {
45152 - fscache_stat(&fscache_n_store_radix_deletes);
45153 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45154 xpage = radix_tree_delete(&cookie->stores, page->index);
45155 }
45156 spin_unlock(&cookie->stores_lock);
45157 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45158
45159 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45160
45161 - fscache_stat(&fscache_n_attr_changed_calls);
45162 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45163
45164 if (fscache_object_is_active(object)) {
45165 fscache_stat(&fscache_n_cop_attr_changed);
45166 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45167
45168 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45169
45170 - fscache_stat(&fscache_n_attr_changed);
45171 + fscache_stat_unchecked(&fscache_n_attr_changed);
45172
45173 op = kzalloc(sizeof(*op), GFP_KERNEL);
45174 if (!op) {
45175 - fscache_stat(&fscache_n_attr_changed_nomem);
45176 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45177 _leave(" = -ENOMEM");
45178 return -ENOMEM;
45179 }
45180 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45181 if (fscache_submit_exclusive_op(object, op) < 0)
45182 goto nobufs;
45183 spin_unlock(&cookie->lock);
45184 - fscache_stat(&fscache_n_attr_changed_ok);
45185 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45186 fscache_put_operation(op);
45187 _leave(" = 0");
45188 return 0;
45189 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45190 nobufs:
45191 spin_unlock(&cookie->lock);
45192 kfree(op);
45193 - fscache_stat(&fscache_n_attr_changed_nobufs);
45194 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45195 _leave(" = %d", -ENOBUFS);
45196 return -ENOBUFS;
45197 }
45198 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45199 /* allocate a retrieval operation and attempt to submit it */
45200 op = kzalloc(sizeof(*op), GFP_NOIO);
45201 if (!op) {
45202 - fscache_stat(&fscache_n_retrievals_nomem);
45203 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45204 return NULL;
45205 }
45206
45207 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45208 return 0;
45209 }
45210
45211 - fscache_stat(&fscache_n_retrievals_wait);
45212 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
45213
45214 jif = jiffies;
45215 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45216 fscache_wait_bit_interruptible,
45217 TASK_INTERRUPTIBLE) != 0) {
45218 - fscache_stat(&fscache_n_retrievals_intr);
45219 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45220 _leave(" = -ERESTARTSYS");
45221 return -ERESTARTSYS;
45222 }
45223 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45224 */
45225 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45226 struct fscache_retrieval *op,
45227 - atomic_t *stat_op_waits,
45228 - atomic_t *stat_object_dead)
45229 + atomic_unchecked_t *stat_op_waits,
45230 + atomic_unchecked_t *stat_object_dead)
45231 {
45232 int ret;
45233
45234 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45235 goto check_if_dead;
45236
45237 _debug(">>> WT");
45238 - fscache_stat(stat_op_waits);
45239 + fscache_stat_unchecked(stat_op_waits);
45240 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45241 fscache_wait_bit_interruptible,
45242 TASK_INTERRUPTIBLE) < 0) {
45243 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45244
45245 check_if_dead:
45246 if (unlikely(fscache_object_is_dead(object))) {
45247 - fscache_stat(stat_object_dead);
45248 + fscache_stat_unchecked(stat_object_dead);
45249 return -ENOBUFS;
45250 }
45251 return 0;
45252 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45253
45254 _enter("%p,%p,,,", cookie, page);
45255
45256 - fscache_stat(&fscache_n_retrievals);
45257 + fscache_stat_unchecked(&fscache_n_retrievals);
45258
45259 if (hlist_empty(&cookie->backing_objects))
45260 goto nobufs;
45261 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45262 goto nobufs_unlock;
45263 spin_unlock(&cookie->lock);
45264
45265 - fscache_stat(&fscache_n_retrieval_ops);
45266 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45267
45268 /* pin the netfs read context in case we need to do the actual netfs
45269 * read because we've encountered a cache read failure */
45270 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45271
45272 error:
45273 if (ret == -ENOMEM)
45274 - fscache_stat(&fscache_n_retrievals_nomem);
45275 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45276 else if (ret == -ERESTARTSYS)
45277 - fscache_stat(&fscache_n_retrievals_intr);
45278 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45279 else if (ret == -ENODATA)
45280 - fscache_stat(&fscache_n_retrievals_nodata);
45281 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45282 else if (ret < 0)
45283 - fscache_stat(&fscache_n_retrievals_nobufs);
45284 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45285 else
45286 - fscache_stat(&fscache_n_retrievals_ok);
45287 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45288
45289 fscache_put_retrieval(op);
45290 _leave(" = %d", ret);
45291 @@ -429,7 +429,7 @@ nobufs_unlock:
45292 spin_unlock(&cookie->lock);
45293 kfree(op);
45294 nobufs:
45295 - fscache_stat(&fscache_n_retrievals_nobufs);
45296 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45297 _leave(" = -ENOBUFS");
45298 return -ENOBUFS;
45299 }
45300 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45301
45302 _enter("%p,,%d,,,", cookie, *nr_pages);
45303
45304 - fscache_stat(&fscache_n_retrievals);
45305 + fscache_stat_unchecked(&fscache_n_retrievals);
45306
45307 if (hlist_empty(&cookie->backing_objects))
45308 goto nobufs;
45309 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45310 goto nobufs_unlock;
45311 spin_unlock(&cookie->lock);
45312
45313 - fscache_stat(&fscache_n_retrieval_ops);
45314 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45315
45316 /* pin the netfs read context in case we need to do the actual netfs
45317 * read because we've encountered a cache read failure */
45318 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45319
45320 error:
45321 if (ret == -ENOMEM)
45322 - fscache_stat(&fscache_n_retrievals_nomem);
45323 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45324 else if (ret == -ERESTARTSYS)
45325 - fscache_stat(&fscache_n_retrievals_intr);
45326 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45327 else if (ret == -ENODATA)
45328 - fscache_stat(&fscache_n_retrievals_nodata);
45329 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45330 else if (ret < 0)
45331 - fscache_stat(&fscache_n_retrievals_nobufs);
45332 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45333 else
45334 - fscache_stat(&fscache_n_retrievals_ok);
45335 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45336
45337 fscache_put_retrieval(op);
45338 _leave(" = %d", ret);
45339 @@ -545,7 +545,7 @@ nobufs_unlock:
45340 spin_unlock(&cookie->lock);
45341 kfree(op);
45342 nobufs:
45343 - fscache_stat(&fscache_n_retrievals_nobufs);
45344 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45345 _leave(" = -ENOBUFS");
45346 return -ENOBUFS;
45347 }
45348 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45349
45350 _enter("%p,%p,,,", cookie, page);
45351
45352 - fscache_stat(&fscache_n_allocs);
45353 + fscache_stat_unchecked(&fscache_n_allocs);
45354
45355 if (hlist_empty(&cookie->backing_objects))
45356 goto nobufs;
45357 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45358 goto nobufs_unlock;
45359 spin_unlock(&cookie->lock);
45360
45361 - fscache_stat(&fscache_n_alloc_ops);
45362 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45363
45364 ret = fscache_wait_for_retrieval_activation(
45365 object, op,
45366 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45367
45368 error:
45369 if (ret == -ERESTARTSYS)
45370 - fscache_stat(&fscache_n_allocs_intr);
45371 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45372 else if (ret < 0)
45373 - fscache_stat(&fscache_n_allocs_nobufs);
45374 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45375 else
45376 - fscache_stat(&fscache_n_allocs_ok);
45377 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45378
45379 fscache_put_retrieval(op);
45380 _leave(" = %d", ret);
45381 @@ -625,7 +625,7 @@ nobufs_unlock:
45382 spin_unlock(&cookie->lock);
45383 kfree(op);
45384 nobufs:
45385 - fscache_stat(&fscache_n_allocs_nobufs);
45386 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45387 _leave(" = -ENOBUFS");
45388 return -ENOBUFS;
45389 }
45390 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45391
45392 spin_lock(&cookie->stores_lock);
45393
45394 - fscache_stat(&fscache_n_store_calls);
45395 + fscache_stat_unchecked(&fscache_n_store_calls);
45396
45397 /* find a page to store */
45398 page = NULL;
45399 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45400 page = results[0];
45401 _debug("gang %d [%lx]", n, page->index);
45402 if (page->index > op->store_limit) {
45403 - fscache_stat(&fscache_n_store_pages_over_limit);
45404 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45405 goto superseded;
45406 }
45407
45408 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45409 spin_unlock(&cookie->stores_lock);
45410 spin_unlock(&object->lock);
45411
45412 - fscache_stat(&fscache_n_store_pages);
45413 + fscache_stat_unchecked(&fscache_n_store_pages);
45414 fscache_stat(&fscache_n_cop_write_page);
45415 ret = object->cache->ops->write_page(op, page);
45416 fscache_stat_d(&fscache_n_cop_write_page);
45417 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45418 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45419 ASSERT(PageFsCache(page));
45420
45421 - fscache_stat(&fscache_n_stores);
45422 + fscache_stat_unchecked(&fscache_n_stores);
45423
45424 op = kzalloc(sizeof(*op), GFP_NOIO);
45425 if (!op)
45426 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45427 spin_unlock(&cookie->stores_lock);
45428 spin_unlock(&object->lock);
45429
45430 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45431 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45432 op->store_limit = object->store_limit;
45433
45434 if (fscache_submit_op(object, &op->op) < 0)
45435 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45436
45437 spin_unlock(&cookie->lock);
45438 radix_tree_preload_end();
45439 - fscache_stat(&fscache_n_store_ops);
45440 - fscache_stat(&fscache_n_stores_ok);
45441 + fscache_stat_unchecked(&fscache_n_store_ops);
45442 + fscache_stat_unchecked(&fscache_n_stores_ok);
45443
45444 /* the work queue now carries its own ref on the object */
45445 fscache_put_operation(&op->op);
45446 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45447 return 0;
45448
45449 already_queued:
45450 - fscache_stat(&fscache_n_stores_again);
45451 + fscache_stat_unchecked(&fscache_n_stores_again);
45452 already_pending:
45453 spin_unlock(&cookie->stores_lock);
45454 spin_unlock(&object->lock);
45455 spin_unlock(&cookie->lock);
45456 radix_tree_preload_end();
45457 kfree(op);
45458 - fscache_stat(&fscache_n_stores_ok);
45459 + fscache_stat_unchecked(&fscache_n_stores_ok);
45460 _leave(" = 0");
45461 return 0;
45462
45463 @@ -851,14 +851,14 @@ nobufs:
45464 spin_unlock(&cookie->lock);
45465 radix_tree_preload_end();
45466 kfree(op);
45467 - fscache_stat(&fscache_n_stores_nobufs);
45468 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45469 _leave(" = -ENOBUFS");
45470 return -ENOBUFS;
45471
45472 nomem_free:
45473 kfree(op);
45474 nomem:
45475 - fscache_stat(&fscache_n_stores_oom);
45476 + fscache_stat_unchecked(&fscache_n_stores_oom);
45477 _leave(" = -ENOMEM");
45478 return -ENOMEM;
45479 }
45480 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45481 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45482 ASSERTCMP(page, !=, NULL);
45483
45484 - fscache_stat(&fscache_n_uncaches);
45485 + fscache_stat_unchecked(&fscache_n_uncaches);
45486
45487 /* cache withdrawal may beat us to it */
45488 if (!PageFsCache(page))
45489 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45490 unsigned long loop;
45491
45492 #ifdef CONFIG_FSCACHE_STATS
45493 - atomic_add(pagevec->nr, &fscache_n_marks);
45494 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45495 #endif
45496
45497 for (loop = 0; loop < pagevec->nr; loop++) {
45498 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45499 index 4765190..2a067f2 100644
45500 --- a/fs/fscache/stats.c
45501 +++ b/fs/fscache/stats.c
45502 @@ -18,95 +18,95 @@
45503 /*
45504 * operation counters
45505 */
45506 -atomic_t fscache_n_op_pend;
45507 -atomic_t fscache_n_op_run;
45508 -atomic_t fscache_n_op_enqueue;
45509 -atomic_t fscache_n_op_requeue;
45510 -atomic_t fscache_n_op_deferred_release;
45511 -atomic_t fscache_n_op_release;
45512 -atomic_t fscache_n_op_gc;
45513 -atomic_t fscache_n_op_cancelled;
45514 -atomic_t fscache_n_op_rejected;
45515 +atomic_unchecked_t fscache_n_op_pend;
45516 +atomic_unchecked_t fscache_n_op_run;
45517 +atomic_unchecked_t fscache_n_op_enqueue;
45518 +atomic_unchecked_t fscache_n_op_requeue;
45519 +atomic_unchecked_t fscache_n_op_deferred_release;
45520 +atomic_unchecked_t fscache_n_op_release;
45521 +atomic_unchecked_t fscache_n_op_gc;
45522 +atomic_unchecked_t fscache_n_op_cancelled;
45523 +atomic_unchecked_t fscache_n_op_rejected;
45524
45525 -atomic_t fscache_n_attr_changed;
45526 -atomic_t fscache_n_attr_changed_ok;
45527 -atomic_t fscache_n_attr_changed_nobufs;
45528 -atomic_t fscache_n_attr_changed_nomem;
45529 -atomic_t fscache_n_attr_changed_calls;
45530 +atomic_unchecked_t fscache_n_attr_changed;
45531 +atomic_unchecked_t fscache_n_attr_changed_ok;
45532 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45533 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45534 +atomic_unchecked_t fscache_n_attr_changed_calls;
45535
45536 -atomic_t fscache_n_allocs;
45537 -atomic_t fscache_n_allocs_ok;
45538 -atomic_t fscache_n_allocs_wait;
45539 -atomic_t fscache_n_allocs_nobufs;
45540 -atomic_t fscache_n_allocs_intr;
45541 -atomic_t fscache_n_allocs_object_dead;
45542 -atomic_t fscache_n_alloc_ops;
45543 -atomic_t fscache_n_alloc_op_waits;
45544 +atomic_unchecked_t fscache_n_allocs;
45545 +atomic_unchecked_t fscache_n_allocs_ok;
45546 +atomic_unchecked_t fscache_n_allocs_wait;
45547 +atomic_unchecked_t fscache_n_allocs_nobufs;
45548 +atomic_unchecked_t fscache_n_allocs_intr;
45549 +atomic_unchecked_t fscache_n_allocs_object_dead;
45550 +atomic_unchecked_t fscache_n_alloc_ops;
45551 +atomic_unchecked_t fscache_n_alloc_op_waits;
45552
45553 -atomic_t fscache_n_retrievals;
45554 -atomic_t fscache_n_retrievals_ok;
45555 -atomic_t fscache_n_retrievals_wait;
45556 -atomic_t fscache_n_retrievals_nodata;
45557 -atomic_t fscache_n_retrievals_nobufs;
45558 -atomic_t fscache_n_retrievals_intr;
45559 -atomic_t fscache_n_retrievals_nomem;
45560 -atomic_t fscache_n_retrievals_object_dead;
45561 -atomic_t fscache_n_retrieval_ops;
45562 -atomic_t fscache_n_retrieval_op_waits;
45563 +atomic_unchecked_t fscache_n_retrievals;
45564 +atomic_unchecked_t fscache_n_retrievals_ok;
45565 +atomic_unchecked_t fscache_n_retrievals_wait;
45566 +atomic_unchecked_t fscache_n_retrievals_nodata;
45567 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45568 +atomic_unchecked_t fscache_n_retrievals_intr;
45569 +atomic_unchecked_t fscache_n_retrievals_nomem;
45570 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45571 +atomic_unchecked_t fscache_n_retrieval_ops;
45572 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45573
45574 -atomic_t fscache_n_stores;
45575 -atomic_t fscache_n_stores_ok;
45576 -atomic_t fscache_n_stores_again;
45577 -atomic_t fscache_n_stores_nobufs;
45578 -atomic_t fscache_n_stores_oom;
45579 -atomic_t fscache_n_store_ops;
45580 -atomic_t fscache_n_store_calls;
45581 -atomic_t fscache_n_store_pages;
45582 -atomic_t fscache_n_store_radix_deletes;
45583 -atomic_t fscache_n_store_pages_over_limit;
45584 +atomic_unchecked_t fscache_n_stores;
45585 +atomic_unchecked_t fscache_n_stores_ok;
45586 +atomic_unchecked_t fscache_n_stores_again;
45587 +atomic_unchecked_t fscache_n_stores_nobufs;
45588 +atomic_unchecked_t fscache_n_stores_oom;
45589 +atomic_unchecked_t fscache_n_store_ops;
45590 +atomic_unchecked_t fscache_n_store_calls;
45591 +atomic_unchecked_t fscache_n_store_pages;
45592 +atomic_unchecked_t fscache_n_store_radix_deletes;
45593 +atomic_unchecked_t fscache_n_store_pages_over_limit;
45594
45595 -atomic_t fscache_n_store_vmscan_not_storing;
45596 -atomic_t fscache_n_store_vmscan_gone;
45597 -atomic_t fscache_n_store_vmscan_busy;
45598 -atomic_t fscache_n_store_vmscan_cancelled;
45599 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45600 +atomic_unchecked_t fscache_n_store_vmscan_gone;
45601 +atomic_unchecked_t fscache_n_store_vmscan_busy;
45602 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45603
45604 -atomic_t fscache_n_marks;
45605 -atomic_t fscache_n_uncaches;
45606 +atomic_unchecked_t fscache_n_marks;
45607 +atomic_unchecked_t fscache_n_uncaches;
45608
45609 -atomic_t fscache_n_acquires;
45610 -atomic_t fscache_n_acquires_null;
45611 -atomic_t fscache_n_acquires_no_cache;
45612 -atomic_t fscache_n_acquires_ok;
45613 -atomic_t fscache_n_acquires_nobufs;
45614 -atomic_t fscache_n_acquires_oom;
45615 +atomic_unchecked_t fscache_n_acquires;
45616 +atomic_unchecked_t fscache_n_acquires_null;
45617 +atomic_unchecked_t fscache_n_acquires_no_cache;
45618 +atomic_unchecked_t fscache_n_acquires_ok;
45619 +atomic_unchecked_t fscache_n_acquires_nobufs;
45620 +atomic_unchecked_t fscache_n_acquires_oom;
45621
45622 -atomic_t fscache_n_updates;
45623 -atomic_t fscache_n_updates_null;
45624 -atomic_t fscache_n_updates_run;
45625 +atomic_unchecked_t fscache_n_updates;
45626 +atomic_unchecked_t fscache_n_updates_null;
45627 +atomic_unchecked_t fscache_n_updates_run;
45628
45629 -atomic_t fscache_n_relinquishes;
45630 -atomic_t fscache_n_relinquishes_null;
45631 -atomic_t fscache_n_relinquishes_waitcrt;
45632 -atomic_t fscache_n_relinquishes_retire;
45633 +atomic_unchecked_t fscache_n_relinquishes;
45634 +atomic_unchecked_t fscache_n_relinquishes_null;
45635 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45636 +atomic_unchecked_t fscache_n_relinquishes_retire;
45637
45638 -atomic_t fscache_n_cookie_index;
45639 -atomic_t fscache_n_cookie_data;
45640 -atomic_t fscache_n_cookie_special;
45641 +atomic_unchecked_t fscache_n_cookie_index;
45642 +atomic_unchecked_t fscache_n_cookie_data;
45643 +atomic_unchecked_t fscache_n_cookie_special;
45644
45645 -atomic_t fscache_n_object_alloc;
45646 -atomic_t fscache_n_object_no_alloc;
45647 -atomic_t fscache_n_object_lookups;
45648 -atomic_t fscache_n_object_lookups_negative;
45649 -atomic_t fscache_n_object_lookups_positive;
45650 -atomic_t fscache_n_object_lookups_timed_out;
45651 -atomic_t fscache_n_object_created;
45652 -atomic_t fscache_n_object_avail;
45653 -atomic_t fscache_n_object_dead;
45654 +atomic_unchecked_t fscache_n_object_alloc;
45655 +atomic_unchecked_t fscache_n_object_no_alloc;
45656 +atomic_unchecked_t fscache_n_object_lookups;
45657 +atomic_unchecked_t fscache_n_object_lookups_negative;
45658 +atomic_unchecked_t fscache_n_object_lookups_positive;
45659 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
45660 +atomic_unchecked_t fscache_n_object_created;
45661 +atomic_unchecked_t fscache_n_object_avail;
45662 +atomic_unchecked_t fscache_n_object_dead;
45663
45664 -atomic_t fscache_n_checkaux_none;
45665 -atomic_t fscache_n_checkaux_okay;
45666 -atomic_t fscache_n_checkaux_update;
45667 -atomic_t fscache_n_checkaux_obsolete;
45668 +atomic_unchecked_t fscache_n_checkaux_none;
45669 +atomic_unchecked_t fscache_n_checkaux_okay;
45670 +atomic_unchecked_t fscache_n_checkaux_update;
45671 +atomic_unchecked_t fscache_n_checkaux_obsolete;
45672
45673 atomic_t fscache_n_cop_alloc_object;
45674 atomic_t fscache_n_cop_lookup_object;
45675 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45676 seq_puts(m, "FS-Cache statistics\n");
45677
45678 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45679 - atomic_read(&fscache_n_cookie_index),
45680 - atomic_read(&fscache_n_cookie_data),
45681 - atomic_read(&fscache_n_cookie_special));
45682 + atomic_read_unchecked(&fscache_n_cookie_index),
45683 + atomic_read_unchecked(&fscache_n_cookie_data),
45684 + atomic_read_unchecked(&fscache_n_cookie_special));
45685
45686 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45687 - atomic_read(&fscache_n_object_alloc),
45688 - atomic_read(&fscache_n_object_no_alloc),
45689 - atomic_read(&fscache_n_object_avail),
45690 - atomic_read(&fscache_n_object_dead));
45691 + atomic_read_unchecked(&fscache_n_object_alloc),
45692 + atomic_read_unchecked(&fscache_n_object_no_alloc),
45693 + atomic_read_unchecked(&fscache_n_object_avail),
45694 + atomic_read_unchecked(&fscache_n_object_dead));
45695 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45696 - atomic_read(&fscache_n_checkaux_none),
45697 - atomic_read(&fscache_n_checkaux_okay),
45698 - atomic_read(&fscache_n_checkaux_update),
45699 - atomic_read(&fscache_n_checkaux_obsolete));
45700 + atomic_read_unchecked(&fscache_n_checkaux_none),
45701 + atomic_read_unchecked(&fscache_n_checkaux_okay),
45702 + atomic_read_unchecked(&fscache_n_checkaux_update),
45703 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45704
45705 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45706 - atomic_read(&fscache_n_marks),
45707 - atomic_read(&fscache_n_uncaches));
45708 + atomic_read_unchecked(&fscache_n_marks),
45709 + atomic_read_unchecked(&fscache_n_uncaches));
45710
45711 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45712 " oom=%u\n",
45713 - atomic_read(&fscache_n_acquires),
45714 - atomic_read(&fscache_n_acquires_null),
45715 - atomic_read(&fscache_n_acquires_no_cache),
45716 - atomic_read(&fscache_n_acquires_ok),
45717 - atomic_read(&fscache_n_acquires_nobufs),
45718 - atomic_read(&fscache_n_acquires_oom));
45719 + atomic_read_unchecked(&fscache_n_acquires),
45720 + atomic_read_unchecked(&fscache_n_acquires_null),
45721 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
45722 + atomic_read_unchecked(&fscache_n_acquires_ok),
45723 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
45724 + atomic_read_unchecked(&fscache_n_acquires_oom));
45725
45726 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45727 - atomic_read(&fscache_n_object_lookups),
45728 - atomic_read(&fscache_n_object_lookups_negative),
45729 - atomic_read(&fscache_n_object_lookups_positive),
45730 - atomic_read(&fscache_n_object_created),
45731 - atomic_read(&fscache_n_object_lookups_timed_out));
45732 + atomic_read_unchecked(&fscache_n_object_lookups),
45733 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
45734 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
45735 + atomic_read_unchecked(&fscache_n_object_created),
45736 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45737
45738 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45739 - atomic_read(&fscache_n_updates),
45740 - atomic_read(&fscache_n_updates_null),
45741 - atomic_read(&fscache_n_updates_run));
45742 + atomic_read_unchecked(&fscache_n_updates),
45743 + atomic_read_unchecked(&fscache_n_updates_null),
45744 + atomic_read_unchecked(&fscache_n_updates_run));
45745
45746 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45747 - atomic_read(&fscache_n_relinquishes),
45748 - atomic_read(&fscache_n_relinquishes_null),
45749 - atomic_read(&fscache_n_relinquishes_waitcrt),
45750 - atomic_read(&fscache_n_relinquishes_retire));
45751 + atomic_read_unchecked(&fscache_n_relinquishes),
45752 + atomic_read_unchecked(&fscache_n_relinquishes_null),
45753 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45754 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
45755
45756 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45757 - atomic_read(&fscache_n_attr_changed),
45758 - atomic_read(&fscache_n_attr_changed_ok),
45759 - atomic_read(&fscache_n_attr_changed_nobufs),
45760 - atomic_read(&fscache_n_attr_changed_nomem),
45761 - atomic_read(&fscache_n_attr_changed_calls));
45762 + atomic_read_unchecked(&fscache_n_attr_changed),
45763 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
45764 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45765 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45766 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
45767
45768 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45769 - atomic_read(&fscache_n_allocs),
45770 - atomic_read(&fscache_n_allocs_ok),
45771 - atomic_read(&fscache_n_allocs_wait),
45772 - atomic_read(&fscache_n_allocs_nobufs),
45773 - atomic_read(&fscache_n_allocs_intr));
45774 + atomic_read_unchecked(&fscache_n_allocs),
45775 + atomic_read_unchecked(&fscache_n_allocs_ok),
45776 + atomic_read_unchecked(&fscache_n_allocs_wait),
45777 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
45778 + atomic_read_unchecked(&fscache_n_allocs_intr));
45779 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45780 - atomic_read(&fscache_n_alloc_ops),
45781 - atomic_read(&fscache_n_alloc_op_waits),
45782 - atomic_read(&fscache_n_allocs_object_dead));
45783 + atomic_read_unchecked(&fscache_n_alloc_ops),
45784 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
45785 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
45786
45787 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45788 " int=%u oom=%u\n",
45789 - atomic_read(&fscache_n_retrievals),
45790 - atomic_read(&fscache_n_retrievals_ok),
45791 - atomic_read(&fscache_n_retrievals_wait),
45792 - atomic_read(&fscache_n_retrievals_nodata),
45793 - atomic_read(&fscache_n_retrievals_nobufs),
45794 - atomic_read(&fscache_n_retrievals_intr),
45795 - atomic_read(&fscache_n_retrievals_nomem));
45796 + atomic_read_unchecked(&fscache_n_retrievals),
45797 + atomic_read_unchecked(&fscache_n_retrievals_ok),
45798 + atomic_read_unchecked(&fscache_n_retrievals_wait),
45799 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
45800 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45801 + atomic_read_unchecked(&fscache_n_retrievals_intr),
45802 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
45803 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45804 - atomic_read(&fscache_n_retrieval_ops),
45805 - atomic_read(&fscache_n_retrieval_op_waits),
45806 - atomic_read(&fscache_n_retrievals_object_dead));
45807 + atomic_read_unchecked(&fscache_n_retrieval_ops),
45808 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45809 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45810
45811 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45812 - atomic_read(&fscache_n_stores),
45813 - atomic_read(&fscache_n_stores_ok),
45814 - atomic_read(&fscache_n_stores_again),
45815 - atomic_read(&fscache_n_stores_nobufs),
45816 - atomic_read(&fscache_n_stores_oom));
45817 + atomic_read_unchecked(&fscache_n_stores),
45818 + atomic_read_unchecked(&fscache_n_stores_ok),
45819 + atomic_read_unchecked(&fscache_n_stores_again),
45820 + atomic_read_unchecked(&fscache_n_stores_nobufs),
45821 + atomic_read_unchecked(&fscache_n_stores_oom));
45822 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45823 - atomic_read(&fscache_n_store_ops),
45824 - atomic_read(&fscache_n_store_calls),
45825 - atomic_read(&fscache_n_store_pages),
45826 - atomic_read(&fscache_n_store_radix_deletes),
45827 - atomic_read(&fscache_n_store_pages_over_limit));
45828 + atomic_read_unchecked(&fscache_n_store_ops),
45829 + atomic_read_unchecked(&fscache_n_store_calls),
45830 + atomic_read_unchecked(&fscache_n_store_pages),
45831 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
45832 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45833
45834 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45835 - atomic_read(&fscache_n_store_vmscan_not_storing),
45836 - atomic_read(&fscache_n_store_vmscan_gone),
45837 - atomic_read(&fscache_n_store_vmscan_busy),
45838 - atomic_read(&fscache_n_store_vmscan_cancelled));
45839 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45840 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45841 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45842 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45843
45844 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45845 - atomic_read(&fscache_n_op_pend),
45846 - atomic_read(&fscache_n_op_run),
45847 - atomic_read(&fscache_n_op_enqueue),
45848 - atomic_read(&fscache_n_op_cancelled),
45849 - atomic_read(&fscache_n_op_rejected));
45850 + atomic_read_unchecked(&fscache_n_op_pend),
45851 + atomic_read_unchecked(&fscache_n_op_run),
45852 + atomic_read_unchecked(&fscache_n_op_enqueue),
45853 + atomic_read_unchecked(&fscache_n_op_cancelled),
45854 + atomic_read_unchecked(&fscache_n_op_rejected));
45855 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45856 - atomic_read(&fscache_n_op_deferred_release),
45857 - atomic_read(&fscache_n_op_release),
45858 - atomic_read(&fscache_n_op_gc));
45859 + atomic_read_unchecked(&fscache_n_op_deferred_release),
45860 + atomic_read_unchecked(&fscache_n_op_release),
45861 + atomic_read_unchecked(&fscache_n_op_gc));
45862
45863 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45864 atomic_read(&fscache_n_cop_alloc_object),
45865 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45866 index 3426521..3b75162 100644
45867 --- a/fs/fuse/cuse.c
45868 +++ b/fs/fuse/cuse.c
45869 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
45870 INIT_LIST_HEAD(&cuse_conntbl[i]);
45871
45872 /* inherit and extend fuse_dev_operations */
45873 - cuse_channel_fops = fuse_dev_operations;
45874 - cuse_channel_fops.owner = THIS_MODULE;
45875 - cuse_channel_fops.open = cuse_channel_open;
45876 - cuse_channel_fops.release = cuse_channel_release;
45877 + pax_open_kernel();
45878 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45879 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45880 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
45881 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
45882 + pax_close_kernel();
45883
45884 cuse_class = class_create(THIS_MODULE, "cuse");
45885 if (IS_ERR(cuse_class))
45886 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45887 index 7df2b5e..5804aa7 100644
45888 --- a/fs/fuse/dev.c
45889 +++ b/fs/fuse/dev.c
45890 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45891 ret = 0;
45892 pipe_lock(pipe);
45893
45894 - if (!pipe->readers) {
45895 + if (!atomic_read(&pipe->readers)) {
45896 send_sig(SIGPIPE, current, 0);
45897 if (!ret)
45898 ret = -EPIPE;
45899 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
45900 index df5ac04..08cee2a 100644
45901 --- a/fs/fuse/dir.c
45902 +++ b/fs/fuse/dir.c
45903 @@ -1180,7 +1180,7 @@ static char *read_link(struct dentry *dentry)
45904 return link;
45905 }
45906
45907 -static void free_link(char *link)
45908 +static void free_link(const char *link)
45909 {
45910 if (!IS_ERR(link))
45911 free_page((unsigned long) link);
45912 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
45913 index a9ba244..d9df391 100644
45914 --- a/fs/gfs2/inode.c
45915 +++ b/fs/gfs2/inode.c
45916 @@ -1496,7 +1496,7 @@ out:
45917
45918 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45919 {
45920 - char *s = nd_get_link(nd);
45921 + const char *s = nd_get_link(nd);
45922 if (!IS_ERR(s))
45923 kfree(s);
45924 }
45925 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
45926 index 001ef01..f7d5f07 100644
45927 --- a/fs/hugetlbfs/inode.c
45928 +++ b/fs/hugetlbfs/inode.c
45929 @@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
45930 .kill_sb = kill_litter_super,
45931 };
45932
45933 -static struct vfsmount *hugetlbfs_vfsmount;
45934 +struct vfsmount *hugetlbfs_vfsmount;
45935
45936 static int can_do_hugetlb_shm(void)
45937 {
45938 diff --git a/fs/inode.c b/fs/inode.c
45939 index 9f4f5fe..6214688 100644
45940 --- a/fs/inode.c
45941 +++ b/fs/inode.c
45942 @@ -860,8 +860,8 @@ unsigned int get_next_ino(void)
45943
45944 #ifdef CONFIG_SMP
45945 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
45946 - static atomic_t shared_last_ino;
45947 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
45948 + static atomic_unchecked_t shared_last_ino;
45949 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
45950
45951 res = next - LAST_INO_BATCH;
45952 }
45953 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
45954 index 4a6cf28..d3a29d3 100644
45955 --- a/fs/jffs2/erase.c
45956 +++ b/fs/jffs2/erase.c
45957 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
45958 struct jffs2_unknown_node marker = {
45959 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
45960 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45961 - .totlen = cpu_to_je32(c->cleanmarker_size)
45962 + .totlen = cpu_to_je32(c->cleanmarker_size),
45963 + .hdr_crc = cpu_to_je32(0)
45964 };
45965
45966 jffs2_prealloc_raw_node_refs(c, jeb, 1);
45967 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
45968 index 74d9be1..d5dd140 100644
45969 --- a/fs/jffs2/wbuf.c
45970 +++ b/fs/jffs2/wbuf.c
45971 @@ -1022,7 +1022,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
45972 {
45973 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
45974 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45975 - .totlen = constant_cpu_to_je32(8)
45976 + .totlen = constant_cpu_to_je32(8),
45977 + .hdr_crc = constant_cpu_to_je32(0)
45978 };
45979
45980 /*
45981 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
45982 index 4a82950..bcaa0cb 100644
45983 --- a/fs/jfs/super.c
45984 +++ b/fs/jfs/super.c
45985 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
45986
45987 jfs_inode_cachep =
45988 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
45989 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
45990 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
45991 init_once);
45992 if (jfs_inode_cachep == NULL)
45993 return -ENOMEM;
45994 diff --git a/fs/libfs.c b/fs/libfs.c
45995 index 18d08f5..fe3dc64 100644
45996 --- a/fs/libfs.c
45997 +++ b/fs/libfs.c
45998 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45999
46000 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46001 struct dentry *next;
46002 + char d_name[sizeof(next->d_iname)];
46003 + const unsigned char *name;
46004 +
46005 next = list_entry(p, struct dentry, d_u.d_child);
46006 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46007 if (!simple_positive(next)) {
46008 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46009
46010 spin_unlock(&next->d_lock);
46011 spin_unlock(&dentry->d_lock);
46012 - if (filldir(dirent, next->d_name.name,
46013 + name = next->d_name.name;
46014 + if (name == next->d_iname) {
46015 + memcpy(d_name, name, next->d_name.len);
46016 + name = d_name;
46017 + }
46018 + if (filldir(dirent, name,
46019 next->d_name.len, filp->f_pos,
46020 next->d_inode->i_ino,
46021 dt_type(next->d_inode)) < 0)
46022 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46023 index 8392cb8..80d6193 100644
46024 --- a/fs/lockd/clntproc.c
46025 +++ b/fs/lockd/clntproc.c
46026 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46027 /*
46028 * Cookie counter for NLM requests
46029 */
46030 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46031 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46032
46033 void nlmclnt_next_cookie(struct nlm_cookie *c)
46034 {
46035 - u32 cookie = atomic_inc_return(&nlm_cookie);
46036 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46037
46038 memcpy(c->data, &cookie, 4);
46039 c->len=4;
46040 diff --git a/fs/locks.c b/fs/locks.c
46041 index 0d68f1f..f216b79 100644
46042 --- a/fs/locks.c
46043 +++ b/fs/locks.c
46044 @@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
46045 return;
46046
46047 if (filp->f_op && filp->f_op->flock) {
46048 - struct file_lock fl = {
46049 + struct file_lock flock = {
46050 .fl_pid = current->tgid,
46051 .fl_file = filp,
46052 .fl_flags = FL_FLOCK,
46053 .fl_type = F_UNLCK,
46054 .fl_end = OFFSET_MAX,
46055 };
46056 - filp->f_op->flock(filp, F_SETLKW, &fl);
46057 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
46058 - fl.fl_ops->fl_release_private(&fl);
46059 + filp->f_op->flock(filp, F_SETLKW, &flock);
46060 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
46061 + flock.fl_ops->fl_release_private(&flock);
46062 }
46063
46064 lock_flocks();
46065 diff --git a/fs/namei.c b/fs/namei.c
46066 index c427919..e37fd3f 100644
46067 --- a/fs/namei.c
46068 +++ b/fs/namei.c
46069 @@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46070 if (ret != -EACCES)
46071 return ret;
46072
46073 +#ifdef CONFIG_GRKERNSEC
46074 + /* we'll block if we have to log due to a denied capability use */
46075 + if (mask & MAY_NOT_BLOCK)
46076 + return -ECHILD;
46077 +#endif
46078 +
46079 if (S_ISDIR(inode->i_mode)) {
46080 /* DACs are overridable for directories */
46081 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46082 - return 0;
46083 if (!(mask & MAY_WRITE))
46084 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46085 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46086 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46087 return 0;
46088 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46089 + return 0;
46090 return -EACCES;
46091 }
46092 /*
46093 + * Searching includes executable on directories, else just read.
46094 + */
46095 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46096 + if (mask == MAY_READ)
46097 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46098 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46099 + return 0;
46100 +
46101 + /*
46102 * Read/write DACs are always overridable.
46103 * Executable DACs are overridable when there is
46104 * at least one exec bit set.
46105 @@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
46106 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46107 return 0;
46108
46109 - /*
46110 - * Searching includes executable on directories, else just read.
46111 - */
46112 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46113 - if (mask == MAY_READ)
46114 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46115 - return 0;
46116 -
46117 return -EACCES;
46118 }
46119
46120 @@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46121 return error;
46122 }
46123
46124 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
46125 + dentry->d_inode, dentry, nd->path.mnt)) {
46126 + error = -EACCES;
46127 + *p = ERR_PTR(error); /* no ->put_link(), please */
46128 + path_put(&nd->path);
46129 + return error;
46130 + }
46131 +
46132 nd->last_type = LAST_BIND;
46133 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46134 error = PTR_ERR(*p);
46135 if (!IS_ERR(*p)) {
46136 - char *s = nd_get_link(nd);
46137 + const char *s = nd_get_link(nd);
46138 error = 0;
46139 if (s)
46140 error = __vfs_follow_link(nd, s);
46141 @@ -1753,6 +1769,21 @@ static int path_lookupat(int dfd, const char *name,
46142 if (!err)
46143 err = complete_walk(nd);
46144
46145 + if (!(nd->flags & LOOKUP_PARENT)) {
46146 +#ifdef CONFIG_GRKERNSEC
46147 + if (flags & LOOKUP_RCU) {
46148 + if (!err)
46149 + path_put(&nd->path);
46150 + err = -ECHILD;
46151 + } else
46152 +#endif
46153 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46154 + if (!err)
46155 + path_put(&nd->path);
46156 + err = -ENOENT;
46157 + }
46158 + }
46159 +
46160 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46161 if (!nd->inode->i_op->lookup) {
46162 path_put(&nd->path);
46163 @@ -1780,6 +1811,15 @@ static int do_path_lookup(int dfd, const char *name,
46164 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46165
46166 if (likely(!retval)) {
46167 + if (*name != '/' && nd->path.dentry && nd->inode) {
46168 +#ifdef CONFIG_GRKERNSEC
46169 + if (flags & LOOKUP_RCU)
46170 + return -ECHILD;
46171 +#endif
46172 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46173 + return -ENOENT;
46174 + }
46175 +
46176 if (unlikely(!audit_dummy_context())) {
46177 if (nd->path.dentry && nd->inode)
46178 audit_inode(name, nd->path.dentry);
46179 @@ -2126,6 +2166,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46180 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46181 return -EPERM;
46182
46183 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46184 + return -EPERM;
46185 + if (gr_handle_rawio(inode))
46186 + return -EPERM;
46187 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46188 + return -EACCES;
46189 +
46190 return 0;
46191 }
46192
46193 @@ -2187,6 +2234,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46194 error = complete_walk(nd);
46195 if (error)
46196 return ERR_PTR(error);
46197 +#ifdef CONFIG_GRKERNSEC
46198 + if (nd->flags & LOOKUP_RCU) {
46199 + error = -ECHILD;
46200 + goto exit;
46201 + }
46202 +#endif
46203 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46204 + error = -ENOENT;
46205 + goto exit;
46206 + }
46207 audit_inode(pathname, nd->path.dentry);
46208 if (open_flag & O_CREAT) {
46209 error = -EISDIR;
46210 @@ -2197,6 +2254,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46211 error = complete_walk(nd);
46212 if (error)
46213 return ERR_PTR(error);
46214 +#ifdef CONFIG_GRKERNSEC
46215 + if (nd->flags & LOOKUP_RCU) {
46216 + error = -ECHILD;
46217 + goto exit;
46218 + }
46219 +#endif
46220 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46221 + error = -ENOENT;
46222 + goto exit;
46223 + }
46224 audit_inode(pathname, dir);
46225 goto ok;
46226 }
46227 @@ -2218,6 +2285,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46228 error = complete_walk(nd);
46229 if (error)
46230 return ERR_PTR(error);
46231 +#ifdef CONFIG_GRKERNSEC
46232 + if (nd->flags & LOOKUP_RCU) {
46233 + error = -ECHILD;
46234 + goto exit;
46235 + }
46236 +#endif
46237 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46238 + error = -ENOENT;
46239 + goto exit;
46240 + }
46241
46242 error = -ENOTDIR;
46243 if (nd->flags & LOOKUP_DIRECTORY) {
46244 @@ -2258,6 +2335,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46245 /* Negative dentry, just create the file */
46246 if (!dentry->d_inode) {
46247 umode_t mode = op->mode;
46248 +
46249 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46250 + error = -EACCES;
46251 + goto exit_mutex_unlock;
46252 + }
46253 +
46254 if (!IS_POSIXACL(dir->d_inode))
46255 mode &= ~current_umask();
46256 /*
46257 @@ -2281,6 +2364,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46258 error = vfs_create(dir->d_inode, dentry, mode, nd);
46259 if (error)
46260 goto exit_mutex_unlock;
46261 + else
46262 + gr_handle_create(path->dentry, path->mnt);
46263 mutex_unlock(&dir->d_inode->i_mutex);
46264 dput(nd->path.dentry);
46265 nd->path.dentry = dentry;
46266 @@ -2290,6 +2375,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46267 /*
46268 * It already exists.
46269 */
46270 +
46271 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46272 + error = -ENOENT;
46273 + goto exit_mutex_unlock;
46274 + }
46275 +
46276 + /* only check if O_CREAT is specified, all other checks need to go
46277 + into may_open */
46278 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46279 + error = -EACCES;
46280 + goto exit_mutex_unlock;
46281 + }
46282 +
46283 mutex_unlock(&dir->d_inode->i_mutex);
46284 audit_inode(pathname, path->dentry);
46285
46286 @@ -2502,6 +2600,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46287 *path = nd.path;
46288 return dentry;
46289 eexist:
46290 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46291 + dput(dentry);
46292 + dentry = ERR_PTR(-ENOENT);
46293 + goto fail;
46294 + }
46295 dput(dentry);
46296 dentry = ERR_PTR(-EEXIST);
46297 fail:
46298 @@ -2524,6 +2627,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46299 }
46300 EXPORT_SYMBOL(user_path_create);
46301
46302 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46303 +{
46304 + char *tmp = getname(pathname);
46305 + struct dentry *res;
46306 + if (IS_ERR(tmp))
46307 + return ERR_CAST(tmp);
46308 + res = kern_path_create(dfd, tmp, path, is_dir);
46309 + if (IS_ERR(res))
46310 + putname(tmp);
46311 + else
46312 + *to = tmp;
46313 + return res;
46314 +}
46315 +
46316 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46317 {
46318 int error = may_create(dir, dentry);
46319 @@ -2591,6 +2708,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46320 error = mnt_want_write(path.mnt);
46321 if (error)
46322 goto out_dput;
46323 +
46324 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46325 + error = -EPERM;
46326 + goto out_drop_write;
46327 + }
46328 +
46329 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46330 + error = -EACCES;
46331 + goto out_drop_write;
46332 + }
46333 +
46334 error = security_path_mknod(&path, dentry, mode, dev);
46335 if (error)
46336 goto out_drop_write;
46337 @@ -2608,6 +2736,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46338 }
46339 out_drop_write:
46340 mnt_drop_write(path.mnt);
46341 +
46342 + if (!error)
46343 + gr_handle_create(dentry, path.mnt);
46344 out_dput:
46345 dput(dentry);
46346 mutex_unlock(&path.dentry->d_inode->i_mutex);
46347 @@ -2661,12 +2792,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46348 error = mnt_want_write(path.mnt);
46349 if (error)
46350 goto out_dput;
46351 +
46352 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46353 + error = -EACCES;
46354 + goto out_drop_write;
46355 + }
46356 +
46357 error = security_path_mkdir(&path, dentry, mode);
46358 if (error)
46359 goto out_drop_write;
46360 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46361 out_drop_write:
46362 mnt_drop_write(path.mnt);
46363 +
46364 + if (!error)
46365 + gr_handle_create(dentry, path.mnt);
46366 out_dput:
46367 dput(dentry);
46368 mutex_unlock(&path.dentry->d_inode->i_mutex);
46369 @@ -2746,6 +2886,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46370 char * name;
46371 struct dentry *dentry;
46372 struct nameidata nd;
46373 + ino_t saved_ino = 0;
46374 + dev_t saved_dev = 0;
46375
46376 error = user_path_parent(dfd, pathname, &nd, &name);
46377 if (error)
46378 @@ -2774,6 +2916,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46379 error = -ENOENT;
46380 goto exit3;
46381 }
46382 +
46383 + saved_ino = dentry->d_inode->i_ino;
46384 + saved_dev = gr_get_dev_from_dentry(dentry);
46385 +
46386 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46387 + error = -EACCES;
46388 + goto exit3;
46389 + }
46390 +
46391 error = mnt_want_write(nd.path.mnt);
46392 if (error)
46393 goto exit3;
46394 @@ -2781,6 +2932,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46395 if (error)
46396 goto exit4;
46397 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46398 + if (!error && (saved_dev || saved_ino))
46399 + gr_handle_delete(saved_ino, saved_dev);
46400 exit4:
46401 mnt_drop_write(nd.path.mnt);
46402 exit3:
46403 @@ -2843,6 +2996,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46404 struct dentry *dentry;
46405 struct nameidata nd;
46406 struct inode *inode = NULL;
46407 + ino_t saved_ino = 0;
46408 + dev_t saved_dev = 0;
46409
46410 error = user_path_parent(dfd, pathname, &nd, &name);
46411 if (error)
46412 @@ -2865,6 +3020,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46413 if (!inode)
46414 goto slashes;
46415 ihold(inode);
46416 +
46417 + if (inode->i_nlink <= 1) {
46418 + saved_ino = inode->i_ino;
46419 + saved_dev = gr_get_dev_from_dentry(dentry);
46420 + }
46421 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46422 + error = -EACCES;
46423 + goto exit2;
46424 + }
46425 +
46426 error = mnt_want_write(nd.path.mnt);
46427 if (error)
46428 goto exit2;
46429 @@ -2872,6 +3037,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46430 if (error)
46431 goto exit3;
46432 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46433 + if (!error && (saved_ino || saved_dev))
46434 + gr_handle_delete(saved_ino, saved_dev);
46435 exit3:
46436 mnt_drop_write(nd.path.mnt);
46437 exit2:
46438 @@ -2947,10 +3114,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46439 error = mnt_want_write(path.mnt);
46440 if (error)
46441 goto out_dput;
46442 +
46443 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46444 + error = -EACCES;
46445 + goto out_drop_write;
46446 + }
46447 +
46448 error = security_path_symlink(&path, dentry, from);
46449 if (error)
46450 goto out_drop_write;
46451 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46452 + if (!error)
46453 + gr_handle_create(dentry, path.mnt);
46454 out_drop_write:
46455 mnt_drop_write(path.mnt);
46456 out_dput:
46457 @@ -3025,6 +3200,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46458 {
46459 struct dentry *new_dentry;
46460 struct path old_path, new_path;
46461 + char *to = NULL;
46462 int how = 0;
46463 int error;
46464
46465 @@ -3048,7 +3224,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46466 if (error)
46467 return error;
46468
46469 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46470 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46471 error = PTR_ERR(new_dentry);
46472 if (IS_ERR(new_dentry))
46473 goto out;
46474 @@ -3059,13 +3235,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46475 error = mnt_want_write(new_path.mnt);
46476 if (error)
46477 goto out_dput;
46478 +
46479 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46480 + old_path.dentry->d_inode,
46481 + old_path.dentry->d_inode->i_mode, to)) {
46482 + error = -EACCES;
46483 + goto out_drop_write;
46484 + }
46485 +
46486 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46487 + old_path.dentry, old_path.mnt, to)) {
46488 + error = -EACCES;
46489 + goto out_drop_write;
46490 + }
46491 +
46492 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46493 if (error)
46494 goto out_drop_write;
46495 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46496 + if (!error)
46497 + gr_handle_create(new_dentry, new_path.mnt);
46498 out_drop_write:
46499 mnt_drop_write(new_path.mnt);
46500 out_dput:
46501 + putname(to);
46502 dput(new_dentry);
46503 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46504 path_put(&new_path);
46505 @@ -3299,6 +3492,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46506 if (new_dentry == trap)
46507 goto exit5;
46508
46509 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46510 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
46511 + to);
46512 + if (error)
46513 + goto exit5;
46514 +
46515 error = mnt_want_write(oldnd.path.mnt);
46516 if (error)
46517 goto exit5;
46518 @@ -3308,6 +3507,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46519 goto exit6;
46520 error = vfs_rename(old_dir->d_inode, old_dentry,
46521 new_dir->d_inode, new_dentry);
46522 + if (!error)
46523 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46524 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46525 exit6:
46526 mnt_drop_write(oldnd.path.mnt);
46527 exit5:
46528 @@ -3333,6 +3535,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46529
46530 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46531 {
46532 + char tmpbuf[64];
46533 + const char *newlink;
46534 int len;
46535
46536 len = PTR_ERR(link);
46537 @@ -3342,7 +3546,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46538 len = strlen(link);
46539 if (len > (unsigned) buflen)
46540 len = buflen;
46541 - if (copy_to_user(buffer, link, len))
46542 +
46543 + if (len < sizeof(tmpbuf)) {
46544 + memcpy(tmpbuf, link, len);
46545 + newlink = tmpbuf;
46546 + } else
46547 + newlink = link;
46548 +
46549 + if (copy_to_user(buffer, newlink, len))
46550 len = -EFAULT;
46551 out:
46552 return len;
46553 diff --git a/fs/namespace.c b/fs/namespace.c
46554 index e608199..9609cb9 100644
46555 --- a/fs/namespace.c
46556 +++ b/fs/namespace.c
46557 @@ -1155,6 +1155,9 @@ static int do_umount(struct mount *mnt, int flags)
46558 if (!(sb->s_flags & MS_RDONLY))
46559 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46560 up_write(&sb->s_umount);
46561 +
46562 + gr_log_remount(mnt->mnt_devname, retval);
46563 +
46564 return retval;
46565 }
46566
46567 @@ -1174,6 +1177,9 @@ static int do_umount(struct mount *mnt, int flags)
46568 br_write_unlock(vfsmount_lock);
46569 up_write(&namespace_sem);
46570 release_mounts(&umount_list);
46571 +
46572 + gr_log_unmount(mnt->mnt_devname, retval);
46573 +
46574 return retval;
46575 }
46576
46577 @@ -2175,6 +2181,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46578 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
46579 MS_STRICTATIME);
46580
46581 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
46582 + retval = -EPERM;
46583 + goto dput_out;
46584 + }
46585 +
46586 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
46587 + retval = -EPERM;
46588 + goto dput_out;
46589 + }
46590 +
46591 if (flags & MS_REMOUNT)
46592 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
46593 data_page);
46594 @@ -2189,6 +2205,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46595 dev_name, data_page);
46596 dput_out:
46597 path_put(&path);
46598 +
46599 + gr_log_mount(dev_name, dir_name, retval);
46600 +
46601 return retval;
46602 }
46603
46604 @@ -2470,6 +2489,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
46605 if (error)
46606 goto out2;
46607
46608 + if (gr_handle_chroot_pivot()) {
46609 + error = -EPERM;
46610 + goto out2;
46611 + }
46612 +
46613 get_fs_root(current->fs, &root);
46614 error = lock_mount(&old);
46615 if (error)
46616 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
46617 index e8bbfa5..864f936 100644
46618 --- a/fs/nfs/inode.c
46619 +++ b/fs/nfs/inode.c
46620 @@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
46621 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
46622 nfsi->attrtimeo_timestamp = jiffies;
46623
46624 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
46625 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
46626 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
46627 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
46628 else
46629 @@ -1005,16 +1005,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
46630 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
46631 }
46632
46633 -static atomic_long_t nfs_attr_generation_counter;
46634 +static atomic_long_unchecked_t nfs_attr_generation_counter;
46635
46636 static unsigned long nfs_read_attr_generation_counter(void)
46637 {
46638 - return atomic_long_read(&nfs_attr_generation_counter);
46639 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
46640 }
46641
46642 unsigned long nfs_inc_attr_generation_counter(void)
46643 {
46644 - return atomic_long_inc_return(&nfs_attr_generation_counter);
46645 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
46646 }
46647
46648 void nfs_fattr_init(struct nfs_fattr *fattr)
46649 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
46650 index 5686661..80a9a3a 100644
46651 --- a/fs/nfsd/vfs.c
46652 +++ b/fs/nfsd/vfs.c
46653 @@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46654 } else {
46655 oldfs = get_fs();
46656 set_fs(KERNEL_DS);
46657 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
46658 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
46659 set_fs(oldfs);
46660 }
46661
46662 @@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46663
46664 /* Write the data. */
46665 oldfs = get_fs(); set_fs(KERNEL_DS);
46666 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
46667 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
46668 set_fs(oldfs);
46669 if (host_err < 0)
46670 goto out_nfserr;
46671 @@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
46672 */
46673
46674 oldfs = get_fs(); set_fs(KERNEL_DS);
46675 - host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
46676 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
46677 set_fs(oldfs);
46678
46679 if (host_err < 0)
46680 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
46681 index 3568c8a..e0240d8 100644
46682 --- a/fs/notify/fanotify/fanotify_user.c
46683 +++ b/fs/notify/fanotify/fanotify_user.c
46684 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
46685 goto out_close_fd;
46686
46687 ret = -EFAULT;
46688 - if (copy_to_user(buf, &fanotify_event_metadata,
46689 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
46690 + copy_to_user(buf, &fanotify_event_metadata,
46691 fanotify_event_metadata.event_len))
46692 goto out_kill_access_response;
46693
46694 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
46695 index c887b13..0fdf472 100644
46696 --- a/fs/notify/notification.c
46697 +++ b/fs/notify/notification.c
46698 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
46699 * get set to 0 so it will never get 'freed'
46700 */
46701 static struct fsnotify_event *q_overflow_event;
46702 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46703 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46704
46705 /**
46706 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
46707 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46708 */
46709 u32 fsnotify_get_cookie(void)
46710 {
46711 - return atomic_inc_return(&fsnotify_sync_cookie);
46712 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
46713 }
46714 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
46715
46716 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
46717 index 99e3610..02c1068 100644
46718 --- a/fs/ntfs/dir.c
46719 +++ b/fs/ntfs/dir.c
46720 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
46721 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
46722 ~(s64)(ndir->itype.index.block_size - 1)));
46723 /* Bounds checks. */
46724 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46725 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46726 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
46727 "inode 0x%lx or driver bug.", vdir->i_ino);
46728 goto err_out;
46729 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
46730 index 8639169..76697aa 100644
46731 --- a/fs/ntfs/file.c
46732 +++ b/fs/ntfs/file.c
46733 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
46734 #endif /* NTFS_RW */
46735 };
46736
46737 -const struct file_operations ntfs_empty_file_ops = {};
46738 +const struct file_operations ntfs_empty_file_ops __read_only;
46739
46740 -const struct inode_operations ntfs_empty_inode_ops = {};
46741 +const struct inode_operations ntfs_empty_inode_ops __read_only;
46742 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
46743 index 210c352..a174f83 100644
46744 --- a/fs/ocfs2/localalloc.c
46745 +++ b/fs/ocfs2/localalloc.c
46746 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
46747 goto bail;
46748 }
46749
46750 - atomic_inc(&osb->alloc_stats.moves);
46751 + atomic_inc_unchecked(&osb->alloc_stats.moves);
46752
46753 bail:
46754 if (handle)
46755 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
46756 index d355e6e..578d905 100644
46757 --- a/fs/ocfs2/ocfs2.h
46758 +++ b/fs/ocfs2/ocfs2.h
46759 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
46760
46761 struct ocfs2_alloc_stats
46762 {
46763 - atomic_t moves;
46764 - atomic_t local_data;
46765 - atomic_t bitmap_data;
46766 - atomic_t bg_allocs;
46767 - atomic_t bg_extends;
46768 + atomic_unchecked_t moves;
46769 + atomic_unchecked_t local_data;
46770 + atomic_unchecked_t bitmap_data;
46771 + atomic_unchecked_t bg_allocs;
46772 + atomic_unchecked_t bg_extends;
46773 };
46774
46775 enum ocfs2_local_alloc_state
46776 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
46777 index f169da4..9112253 100644
46778 --- a/fs/ocfs2/suballoc.c
46779 +++ b/fs/ocfs2/suballoc.c
46780 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
46781 mlog_errno(status);
46782 goto bail;
46783 }
46784 - atomic_inc(&osb->alloc_stats.bg_extends);
46785 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
46786
46787 /* You should never ask for this much metadata */
46788 BUG_ON(bits_wanted >
46789 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
46790 mlog_errno(status);
46791 goto bail;
46792 }
46793 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46794 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46795
46796 *suballoc_loc = res.sr_bg_blkno;
46797 *suballoc_bit_start = res.sr_bit_offset;
46798 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
46799 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
46800 res->sr_bits);
46801
46802 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46803 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46804
46805 BUG_ON(res->sr_bits != 1);
46806
46807 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
46808 mlog_errno(status);
46809 goto bail;
46810 }
46811 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46812 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46813
46814 BUG_ON(res.sr_bits != 1);
46815
46816 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46817 cluster_start,
46818 num_clusters);
46819 if (!status)
46820 - atomic_inc(&osb->alloc_stats.local_data);
46821 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
46822 } else {
46823 if (min_clusters > (osb->bitmap_cpg - 1)) {
46824 /* The only paths asking for contiguousness
46825 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46826 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
46827 res.sr_bg_blkno,
46828 res.sr_bit_offset);
46829 - atomic_inc(&osb->alloc_stats.bitmap_data);
46830 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
46831 *num_clusters = res.sr_bits;
46832 }
46833 }
46834 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
46835 index 68f4541..89cfe6a 100644
46836 --- a/fs/ocfs2/super.c
46837 +++ b/fs/ocfs2/super.c
46838 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
46839 "%10s => GlobalAllocs: %d LocalAllocs: %d "
46840 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
46841 "Stats",
46842 - atomic_read(&osb->alloc_stats.bitmap_data),
46843 - atomic_read(&osb->alloc_stats.local_data),
46844 - atomic_read(&osb->alloc_stats.bg_allocs),
46845 - atomic_read(&osb->alloc_stats.moves),
46846 - atomic_read(&osb->alloc_stats.bg_extends));
46847 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
46848 + atomic_read_unchecked(&osb->alloc_stats.local_data),
46849 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
46850 + atomic_read_unchecked(&osb->alloc_stats.moves),
46851 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
46852
46853 out += snprintf(buf + out, len - out,
46854 "%10s => State: %u Descriptor: %llu Size: %u bits "
46855 @@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
46856 spin_lock_init(&osb->osb_xattr_lock);
46857 ocfs2_init_steal_slots(osb);
46858
46859 - atomic_set(&osb->alloc_stats.moves, 0);
46860 - atomic_set(&osb->alloc_stats.local_data, 0);
46861 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
46862 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
46863 - atomic_set(&osb->alloc_stats.bg_extends, 0);
46864 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
46865 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
46866 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
46867 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
46868 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
46869
46870 /* Copy the blockcheck stats from the superblock probe */
46871 osb->osb_ecc_stats = *stats;
46872 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
46873 index 5d22872..523db20 100644
46874 --- a/fs/ocfs2/symlink.c
46875 +++ b/fs/ocfs2/symlink.c
46876 @@ -142,7 +142,7 @@ bail:
46877
46878 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46879 {
46880 - char *link = nd_get_link(nd);
46881 + const char *link = nd_get_link(nd);
46882 if (!IS_ERR(link))
46883 kfree(link);
46884 }
46885 diff --git a/fs/open.c b/fs/open.c
46886 index 5720854..2707e82 100644
46887 --- a/fs/open.c
46888 +++ b/fs/open.c
46889 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
46890 error = locks_verify_truncate(inode, NULL, length);
46891 if (!error)
46892 error = security_path_truncate(&path);
46893 +
46894 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
46895 + error = -EACCES;
46896 +
46897 if (!error)
46898 error = do_truncate(path.dentry, length, 0, NULL);
46899
46900 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
46901 if (__mnt_is_readonly(path.mnt))
46902 res = -EROFS;
46903
46904 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
46905 + res = -EACCES;
46906 +
46907 out_path_release:
46908 path_put(&path);
46909 out:
46910 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
46911 if (error)
46912 goto dput_and_out;
46913
46914 + gr_log_chdir(path.dentry, path.mnt);
46915 +
46916 set_fs_pwd(current->fs, &path);
46917
46918 dput_and_out:
46919 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
46920 goto out_putf;
46921
46922 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
46923 +
46924 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
46925 + error = -EPERM;
46926 +
46927 + if (!error)
46928 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
46929 +
46930 if (!error)
46931 set_fs_pwd(current->fs, &file->f_path);
46932 out_putf:
46933 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
46934 if (error)
46935 goto dput_and_out;
46936
46937 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
46938 + goto dput_and_out;
46939 +
46940 set_fs_root(current->fs, &path);
46941 +
46942 + gr_handle_chroot_chdir(&path);
46943 +
46944 error = 0;
46945 dput_and_out:
46946 path_put(&path);
46947 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
46948 if (error)
46949 return error;
46950 mutex_lock(&inode->i_mutex);
46951 +
46952 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
46953 + error = -EACCES;
46954 + goto out_unlock;
46955 + }
46956 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
46957 + error = -EACCES;
46958 + goto out_unlock;
46959 + }
46960 +
46961 error = security_path_chmod(path, mode);
46962 if (error)
46963 goto out_unlock;
46964 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
46965 int error;
46966 struct iattr newattrs;
46967
46968 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
46969 + return -EACCES;
46970 +
46971 newattrs.ia_valid = ATTR_CTIME;
46972 if (user != (uid_t) -1) {
46973 newattrs.ia_valid |= ATTR_UID;
46974 diff --git a/fs/pipe.c b/fs/pipe.c
46975 index fec5e4a..f4210f9 100644
46976 --- a/fs/pipe.c
46977 +++ b/fs/pipe.c
46978 @@ -438,9 +438,9 @@ redo:
46979 }
46980 if (bufs) /* More to do? */
46981 continue;
46982 - if (!pipe->writers)
46983 + if (!atomic_read(&pipe->writers))
46984 break;
46985 - if (!pipe->waiting_writers) {
46986 + if (!atomic_read(&pipe->waiting_writers)) {
46987 /* syscall merging: Usually we must not sleep
46988 * if O_NONBLOCK is set, or if we got some data.
46989 * But if a writer sleeps in kernel space, then
46990 @@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
46991 mutex_lock(&inode->i_mutex);
46992 pipe = inode->i_pipe;
46993
46994 - if (!pipe->readers) {
46995 + if (!atomic_read(&pipe->readers)) {
46996 send_sig(SIGPIPE, current, 0);
46997 ret = -EPIPE;
46998 goto out;
46999 @@ -553,7 +553,7 @@ redo1:
47000 for (;;) {
47001 int bufs;
47002
47003 - if (!pipe->readers) {
47004 + if (!atomic_read(&pipe->readers)) {
47005 send_sig(SIGPIPE, current, 0);
47006 if (!ret)
47007 ret = -EPIPE;
47008 @@ -644,9 +644,9 @@ redo2:
47009 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47010 do_wakeup = 0;
47011 }
47012 - pipe->waiting_writers++;
47013 + atomic_inc(&pipe->waiting_writers);
47014 pipe_wait(pipe);
47015 - pipe->waiting_writers--;
47016 + atomic_dec(&pipe->waiting_writers);
47017 }
47018 out:
47019 mutex_unlock(&inode->i_mutex);
47020 @@ -713,7 +713,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47021 mask = 0;
47022 if (filp->f_mode & FMODE_READ) {
47023 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47024 - if (!pipe->writers && filp->f_version != pipe->w_counter)
47025 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47026 mask |= POLLHUP;
47027 }
47028
47029 @@ -723,7 +723,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47030 * Most Unices do not set POLLERR for FIFOs but on Linux they
47031 * behave exactly like pipes for poll().
47032 */
47033 - if (!pipe->readers)
47034 + if (!atomic_read(&pipe->readers))
47035 mask |= POLLERR;
47036 }
47037
47038 @@ -737,10 +737,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47039
47040 mutex_lock(&inode->i_mutex);
47041 pipe = inode->i_pipe;
47042 - pipe->readers -= decr;
47043 - pipe->writers -= decw;
47044 + atomic_sub(decr, &pipe->readers);
47045 + atomic_sub(decw, &pipe->writers);
47046
47047 - if (!pipe->readers && !pipe->writers) {
47048 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47049 free_pipe_info(inode);
47050 } else {
47051 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47052 @@ -830,7 +830,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47053
47054 if (inode->i_pipe) {
47055 ret = 0;
47056 - inode->i_pipe->readers++;
47057 + atomic_inc(&inode->i_pipe->readers);
47058 }
47059
47060 mutex_unlock(&inode->i_mutex);
47061 @@ -847,7 +847,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47062
47063 if (inode->i_pipe) {
47064 ret = 0;
47065 - inode->i_pipe->writers++;
47066 + atomic_inc(&inode->i_pipe->writers);
47067 }
47068
47069 mutex_unlock(&inode->i_mutex);
47070 @@ -865,9 +865,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47071 if (inode->i_pipe) {
47072 ret = 0;
47073 if (filp->f_mode & FMODE_READ)
47074 - inode->i_pipe->readers++;
47075 + atomic_inc(&inode->i_pipe->readers);
47076 if (filp->f_mode & FMODE_WRITE)
47077 - inode->i_pipe->writers++;
47078 + atomic_inc(&inode->i_pipe->writers);
47079 }
47080
47081 mutex_unlock(&inode->i_mutex);
47082 @@ -959,7 +959,7 @@ void free_pipe_info(struct inode *inode)
47083 inode->i_pipe = NULL;
47084 }
47085
47086 -static struct vfsmount *pipe_mnt __read_mostly;
47087 +struct vfsmount *pipe_mnt __read_mostly;
47088
47089 /*
47090 * pipefs_dname() is called from d_path().
47091 @@ -989,7 +989,8 @@ static struct inode * get_pipe_inode(void)
47092 goto fail_iput;
47093 inode->i_pipe = pipe;
47094
47095 - pipe->readers = pipe->writers = 1;
47096 + atomic_set(&pipe->readers, 1);
47097 + atomic_set(&pipe->writers, 1);
47098 inode->i_fop = &rdwr_pipefifo_fops;
47099
47100 /*
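The fs/pipe.c hunks above turn the plain readers, writers and waiting_writers counters into atomic_t values, updated with atomic_inc()/atomic_dec()/atomic_sub() and inspected with atomic_read(), so concurrent opens, releases and pollers cannot lose an update or act on a torn read. A minimal user-space sketch of the same bookkeeping, using C11 <stdatomic.h> in place of the kernel's atomic_t API; struct pipe_ctr and the helper names are illustrative, not taken from the patch.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the counters the patch wraps in atomic_t. */
struct pipe_ctr {
    atomic_int readers;
    atomic_int writers;
};

static void ctr_open_reader(struct pipe_ctr *p)
{
    atomic_fetch_add(&p->readers, 1);        /* atomic_inc(&pipe->readers) */
}

static void ctr_release(struct pipe_ctr *p, int decr, int decw)
{
    atomic_fetch_sub(&p->readers, decr);     /* atomic_sub(decr, &pipe->readers) */
    atomic_fetch_sub(&p->writers, decw);     /* atomic_sub(decw, &pipe->writers) */
}

static int ctr_hung_up(struct pipe_ctr *p)
{
    /* atomic_read() on both ends: nobody left on either side. */
    return !atomic_load(&p->readers) && !atomic_load(&p->writers);
}

int main(void)
{
    struct pipe_ctr p;

    atomic_init(&p.readers, 1);              /* atomic_set(&pipe->readers, 1) */
    atomic_init(&p.writers, 1);
    ctr_open_reader(&p);
    ctr_release(&p, 2, 1);
    printf("hung up: %d\n", ctr_hung_up(&p));
    return 0;
}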
47101 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47102 index 15af622..0e9f4467 100644
47103 --- a/fs/proc/Kconfig
47104 +++ b/fs/proc/Kconfig
47105 @@ -30,12 +30,12 @@ config PROC_FS
47106
47107 config PROC_KCORE
47108 bool "/proc/kcore support" if !ARM
47109 - depends on PROC_FS && MMU
47110 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47111
47112 config PROC_VMCORE
47113 bool "/proc/vmcore support"
47114 - depends on PROC_FS && CRASH_DUMP
47115 - default y
47116 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47117 + default n
47118 help
47119 Exports the dump image of crashed kernel in ELF format.
47120
47121 @@ -59,8 +59,8 @@ config PROC_SYSCTL
47122 limited in memory.
47123
47124 config PROC_PAGE_MONITOR
47125 - default y
47126 - depends on PROC_FS && MMU
47127 + default n
47128 + depends on PROC_FS && MMU && !GRKERNSEC
47129 bool "Enable /proc page monitoring" if EXPERT
47130 help
47131 Various /proc files exist to monitor process memory utilization:
47132 diff --git a/fs/proc/array.c b/fs/proc/array.c
47133 index f9bd395..acb7847 100644
47134 --- a/fs/proc/array.c
47135 +++ b/fs/proc/array.c
47136 @@ -60,6 +60,7 @@
47137 #include <linux/tty.h>
47138 #include <linux/string.h>
47139 #include <linux/mman.h>
47140 +#include <linux/grsecurity.h>
47141 #include <linux/proc_fs.h>
47142 #include <linux/ioport.h>
47143 #include <linux/uaccess.h>
47144 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47145 seq_putc(m, '\n');
47146 }
47147
47148 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47149 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
47150 +{
47151 + if (p->mm)
47152 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47153 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47154 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47155 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47156 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47157 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47158 + else
47159 + seq_printf(m, "PaX:\t-----\n");
47160 +}
47161 +#endif
47162 +
47163 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47164 struct pid *pid, struct task_struct *task)
47165 {
47166 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47167 task_cpus_allowed(m, task);
47168 cpuset_task_status_allowed(m, task);
47169 task_context_switch_counts(m, task);
47170 +
47171 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47172 + task_pax(m, task);
47173 +#endif
47174 +
47175 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47176 + task_grsec_rbac(m, task);
47177 +#endif
47178 +
47179 return 0;
47180 }
47181
47182 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47183 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47184 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47185 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47186 +#endif
47187 +
47188 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47189 struct pid *pid, struct task_struct *task, int whole)
47190 {
47191 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47192 char tcomm[sizeof(task->comm)];
47193 unsigned long flags;
47194
47195 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47196 + if (current->exec_id != m->exec_id) {
47197 + gr_log_badprocpid("stat");
47198 + return 0;
47199 + }
47200 +#endif
47201 +
47202 state = *get_task_state(task);
47203 vsize = eip = esp = 0;
47204 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47205 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47206 gtime = task->gtime;
47207 }
47208
47209 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47210 + if (PAX_RAND_FLAGS(mm)) {
47211 + eip = 0;
47212 + esp = 0;
47213 + wchan = 0;
47214 + }
47215 +#endif
47216 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47217 + wchan = 0;
47218 + eip = 0;
47219 + esp = 0;
47220 +#endif
47221 +
47222 /* scale priority and nice values from timeslices to -20..20 */
47223 /* to make it look like a "normal" Unix priority/nice value */
47224 priority = task_prio(task);
47225 @@ -485,9 +536,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47226 seq_put_decimal_ull(m, ' ', vsize);
47227 seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
47228 seq_put_decimal_ull(m, ' ', rsslim);
47229 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47230 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
47231 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
47232 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
47233 +#else
47234 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
47235 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
47236 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
47237 +#endif
47238 seq_put_decimal_ull(m, ' ', esp);
47239 seq_put_decimal_ull(m, ' ', eip);
47240 /* The signal information here is obsolete.
47241 @@ -508,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47242 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
47243 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
47244 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
47245 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47246 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_data : 0));
47247 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->end_data : 0));
47248 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_brk : 0));
47249 +#else
47250 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
47251 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
47252 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
47253 +#endif
47254 seq_putc(m, '\n');
47255 if (mm)
47256 mmput(mm);
47257 @@ -533,8 +596,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47258 struct pid *pid, struct task_struct *task)
47259 {
47260 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47261 - struct mm_struct *mm = get_task_mm(task);
47262 + struct mm_struct *mm;
47263
47264 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47265 + if (current->exec_id != m->exec_id) {
47266 + gr_log_badprocpid("statm");
47267 + return 0;
47268 + }
47269 +#endif
47270 + mm = get_task_mm(task);
47271 if (mm) {
47272 size = task_statm(mm, &shared, &text, &data, &resident);
47273 mmput(mm);
47274 @@ -556,3 +626,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47275
47276 return 0;
47277 }
47278 +
47279 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47280 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47281 +{
47282 + u32 curr_ip = 0;
47283 + unsigned long flags;
47284 +
47285 + if (lock_task_sighand(task, &flags)) {
47286 + curr_ip = task->signal->curr_ip;
47287 + unlock_task_sighand(task, &flags);
47288 + }
47289 +
47290 + return sprintf(buffer, "%pI4\n", &curr_ip);
47291 +}
47292 +#endif
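The task_pax() helper added to fs/proc/array.c above encodes each PaX flag as a single letter in /proc/<pid>/status: upper-case when the flag is set, lower-case when it is clear, and "-----" when the task has no mm. A small user-space sketch of that encoding; the flag values below are made up for illustration, the real MF_PAX_* constants come from the header changes elsewhere in this patch.

#include <stdio.h>

/* Hypothetical flag values, for illustration only. */
#define MF_PAX_PAGEEXEC 0x01
#define MF_PAX_EMUTRAMP 0x02
#define MF_PAX_MPROTECT 0x04
#define MF_PAX_RANDMMAP 0x08
#define MF_PAX_SEGMEXEC 0x10

static void show_pax(unsigned long flags)
{
    /* Mirrors the seq_printf() format string: one letter per flag. */
    printf("PaX:\t%c%c%c%c%c\n",
           flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
           flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
           flags & MF_PAX_MPROTECT ? 'M' : 'm',
           flags & MF_PAX_RANDMMAP ? 'R' : 'r',
           flags & MF_PAX_SEGMEXEC ? 'S' : 's');
}

int main(void)
{
    show_pax(MF_PAX_PAGEEXEC | MF_PAX_MPROTECT | MF_PAX_RANDMMAP); /* prints PeMRs */
    return 0;
}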
47293 diff --git a/fs/proc/base.c b/fs/proc/base.c
47294 index 57b8159..7a08ad5 100644
47295 --- a/fs/proc/base.c
47296 +++ b/fs/proc/base.c
47297 @@ -109,6 +109,14 @@ struct pid_entry {
47298 union proc_op op;
47299 };
47300
47301 +struct getdents_callback {
47302 + struct linux_dirent __user * current_dir;
47303 + struct linux_dirent __user * previous;
47304 + struct file * file;
47305 + int count;
47306 + int error;
47307 +};
47308 +
47309 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47310 .name = (NAME), \
47311 .len = sizeof(NAME) - 1, \
47312 @@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47313 if (!mm->arg_end)
47314 goto out_mm; /* Shh! No looking before we're done */
47315
47316 + if (gr_acl_handle_procpidmem(task))
47317 + goto out_mm;
47318 +
47319 len = mm->arg_end - mm->arg_start;
47320
47321 if (len > PAGE_SIZE)
47322 @@ -240,12 +251,28 @@ out:
47323 return res;
47324 }
47325
47326 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47327 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47328 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47329 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47330 +#endif
47331 +
47332 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47333 {
47334 struct mm_struct *mm = mm_for_maps(task);
47335 int res = PTR_ERR(mm);
47336 if (mm && !IS_ERR(mm)) {
47337 unsigned int nwords = 0;
47338 +
47339 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47340 + /* allow if we're currently ptracing this task */
47341 + if (PAX_RAND_FLAGS(mm) &&
47342 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47343 + mmput(mm);
47344 + return 0;
47345 + }
47346 +#endif
47347 +
47348 do {
47349 nwords += 2;
47350 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47351 @@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47352 }
47353
47354
47355 -#ifdef CONFIG_KALLSYMS
47356 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47357 /*
47358 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47359 * Returns the resolved symbol. If that fails, simply return the address.
47360 @@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
47361 mutex_unlock(&task->signal->cred_guard_mutex);
47362 }
47363
47364 -#ifdef CONFIG_STACKTRACE
47365 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47366
47367 #define MAX_STACK_TRACE_DEPTH 64
47368
47369 @@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47370 return count;
47371 }
47372
47373 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47374 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47375 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47376 {
47377 long nr;
47378 @@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47379 /************************************************************************/
47380
47381 /* permission checks */
47382 -static int proc_fd_access_allowed(struct inode *inode)
47383 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47384 {
47385 struct task_struct *task;
47386 int allowed = 0;
47387 @@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47388 */
47389 task = get_proc_task(inode);
47390 if (task) {
47391 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47392 + if (log)
47393 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47394 + else
47395 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47396 put_task_struct(task);
47397 }
47398 return allowed;
47399 @@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
47400 struct task_struct *task,
47401 int hide_pid_min)
47402 {
47403 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47404 + return false;
47405 +
47406 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47407 + rcu_read_lock();
47408 + {
47409 + const struct cred *tmpcred = current_cred();
47410 + const struct cred *cred = __task_cred(task);
47411 +
47412 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47413 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47414 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47415 +#endif
47416 + ) {
47417 + rcu_read_unlock();
47418 + return true;
47419 + }
47420 + }
47421 + rcu_read_unlock();
47422 +
47423 + if (!pid->hide_pid)
47424 + return false;
47425 +#endif
47426 +
47427 if (pid->hide_pid < hide_pid_min)
47428 return true;
47429 if (in_group_p(pid->pid_gid))
47430 return true;
47431 +
47432 return ptrace_may_access(task, PTRACE_MODE_READ);
47433 }
47434
47435 @@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
47436 put_task_struct(task);
47437
47438 if (!has_perms) {
47439 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47440 + {
47441 +#else
47442 if (pid->hide_pid == 2) {
47443 +#endif
47444 /*
47445 * Let's make getdents(), stat(), and open()
47446 * consistent with each other. If a process
47447 @@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
47448 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47449 file->private_data = mm;
47450
47451 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47452 + file->f_version = current->exec_id;
47453 +#endif
47454 +
47455 return 0;
47456 }
47457
47458 @@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
47459 ssize_t copied;
47460 char *page;
47461
47462 +#ifdef CONFIG_GRKERNSEC
47463 + if (write)
47464 + return -EPERM;
47465 +#endif
47466 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47467 + if (file->f_version != current->exec_id) {
47468 + gr_log_badprocpid("mem");
47469 + return 0;
47470 + }
47471 +#endif
47472 +
47473 if (!mm)
47474 return 0;
47475
47476 @@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47477 if (!task)
47478 goto out_no_task;
47479
47480 + if (gr_acl_handle_procpidmem(task))
47481 + goto out;
47482 +
47483 ret = -ENOMEM;
47484 page = (char *)__get_free_page(GFP_TEMPORARY);
47485 if (!page)
47486 @@ -1433,7 +1510,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
47487 path_put(&nd->path);
47488
47489 /* Are we allowed to snoop on the tasks file descriptors? */
47490 - if (!proc_fd_access_allowed(inode))
47491 + if (!proc_fd_access_allowed(inode, 0))
47492 goto out;
47493
47494 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
47495 @@ -1472,8 +1549,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
47496 struct path path;
47497
47498 /* Are we allowed to snoop on the tasks file descriptors? */
47499 - if (!proc_fd_access_allowed(inode))
47500 - goto out;
47501 + /* Logging this is needed for learning on Chromium to work properly,
47502 + but we don't want to flood the logs from 'ps', which does a readlink
47503 + on /proc/fd/2 of the tasks in the listing, nor do we want 'ps' to
47504 + learn CAP_SYS_PTRACE, as that is not necessary for its basic functionality.
47505 + */
47506 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
47507 + if (!proc_fd_access_allowed(inode, 0))
47508 + goto out;
47509 + } else {
47510 + if (!proc_fd_access_allowed(inode, 1))
47511 + goto out;
47512 + }
47513
47514 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
47515 if (error)
47516 @@ -1538,7 +1625,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47517 rcu_read_lock();
47518 cred = __task_cred(task);
47519 inode->i_uid = cred->euid;
47520 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47521 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47522 +#else
47523 inode->i_gid = cred->egid;
47524 +#endif
47525 rcu_read_unlock();
47526 }
47527 security_task_to_inode(task, inode);
47528 @@ -1574,10 +1665,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47529 return -ENOENT;
47530 }
47531 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47532 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47533 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47534 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47535 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47536 +#endif
47537 task_dumpable(task)) {
47538 cred = __task_cred(task);
47539 stat->uid = cred->euid;
47540 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47541 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47542 +#else
47543 stat->gid = cred->egid;
47544 +#endif
47545 }
47546 }
47547 rcu_read_unlock();
47548 @@ -1615,11 +1715,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47549
47550 if (task) {
47551 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47552 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47553 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47554 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47555 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47556 +#endif
47557 task_dumpable(task)) {
47558 rcu_read_lock();
47559 cred = __task_cred(task);
47560 inode->i_uid = cred->euid;
47561 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47562 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47563 +#else
47564 inode->i_gid = cred->egid;
47565 +#endif
47566 rcu_read_unlock();
47567 } else {
47568 inode->i_uid = 0;
47569 @@ -1737,7 +1846,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
47570 int fd = proc_fd(inode);
47571
47572 if (task) {
47573 - files = get_files_struct(task);
47574 + if (!gr_acl_handle_procpidmem(task))
47575 + files = get_files_struct(task);
47576 put_task_struct(task);
47577 }
47578 if (files) {
47579 @@ -2335,11 +2445,21 @@ static const struct file_operations proc_map_files_operations = {
47580 */
47581 static int proc_fd_permission(struct inode *inode, int mask)
47582 {
47583 + struct task_struct *task;
47584 int rv = generic_permission(inode, mask);
47585 - if (rv == 0)
47586 - return 0;
47587 +
47588 if (task_pid(current) == proc_pid(inode))
47589 rv = 0;
47590 +
47591 + task = get_proc_task(inode);
47592 + if (task == NULL)
47593 + return rv;
47594 +
47595 + if (gr_acl_handle_procpidmem(task))
47596 + rv = -EACCES;
47597 +
47598 + put_task_struct(task);
47599 +
47600 return rv;
47601 }
47602
47603 @@ -2449,6 +2569,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
47604 if (!task)
47605 goto out_no_task;
47606
47607 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47608 + goto out;
47609 +
47610 /*
47611 * Yes, it does not scale. And it should not. Don't add
47612 * new entries into /proc/<tgid>/ without very good reasons.
47613 @@ -2493,6 +2616,9 @@ static int proc_pident_readdir(struct file *filp,
47614 if (!task)
47615 goto out_no_task;
47616
47617 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47618 + goto out;
47619 +
47620 ret = 0;
47621 i = filp->f_pos;
47622 switch (i) {
47623 @@ -2763,7 +2889,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
47624 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
47625 void *cookie)
47626 {
47627 - char *s = nd_get_link(nd);
47628 + const char *s = nd_get_link(nd);
47629 if (!IS_ERR(s))
47630 __putname(s);
47631 }
47632 @@ -2964,7 +3090,7 @@ static const struct pid_entry tgid_base_stuff[] = {
47633 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
47634 #endif
47635 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47636 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47637 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47638 INF("syscall", S_IRUGO, proc_pid_syscall),
47639 #endif
47640 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47641 @@ -2989,10 +3115,10 @@ static const struct pid_entry tgid_base_stuff[] = {
47642 #ifdef CONFIG_SECURITY
47643 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47644 #endif
47645 -#ifdef CONFIG_KALLSYMS
47646 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47647 INF("wchan", S_IRUGO, proc_pid_wchan),
47648 #endif
47649 -#ifdef CONFIG_STACKTRACE
47650 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47651 ONE("stack", S_IRUGO, proc_pid_stack),
47652 #endif
47653 #ifdef CONFIG_SCHEDSTATS
47654 @@ -3026,6 +3152,9 @@ static const struct pid_entry tgid_base_stuff[] = {
47655 #ifdef CONFIG_HARDWALL
47656 INF("hardwall", S_IRUGO, proc_pid_hardwall),
47657 #endif
47658 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47659 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
47660 +#endif
47661 };
47662
47663 static int proc_tgid_base_readdir(struct file * filp,
47664 @@ -3152,7 +3281,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
47665 if (!inode)
47666 goto out;
47667
47668 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47669 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
47670 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47671 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47672 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
47673 +#else
47674 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
47675 +#endif
47676 inode->i_op = &proc_tgid_base_inode_operations;
47677 inode->i_fop = &proc_tgid_base_operations;
47678 inode->i_flags|=S_IMMUTABLE;
47679 @@ -3194,7 +3330,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
47680 if (!task)
47681 goto out;
47682
47683 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47684 + goto out_put_task;
47685 +
47686 result = proc_pid_instantiate(dir, dentry, task, NULL);
47687 +out_put_task:
47688 put_task_struct(task);
47689 out:
47690 return result;
47691 @@ -3257,6 +3397,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
47692 static int fake_filldir(void *buf, const char *name, int namelen,
47693 loff_t offset, u64 ino, unsigned d_type)
47694 {
47695 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
47696 + __buf->error = -EINVAL;
47697 return 0;
47698 }
47699
47700 @@ -3323,7 +3465,7 @@ static const struct pid_entry tid_base_stuff[] = {
47701 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
47702 #endif
47703 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47704 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47705 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47706 INF("syscall", S_IRUGO, proc_pid_syscall),
47707 #endif
47708 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47709 @@ -3347,10 +3489,10 @@ static const struct pid_entry tid_base_stuff[] = {
47710 #ifdef CONFIG_SECURITY
47711 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47712 #endif
47713 -#ifdef CONFIG_KALLSYMS
47714 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47715 INF("wchan", S_IRUGO, proc_pid_wchan),
47716 #endif
47717 -#ifdef CONFIG_STACKTRACE
47718 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47719 ONE("stack", S_IRUGO, proc_pid_stack),
47720 #endif
47721 #ifdef CONFIG_SCHEDSTATS
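Among the fs/proc/base.c changes above, has_pid_permissions() now hides another user's /proc/<pid> directory unless the viewer is root, owns the task, or (under GRKERNSEC_PROC_USERGROUP) belongs to the configured proc group; everyone else falls through to the hidepid handling. A hedged user-space sketch of that decision; struct viewer and may_see_pid() are illustrative stand-ins for the kernel's credential checks, not names from the patch.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for current_cred()->uid, __task_cred(task)->uid
 * and in_group_p(CONFIG_GRKERNSEC_PROC_GID). */
struct viewer {
    unsigned int uid;
    bool in_proc_gid;
};

static bool may_see_pid(const struct viewer *v, unsigned int task_uid)
{
    if (v->uid == 0)            /* root sees everything                 */
        return true;
    if (v->uid == task_uid)     /* your own tasks stay visible          */
        return true;
    if (v->in_proc_gid)         /* GRKERNSEC_PROC_USERGROUP member      */
        return true;
    return false;               /* otherwise hidden (modulo hidepid)    */
}

int main(void)
{
    struct viewer alice = { .uid = 1000, .in_proc_gid = false };

    printf("alice sees a uid-1000 task: %d\n", may_see_pid(&alice, 1000));
    printf("alice sees a uid-0    task: %d\n", may_see_pid(&alice, 0));
    return 0;
}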
47722 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
47723 index 82676e3..5f8518a 100644
47724 --- a/fs/proc/cmdline.c
47725 +++ b/fs/proc/cmdline.c
47726 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
47727
47728 static int __init proc_cmdline_init(void)
47729 {
47730 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47731 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
47732 +#else
47733 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
47734 +#endif
47735 return 0;
47736 }
47737 module_init(proc_cmdline_init);
47738 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
47739 index b143471..bb105e5 100644
47740 --- a/fs/proc/devices.c
47741 +++ b/fs/proc/devices.c
47742 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
47743
47744 static int __init proc_devices_init(void)
47745 {
47746 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47747 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
47748 +#else
47749 proc_create("devices", 0, NULL, &proc_devinfo_operations);
47750 +#endif
47751 return 0;
47752 }
47753 module_init(proc_devices_init);
47754 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
47755 index 205c922..2ee4c57 100644
47756 --- a/fs/proc/inode.c
47757 +++ b/fs/proc/inode.c
47758 @@ -21,11 +21,17 @@
47759 #include <linux/seq_file.h>
47760 #include <linux/slab.h>
47761 #include <linux/mount.h>
47762 +#include <linux/grsecurity.h>
47763
47764 #include <asm/uaccess.h>
47765
47766 #include "internal.h"
47767
47768 +#ifdef CONFIG_PROC_SYSCTL
47769 +extern const struct inode_operations proc_sys_inode_operations;
47770 +extern const struct inode_operations proc_sys_dir_operations;
47771 +#endif
47772 +
47773 static void proc_evict_inode(struct inode *inode)
47774 {
47775 struct proc_dir_entry *de;
47776 @@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
47777 ns_ops = PROC_I(inode)->ns_ops;
47778 if (ns_ops && ns_ops->put)
47779 ns_ops->put(PROC_I(inode)->ns);
47780 +
47781 +#ifdef CONFIG_PROC_SYSCTL
47782 + if (inode->i_op == &proc_sys_inode_operations ||
47783 + inode->i_op == &proc_sys_dir_operations)
47784 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
47785 +#endif
47786 +
47787 }
47788
47789 static struct kmem_cache * proc_inode_cachep;
47790 @@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
47791 if (de->mode) {
47792 inode->i_mode = de->mode;
47793 inode->i_uid = de->uid;
47794 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47795 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47796 +#else
47797 inode->i_gid = de->gid;
47798 +#endif
47799 }
47800 if (de->size)
47801 inode->i_size = de->size;
47802 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
47803 index 5f79bb8..eeccee4 100644
47804 --- a/fs/proc/internal.h
47805 +++ b/fs/proc/internal.h
47806 @@ -54,6 +54,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47807 struct pid *pid, struct task_struct *task);
47808 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47809 struct pid *pid, struct task_struct *task);
47810 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47811 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
47812 +#endif
47813 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
47814
47815 extern const struct file_operations proc_pid_maps_operations;
47816 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
47817 index 86c67ee..cdca321 100644
47818 --- a/fs/proc/kcore.c
47819 +++ b/fs/proc/kcore.c
47820 @@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47821 * the addresses in the elf_phdr on our list.
47822 */
47823 start = kc_offset_to_vaddr(*fpos - elf_buflen);
47824 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
47825 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
47826 + if (tsz > buflen)
47827 tsz = buflen;
47828 -
47829 +
47830 while (buflen) {
47831 struct kcore_list *m;
47832
47833 @@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47834 kfree(elf_buf);
47835 } else {
47836 if (kern_addr_valid(start)) {
47837 - unsigned long n;
47838 + char *elf_buf;
47839 + mm_segment_t oldfs;
47840
47841 - n = copy_to_user(buffer, (char *)start, tsz);
47842 - /*
47843 - * We cannot distinguish between fault on source
47844 - * and fault on destination. When this happens
47845 - * we clear too and hope it will trigger the
47846 - * EFAULT again.
47847 - */
47848 - if (n) {
47849 - if (clear_user(buffer + tsz - n,
47850 - n))
47851 + elf_buf = kmalloc(tsz, GFP_KERNEL);
47852 + if (!elf_buf)
47853 + return -ENOMEM;
47854 + oldfs = get_fs();
47855 + set_fs(KERNEL_DS);
47856 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
47857 + set_fs(oldfs);
47858 + if (copy_to_user(buffer, elf_buf, tsz)) {
47859 + kfree(elf_buf);
47860 return -EFAULT;
47861 + }
47862 }
47863 + set_fs(oldfs);
47864 + kfree(elf_buf);
47865 } else {
47866 if (clear_user(buffer, tsz))
47867 return -EFAULT;
47868 @@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47869
47870 static int open_kcore(struct inode *inode, struct file *filp)
47871 {
47872 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
47873 + return -EPERM;
47874 +#endif
47875 if (!capable(CAP_SYS_RAWIO))
47876 return -EPERM;
47877 if (kcore_need_update)
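The read_kcore() hunk above replaces the direct copy_to_user() from the kernel virtual address with a kmalloc'd bounce buffer: the data is first pulled in under KERNEL_DS with __copy_from_user() and only then copied out to the caller, so a fault on the kernel source and a fault on the user destination are detected as separate steps instead of the old "clear and hope" fallback. A user-space sketch of the bounce-buffer shape, with malloc/memcpy/fwrite standing in for the kernel copy helpers.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stage the data in a temporary buffer first, then hand it to the
 * consumer, so each step can fail (and be reported) independently. */
static int copy_via_bounce(FILE *dst, const void *src, size_t len)
{
    char *bounce = malloc(len);                  /* kmalloc(tsz, GFP_KERNEL)  */
    if (!bounce)
        return -1;                               /* -ENOMEM                   */

    memcpy(bounce, src, len);                    /* __copy_from_user() step   */

    if (fwrite(bounce, 1, len, dst) != len) {    /* copy_to_user() step       */
        free(bounce);
        return -2;                               /* -EFAULT                   */
    }
    free(bounce);
    return 0;
}

int main(void)
{
    const char msg[] = "kcore-style bounce copy\n";

    return copy_via_bounce(stdout, msg, sizeof(msg) - 1) ? 1 : 0;
}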
47878 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
47879 index 80e4645..53e5fcf 100644
47880 --- a/fs/proc/meminfo.c
47881 +++ b/fs/proc/meminfo.c
47882 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
47883 vmi.used >> 10,
47884 vmi.largest_chunk >> 10
47885 #ifdef CONFIG_MEMORY_FAILURE
47886 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
47887 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
47888 #endif
47889 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
47890 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
47891 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
47892 index b1822dd..df622cb 100644
47893 --- a/fs/proc/nommu.c
47894 +++ b/fs/proc/nommu.c
47895 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
47896 if (len < 1)
47897 len = 1;
47898 seq_printf(m, "%*c", len, ' ');
47899 - seq_path(m, &file->f_path, "");
47900 + seq_path(m, &file->f_path, "\n\\");
47901 }
47902
47903 seq_putc(m, '\n');
47904 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
47905 index 06e1cc1..177cd98 100644
47906 --- a/fs/proc/proc_net.c
47907 +++ b/fs/proc/proc_net.c
47908 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
47909 struct task_struct *task;
47910 struct nsproxy *ns;
47911 struct net *net = NULL;
47912 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47913 + const struct cred *cred = current_cred();
47914 +#endif
47915 +
47916 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47917 + if (cred->fsuid)
47918 + return net;
47919 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47920 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
47921 + return net;
47922 +#endif
47923
47924 rcu_read_lock();
47925 task = pid_task(proc_pid(dir), PIDTYPE_PID);
47926 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
47927 index 21d836f..bebf3ee 100644
47928 --- a/fs/proc/proc_sysctl.c
47929 +++ b/fs/proc/proc_sysctl.c
47930 @@ -12,11 +12,15 @@
47931 #include <linux/module.h>
47932 #include "internal.h"
47933
47934 +extern int gr_handle_chroot_sysctl(const int op);
47935 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
47936 + const int op);
47937 +
47938 static const struct dentry_operations proc_sys_dentry_operations;
47939 static const struct file_operations proc_sys_file_operations;
47940 -static const struct inode_operations proc_sys_inode_operations;
47941 +const struct inode_operations proc_sys_inode_operations;
47942 static const struct file_operations proc_sys_dir_file_operations;
47943 -static const struct inode_operations proc_sys_dir_operations;
47944 +const struct inode_operations proc_sys_dir_operations;
47945
47946 void proc_sys_poll_notify(struct ctl_table_poll *poll)
47947 {
47948 @@ -470,8 +474,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
47949
47950 err = NULL;
47951 d_set_d_op(dentry, &proc_sys_dentry_operations);
47952 +
47953 + gr_handle_proc_create(dentry, inode);
47954 +
47955 d_add(dentry, inode);
47956
47957 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
47958 + err = ERR_PTR(-ENOENT);
47959 +
47960 out:
47961 sysctl_head_finish(head);
47962 return err;
47963 @@ -483,18 +493,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
47964 struct inode *inode = filp->f_path.dentry->d_inode;
47965 struct ctl_table_header *head = grab_header(inode);
47966 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
47967 + int op = write ? MAY_WRITE : MAY_READ;
47968 ssize_t error;
47969 size_t res;
47970
47971 if (IS_ERR(head))
47972 return PTR_ERR(head);
47973
47974 +
47975 /*
47976 * At this point we know that the sysctl was not unregistered
47977 * and won't be until we finish.
47978 */
47979 error = -EPERM;
47980 - if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
47981 + if (sysctl_perm(head->root, table, op))
47982 goto out;
47983
47984 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
47985 @@ -502,6 +514,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
47986 if (!table->proc_handler)
47987 goto out;
47988
47989 +#ifdef CONFIG_GRKERNSEC
47990 + error = -EPERM;
47991 + if (gr_handle_chroot_sysctl(op))
47992 + goto out;
47993 + dget(filp->f_path.dentry);
47994 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
47995 + dput(filp->f_path.dentry);
47996 + goto out;
47997 + }
47998 + dput(filp->f_path.dentry);
47999 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
48000 + goto out;
48001 + if (write && !capable(CAP_SYS_ADMIN))
48002 + goto out;
48003 +#endif
48004 +
48005 /* careful: calling conventions are nasty here */
48006 res = count;
48007 error = table->proc_handler(table, write, buf, &res, ppos);
48008 @@ -599,6 +627,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48009 return -ENOMEM;
48010 } else {
48011 d_set_d_op(child, &proc_sys_dentry_operations);
48012 +
48013 + gr_handle_proc_create(child, inode);
48014 +
48015 d_add(child, inode);
48016 }
48017 } else {
48018 @@ -642,6 +673,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48019 if ((*pos)++ < file->f_pos)
48020 return 0;
48021
48022 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
48023 + return 0;
48024 +
48025 if (unlikely(S_ISLNK(table->mode)))
48026 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
48027 else
48028 @@ -759,6 +793,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48029 if (IS_ERR(head))
48030 return PTR_ERR(head);
48031
48032 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
48033 + return -ENOENT;
48034 +
48035 generic_fillattr(inode, stat);
48036 if (table)
48037 stat->mode = (stat->mode & S_IFMT) | table->mode;
48038 @@ -781,13 +818,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
48039 .llseek = generic_file_llseek,
48040 };
48041
48042 -static const struct inode_operations proc_sys_inode_operations = {
48043 +const struct inode_operations proc_sys_inode_operations = {
48044 .permission = proc_sys_permission,
48045 .setattr = proc_sys_setattr,
48046 .getattr = proc_sys_getattr,
48047 };
48048
48049 -static const struct inode_operations proc_sys_dir_operations = {
48050 +const struct inode_operations proc_sys_dir_operations = {
48051 .lookup = proc_sys_lookup,
48052 .permission = proc_sys_permission,
48053 .setattr = proc_sys_setattr,
48054 diff --git a/fs/proc/root.c b/fs/proc/root.c
48055 index eed44bf..abeb499 100644
48056 --- a/fs/proc/root.c
48057 +++ b/fs/proc/root.c
48058 @@ -188,7 +188,15 @@ void __init proc_root_init(void)
48059 #ifdef CONFIG_PROC_DEVICETREE
48060 proc_device_tree_init();
48061 #endif
48062 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48063 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48064 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48065 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48066 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48067 +#endif
48068 +#else
48069 proc_mkdir("bus", NULL);
48070 +#endif
48071 proc_sys_init();
48072 }
48073
48074 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48075 index 1030a71..096c28b 100644
48076 --- a/fs/proc/task_mmu.c
48077 +++ b/fs/proc/task_mmu.c
48078 @@ -11,12 +11,19 @@
48079 #include <linux/rmap.h>
48080 #include <linux/swap.h>
48081 #include <linux/swapops.h>
48082 +#include <linux/grsecurity.h>
48083
48084 #include <asm/elf.h>
48085 #include <asm/uaccess.h>
48086 #include <asm/tlbflush.h>
48087 #include "internal.h"
48088
48089 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48090 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48091 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48092 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48093 +#endif
48094 +
48095 void task_mem(struct seq_file *m, struct mm_struct *mm)
48096 {
48097 unsigned long data, text, lib, swap;
48098 @@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48099 "VmExe:\t%8lu kB\n"
48100 "VmLib:\t%8lu kB\n"
48101 "VmPTE:\t%8lu kB\n"
48102 - "VmSwap:\t%8lu kB\n",
48103 - hiwater_vm << (PAGE_SHIFT-10),
48104 + "VmSwap:\t%8lu kB\n"
48105 +
48106 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48107 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48108 +#endif
48109 +
48110 + ,hiwater_vm << (PAGE_SHIFT-10),
48111 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48112 mm->locked_vm << (PAGE_SHIFT-10),
48113 mm->pinned_vm << (PAGE_SHIFT-10),
48114 @@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48115 data << (PAGE_SHIFT-10),
48116 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48117 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48118 - swap << (PAGE_SHIFT-10));
48119 + swap << (PAGE_SHIFT-10)
48120 +
48121 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48122 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48123 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
48124 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
48125 +#else
48126 + , mm->context.user_cs_base
48127 + , mm->context.user_cs_limit
48128 +#endif
48129 +#endif
48130 +
48131 + );
48132 }
48133
48134 unsigned long task_vsize(struct mm_struct *mm)
48135 @@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48136 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48137 }
48138
48139 - /* We don't show the stack guard page in /proc/maps */
48140 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48141 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48142 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48143 +#else
48144 start = vma->vm_start;
48145 - if (stack_guard_page_start(vma, start))
48146 - start += PAGE_SIZE;
48147 end = vma->vm_end;
48148 - if (stack_guard_page_end(vma, end))
48149 - end -= PAGE_SIZE;
48150 +#endif
48151
48152 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48153 start,
48154 @@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48155 flags & VM_WRITE ? 'w' : '-',
48156 flags & VM_EXEC ? 'x' : '-',
48157 flags & VM_MAYSHARE ? 's' : 'p',
48158 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48159 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48160 +#else
48161 pgoff,
48162 +#endif
48163 MAJOR(dev), MINOR(dev), ino, &len);
48164
48165 /*
48166 @@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48167 */
48168 if (file) {
48169 pad_len_spaces(m, len);
48170 - seq_path(m, &file->f_path, "\n");
48171 + seq_path(m, &file->f_path, "\n\\");
48172 goto done;
48173 }
48174
48175 @@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48176 * Thread stack in /proc/PID/task/TID/maps or
48177 * the main process stack.
48178 */
48179 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
48180 - vma->vm_end >= mm->start_stack)) {
48181 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48182 + (vma->vm_start <= mm->start_stack &&
48183 + vma->vm_end >= mm->start_stack)) {
48184 name = "[stack]";
48185 } else {
48186 /* Thread stack in /proc/PID/maps */
48187 @@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
48188 struct proc_maps_private *priv = m->private;
48189 struct task_struct *task = priv->task;
48190
48191 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48192 + if (current->exec_id != m->exec_id) {
48193 + gr_log_badprocpid("maps");
48194 + return 0;
48195 + }
48196 +#endif
48197 +
48198 show_map_vma(m, vma, is_pid);
48199
48200 if (m->count < m->size) /* vma is copied successfully */
48201 @@ -482,12 +518,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48202 .private = &mss,
48203 };
48204
48205 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48206 + if (current->exec_id != m->exec_id) {
48207 + gr_log_badprocpid("smaps");
48208 + return 0;
48209 + }
48210 +#endif
48211 memset(&mss, 0, sizeof mss);
48212 - mss.vma = vma;
48213 - /* mmap_sem is held in m_start */
48214 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48215 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48216 -
48217 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48218 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48219 +#endif
48220 + mss.vma = vma;
48221 + /* mmap_sem is held in m_start */
48222 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48223 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48224 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48225 + }
48226 +#endif
48227 show_map_vma(m, vma, is_pid);
48228
48229 seq_printf(m,
48230 @@ -505,7 +552,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48231 "KernelPageSize: %8lu kB\n"
48232 "MMUPageSize: %8lu kB\n"
48233 "Locked: %8lu kB\n",
48234 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48235 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48236 +#else
48237 (vma->vm_end - vma->vm_start) >> 10,
48238 +#endif
48239 mss.resident >> 10,
48240 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48241 mss.shared_clean >> 10,
48242 @@ -784,7 +835,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
48243
48244 /* find the first VMA at or above 'addr' */
48245 vma = find_vma(walk->mm, addr);
48246 - if (pmd_trans_huge_lock(pmd, vma) == 1) {
48247 + if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
48248 for (; addr != end; addr += PAGE_SIZE) {
48249 unsigned long offset;
48250
48251 @@ -1138,6 +1189,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48252 int n;
48253 char buffer[50];
48254
48255 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48256 + if (current->exec_id != m->exec_id) {
48257 + gr_log_badprocpid("numa_maps");
48258 + return 0;
48259 + }
48260 +#endif
48261 +
48262 if (!mm)
48263 return 0;
48264
48265 @@ -1155,11 +1213,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48266 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48267 mpol_cond_put(pol);
48268
48269 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48270 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48271 +#else
48272 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48273 +#endif
48274
48275 if (file) {
48276 seq_printf(m, " file=");
48277 - seq_path(m, &file->f_path, "\n\t= ");
48278 + seq_path(m, &file->f_path, "\n\t\\= ");
48279 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48280 seq_printf(m, " heap");
48281 } else {
48282 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48283 index 74fe164..899e77b 100644
48284 --- a/fs/proc/task_nommu.c
48285 +++ b/fs/proc/task_nommu.c
48286 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48287 else
48288 bytes += kobjsize(mm);
48289
48290 - if (current->fs && current->fs->users > 1)
48291 + if (current->fs && atomic_read(&current->fs->users) > 1)
48292 sbytes += kobjsize(current->fs);
48293 else
48294 bytes += kobjsize(current->fs);
48295 @@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
48296
48297 if (file) {
48298 pad_len_spaces(m, len);
48299 - seq_path(m, &file->f_path, "");
48300 + seq_path(m, &file->f_path, "\n\\");
48301 } else if (mm) {
48302 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
48303
48304 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48305 index d67908b..d13f6a6 100644
48306 --- a/fs/quota/netlink.c
48307 +++ b/fs/quota/netlink.c
48308 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48309 void quota_send_warning(short type, unsigned int id, dev_t dev,
48310 const char warntype)
48311 {
48312 - static atomic_t seq;
48313 + static atomic_unchecked_t seq;
48314 struct sk_buff *skb;
48315 void *msg_head;
48316 int ret;
48317 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48318 "VFS: Not enough memory to send quota warning.\n");
48319 return;
48320 }
48321 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48322 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48323 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48324 if (!msg_head) {
48325 printk(KERN_ERR
48326 diff --git a/fs/readdir.c b/fs/readdir.c
48327 index cc0a822..43cb195 100644
48328 --- a/fs/readdir.c
48329 +++ b/fs/readdir.c
48330 @@ -17,6 +17,7 @@
48331 #include <linux/security.h>
48332 #include <linux/syscalls.h>
48333 #include <linux/unistd.h>
48334 +#include <linux/namei.h>
48335
48336 #include <asm/uaccess.h>
48337
48338 @@ -67,6 +68,7 @@ struct old_linux_dirent {
48339
48340 struct readdir_callback {
48341 struct old_linux_dirent __user * dirent;
48342 + struct file * file;
48343 int result;
48344 };
48345
48346 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48347 buf->result = -EOVERFLOW;
48348 return -EOVERFLOW;
48349 }
48350 +
48351 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48352 + return 0;
48353 +
48354 buf->result++;
48355 dirent = buf->dirent;
48356 if (!access_ok(VERIFY_WRITE, dirent,
48357 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48358
48359 buf.result = 0;
48360 buf.dirent = dirent;
48361 + buf.file = file;
48362
48363 error = vfs_readdir(file, fillonedir, &buf);
48364 if (buf.result)
48365 @@ -142,6 +149,7 @@ struct linux_dirent {
48366 struct getdents_callback {
48367 struct linux_dirent __user * current_dir;
48368 struct linux_dirent __user * previous;
48369 + struct file * file;
48370 int count;
48371 int error;
48372 };
48373 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48374 buf->error = -EOVERFLOW;
48375 return -EOVERFLOW;
48376 }
48377 +
48378 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48379 + return 0;
48380 +
48381 dirent = buf->previous;
48382 if (dirent) {
48383 if (__put_user(offset, &dirent->d_off))
48384 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48385 buf.previous = NULL;
48386 buf.count = count;
48387 buf.error = 0;
48388 + buf.file = file;
48389
48390 error = vfs_readdir(file, filldir, &buf);
48391 if (error >= 0)
48392 @@ -229,6 +242,7 @@ out:
48393 struct getdents_callback64 {
48394 struct linux_dirent64 __user * current_dir;
48395 struct linux_dirent64 __user * previous;
48396 + struct file *file;
48397 int count;
48398 int error;
48399 };
48400 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48401 buf->error = -EINVAL; /* only used if we fail.. */
48402 if (reclen > buf->count)
48403 return -EINVAL;
48404 +
48405 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48406 + return 0;
48407 +
48408 dirent = buf->previous;
48409 if (dirent) {
48410 if (__put_user(offset, &dirent->d_off))
48411 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48412
48413 buf.current_dir = dirent;
48414 buf.previous = NULL;
48415 + buf.file = file;
48416 buf.count = count;
48417 buf.error = 0;
48418
48419 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48420 error = buf.error;
48421 lastdirent = buf.previous;
48422 if (lastdirent) {
48423 - typeof(lastdirent->d_off) d_off = file->f_pos;
48424 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48425 if (__put_user(d_off, &lastdirent->d_off))
48426 error = -EFAULT;
48427 else
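The fs/readdir.c hunks above add a struct file * member to each getdents callback structure so the filldir routines can ask gr_acl_handle_filldir() whether a directory entry may be shown; returning 0 from the callback silently drops the entry. A user-space sketch of that callback-with-context shape; allow_entry() is a hypothetical filter standing in for the grsecurity hook.

#include <dirent.h>
#include <stdio.h>

/* Callback context, mirroring the extra "struct file *file" member the
 * patch adds to readdir_callback / getdents_callback. */
struct list_ctx {
    const char *dirpath;   /* stands in for the struct file the hook inspects */
    int emitted;
};

/* Hypothetical stand-in for gr_acl_handle_filldir(): 0 hides the entry. */
static int allow_entry(const struct list_ctx *ctx, const char *name)
{
    (void)ctx;
    return name[0] != '.';             /* e.g. hide dotfiles */
}

static int fill_one(struct list_ctx *ctx, const char *name)
{
    if (!allow_entry(ctx, name))
        return 0;                      /* silently skipped, like the hook */
    printf("%s\n", name);
    ctx->emitted++;
    return 0;
}

int main(void)
{
    struct list_ctx ctx = { .dirpath = ".", .emitted = 0 };
    DIR *d = opendir(ctx.dirpath);
    struct dirent *de;

    if (!d)
        return 1;
    while ((de = readdir(d)) != NULL)
        fill_one(&ctx, de->d_name);
    closedir(d);
    fprintf(stderr, "%d entries shown\n", ctx.emitted);
    return 0;
}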
48428 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48429 index 2b7882b..1c5ef48 100644
48430 --- a/fs/reiserfs/do_balan.c
48431 +++ b/fs/reiserfs/do_balan.c
48432 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48433 return;
48434 }
48435
48436 - atomic_inc(&(fs_generation(tb->tb_sb)));
48437 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48438 do_balance_starts(tb);
48439
48440 /* balance leaf returns 0 except if combining L R and S into
48441 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48442 index 2c1ade6..8c59d8d 100644
48443 --- a/fs/reiserfs/procfs.c
48444 +++ b/fs/reiserfs/procfs.c
48445 @@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48446 "SMALL_TAILS " : "NO_TAILS ",
48447 replay_only(sb) ? "REPLAY_ONLY " : "",
48448 convert_reiserfs(sb) ? "CONV " : "",
48449 - atomic_read(&r->s_generation_counter),
48450 + atomic_read_unchecked(&r->s_generation_counter),
48451 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48452 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48453 SF(s_good_search_by_key_reada), SF(s_bmaps),
48454 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
48455 index a59d271..e12d1cf 100644
48456 --- a/fs/reiserfs/reiserfs.h
48457 +++ b/fs/reiserfs/reiserfs.h
48458 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
48459 /* Comment? -Hans */
48460 wait_queue_head_t s_wait;
48461 /* To be obsoleted soon by per buffer seals.. -Hans */
48462 - atomic_t s_generation_counter; // increased by one every time the
48463 + atomic_unchecked_t s_generation_counter; // increased by one every time the
48464 // tree gets re-balanced
48465 unsigned long s_properties; /* File system properties. Currently holds
48466 on-disk FS format */
48467 @@ -1973,7 +1973,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
48468 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
48469
48470 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
48471 -#define get_generation(s) atomic_read (&fs_generation(s))
48472 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
48473 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
48474 #define __fs_changed(gen,s) (gen != get_generation (s))
48475 #define fs_changed(gen,s) \
48476 diff --git a/fs/select.c b/fs/select.c
48477 index 17d33d0..da0bf5c 100644
48478 --- a/fs/select.c
48479 +++ b/fs/select.c
48480 @@ -20,6 +20,7 @@
48481 #include <linux/export.h>
48482 #include <linux/slab.h>
48483 #include <linux/poll.h>
48484 +#include <linux/security.h>
48485 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
48486 #include <linux/file.h>
48487 #include <linux/fdtable.h>
48488 @@ -833,6 +834,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
48489 struct poll_list *walk = head;
48490 unsigned long todo = nfds;
48491
48492 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
48493 if (nfds > rlimit(RLIMIT_NOFILE))
48494 return -EINVAL;
48495
48496 diff --git a/fs/seq_file.c b/fs/seq_file.c
48497 index 0cbd049..cab1127 100644
48498 --- a/fs/seq_file.c
48499 +++ b/fs/seq_file.c
48500 @@ -9,6 +9,7 @@
48501 #include <linux/export.h>
48502 #include <linux/seq_file.h>
48503 #include <linux/slab.h>
48504 +#include <linux/sched.h>
48505
48506 #include <asm/uaccess.h>
48507 #include <asm/page.h>
48508 @@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
48509 memset(p, 0, sizeof(*p));
48510 mutex_init(&p->lock);
48511 p->op = op;
48512 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48513 + p->exec_id = current->exec_id;
48514 +#endif
48515
48516 /*
48517 * Wrappers around seq_open(e.g. swaps_open) need to be
48518 @@ -567,7 +571,7 @@ static void single_stop(struct seq_file *p, void *v)
48519 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
48520 void *data)
48521 {
48522 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
48523 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
48524 int res = -ENOMEM;
48525
48526 if (op) {
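The fs/seq_file.c hunk above stamps every seq_file with the opener's exec_id when GRKERNSEC_PROC_MEMMAP is enabled; the /proc show routines patched earlier (stat, statm, maps, smaps, numa_maps, mem) compare that stamp against current->exec_id, log through gr_log_badprocpid() and return an empty result when the descriptor has been carried across an execve of a different image. A hedged user-space sketch of the stamp-then-check pattern; the types and helpers are illustrative, not from the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for m->exec_id and current->exec_id. */
struct seq_handle {
    uint64_t opener_exec_id;
};

static uint64_t current_exec_id;       /* bumped on every "execve" below */

static void handle_open(struct seq_handle *h)
{
    h->opener_exec_id = current_exec_id;   /* p->exec_id = current->exec_id */
}

static int handle_show(const struct seq_handle *h)
{
    if (h->opener_exec_id != current_exec_id) {
        fprintf(stderr, "badprocpid: stale fd across execve, refusing\n");
        return 0;                          /* the patch returns 0: empty read */
    }
    printf("contents\n");
    return 0;
}

int main(void)
{
    struct seq_handle h;

    handle_open(&h);
    handle_show(&h);      /* allowed                                   */
    current_exec_id++;    /* simulate execve() of a different image    */
    handle_show(&h);      /* refused                                   */
    return 0;
}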
48527 diff --git a/fs/splice.c b/fs/splice.c
48528 index f847684..156619e 100644
48529 --- a/fs/splice.c
48530 +++ b/fs/splice.c
48531 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48532 pipe_lock(pipe);
48533
48534 for (;;) {
48535 - if (!pipe->readers) {
48536 + if (!atomic_read(&pipe->readers)) {
48537 send_sig(SIGPIPE, current, 0);
48538 if (!ret)
48539 ret = -EPIPE;
48540 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48541 do_wakeup = 0;
48542 }
48543
48544 - pipe->waiting_writers++;
48545 + atomic_inc(&pipe->waiting_writers);
48546 pipe_wait(pipe);
48547 - pipe->waiting_writers--;
48548 + atomic_dec(&pipe->waiting_writers);
48549 }
48550
48551 pipe_unlock(pipe);
48552 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
48553 old_fs = get_fs();
48554 set_fs(get_ds());
48555 /* The cast to a user pointer is valid due to the set_fs() */
48556 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
48557 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
48558 set_fs(old_fs);
48559
48560 return res;
48561 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
48562 old_fs = get_fs();
48563 set_fs(get_ds());
48564 /* The cast to a user pointer is valid due to the set_fs() */
48565 - res = vfs_write(file, (const char __user *)buf, count, &pos);
48566 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
48567 set_fs(old_fs);
48568
48569 return res;
48570 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
48571 goto err;
48572
48573 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
48574 - vec[i].iov_base = (void __user *) page_address(page);
48575 + vec[i].iov_base = (void __force_user *) page_address(page);
48576 vec[i].iov_len = this_len;
48577 spd.pages[i] = page;
48578 spd.nr_pages++;
48579 @@ -845,10 +845,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
48580 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
48581 {
48582 while (!pipe->nrbufs) {
48583 - if (!pipe->writers)
48584 + if (!atomic_read(&pipe->writers))
48585 return 0;
48586
48587 - if (!pipe->waiting_writers && sd->num_spliced)
48588 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
48589 return 0;
48590
48591 if (sd->flags & SPLICE_F_NONBLOCK)
48592 @@ -1181,7 +1181,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
48593 * out of the pipe right after the splice_to_pipe(). So set
48594 * PIPE_READERS appropriately.
48595 */
48596 - pipe->readers = 1;
48597 + atomic_set(&pipe->readers, 1);
48598
48599 current->splice_pipe = pipe;
48600 }
48601 @@ -1733,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48602 ret = -ERESTARTSYS;
48603 break;
48604 }
48605 - if (!pipe->writers)
48606 + if (!atomic_read(&pipe->writers))
48607 break;
48608 - if (!pipe->waiting_writers) {
48609 + if (!atomic_read(&pipe->waiting_writers)) {
48610 if (flags & SPLICE_F_NONBLOCK) {
48611 ret = -EAGAIN;
48612 break;
48613 @@ -1767,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48614 pipe_lock(pipe);
48615
48616 while (pipe->nrbufs >= pipe->buffers) {
48617 - if (!pipe->readers) {
48618 + if (!atomic_read(&pipe->readers)) {
48619 send_sig(SIGPIPE, current, 0);
48620 ret = -EPIPE;
48621 break;
48622 @@ -1780,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48623 ret = -ERESTARTSYS;
48624 break;
48625 }
48626 - pipe->waiting_writers++;
48627 + atomic_inc(&pipe->waiting_writers);
48628 pipe_wait(pipe);
48629 - pipe->waiting_writers--;
48630 + atomic_dec(&pipe->waiting_writers);
48631 }
48632
48633 pipe_unlock(pipe);
48634 @@ -1818,14 +1818,14 @@ retry:
48635 pipe_double_lock(ipipe, opipe);
48636
48637 do {
48638 - if (!opipe->readers) {
48639 + if (!atomic_read(&opipe->readers)) {
48640 send_sig(SIGPIPE, current, 0);
48641 if (!ret)
48642 ret = -EPIPE;
48643 break;
48644 }
48645
48646 - if (!ipipe->nrbufs && !ipipe->writers)
48647 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
48648 break;
48649
48650 /*
48651 @@ -1922,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48652 pipe_double_lock(ipipe, opipe);
48653
48654 do {
48655 - if (!opipe->readers) {
48656 + if (!atomic_read(&opipe->readers)) {
48657 send_sig(SIGPIPE, current, 0);
48658 if (!ret)
48659 ret = -EPIPE;
48660 @@ -1967,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48661 * return EAGAIN if we have the potential of some data in the
48662 * future, otherwise just return 0
48663 */
48664 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
48665 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
48666 ret = -EAGAIN;
48667
48668 pipe_unlock(ipipe);
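The fs/splice.c hunks above follow one mechanical pattern: the pipe's readers, writers, and waiting_writers counters become atomic_t and every access goes through atomic_read/atomic_inc/atomic_dec/atomic_set. A minimal userspace analogue of that pattern with C11 atomics, using an invented struct pipe_like rather than the real struct pipe_inode_info:

#include <stdatomic.h>
#include <stdio.h>

struct pipe_like {
        atomic_uint readers;
        atomic_uint writers;
        atomic_uint waiting_writers;
};

int main(void)
{
        struct pipe_like p;
        atomic_init(&p.readers, 1);
        atomic_init(&p.writers, 1);
        atomic_init(&p.waiting_writers, 0);

        atomic_fetch_add(&p.waiting_writers, 1);   /* was: pipe->waiting_writers++ */
        atomic_fetch_sub(&p.waiting_writers, 1);   /* was: pipe->waiting_writers-- */

        if (atomic_load(&p.readers) == 0)          /* was: if (!pipe->readers)     */
                puts("no readers: splice_to_pipe() would raise SIGPIPE");

        atomic_store(&p.readers, 1);               /* was: pipe->readers = 1       */
        printf("readers=%u writers=%u\n",
               atomic_load(&p.readers), atomic_load(&p.writers));
        return 0;
}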
48669 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
48670 index 35a36d3..23424b2 100644
48671 --- a/fs/sysfs/dir.c
48672 +++ b/fs/sysfs/dir.c
48673 @@ -657,6 +657,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
48674 struct sysfs_dirent *sd;
48675 int rc;
48676
48677 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48678 + const char *parent_name = parent_sd->s_name;
48679 +
48680 + mode = S_IFDIR | S_IRWXU;
48681 +
48682 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
48683 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
48684 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
48685 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
48686 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
48687 +#endif
48688 +
48689 /* allocate */
48690 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
48691 if (!sd)
48692 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
48693 index 00012e3..8392349 100644
48694 --- a/fs/sysfs/file.c
48695 +++ b/fs/sysfs/file.c
48696 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
48697
48698 struct sysfs_open_dirent {
48699 atomic_t refcnt;
48700 - atomic_t event;
48701 + atomic_unchecked_t event;
48702 wait_queue_head_t poll;
48703 struct list_head buffers; /* goes through sysfs_buffer.list */
48704 };
48705 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
48706 if (!sysfs_get_active(attr_sd))
48707 return -ENODEV;
48708
48709 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
48710 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
48711 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
48712
48713 sysfs_put_active(attr_sd);
48714 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
48715 return -ENOMEM;
48716
48717 atomic_set(&new_od->refcnt, 0);
48718 - atomic_set(&new_od->event, 1);
48719 + atomic_set_unchecked(&new_od->event, 1);
48720 init_waitqueue_head(&new_od->poll);
48721 INIT_LIST_HEAD(&new_od->buffers);
48722 goto retry;
48723 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
48724
48725 sysfs_put_active(attr_sd);
48726
48727 - if (buffer->event != atomic_read(&od->event))
48728 + if (buffer->event != atomic_read_unchecked(&od->event))
48729 goto trigger;
48730
48731 return DEFAULT_POLLMASK;
48732 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
48733
48734 od = sd->s_attr.open;
48735 if (od) {
48736 - atomic_inc(&od->event);
48737 + atomic_inc_unchecked(&od->event);
48738 wake_up_interruptible(&od->poll);
48739 }
48740
48741 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
48742 index a7ac78f..02158e1 100644
48743 --- a/fs/sysfs/symlink.c
48744 +++ b/fs/sysfs/symlink.c
48745 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48746
48747 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48748 {
48749 - char *page = nd_get_link(nd);
48750 + const char *page = nd_get_link(nd);
48751 if (!IS_ERR(page))
48752 free_page((unsigned long)page);
48753 }
48754 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
48755 index c175b4d..8f36a16 100644
48756 --- a/fs/udf/misc.c
48757 +++ b/fs/udf/misc.c
48758 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
48759
48760 u8 udf_tag_checksum(const struct tag *t)
48761 {
48762 - u8 *data = (u8 *)t;
48763 + const u8 *data = (const u8 *)t;
48764 u8 checksum = 0;
48765 int i;
48766 for (i = 0; i < sizeof(struct tag); ++i)
48767 diff --git a/fs/utimes.c b/fs/utimes.c
48768 index ba653f3..06ea4b1 100644
48769 --- a/fs/utimes.c
48770 +++ b/fs/utimes.c
48771 @@ -1,6 +1,7 @@
48772 #include <linux/compiler.h>
48773 #include <linux/file.h>
48774 #include <linux/fs.h>
48775 +#include <linux/security.h>
48776 #include <linux/linkage.h>
48777 #include <linux/mount.h>
48778 #include <linux/namei.h>
48779 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
48780 goto mnt_drop_write_and_out;
48781 }
48782 }
48783 +
48784 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
48785 + error = -EACCES;
48786 + goto mnt_drop_write_and_out;
48787 + }
48788 +
48789 mutex_lock(&inode->i_mutex);
48790 error = notify_change(path->dentry, &newattrs);
48791 mutex_unlock(&inode->i_mutex);
48792 diff --git a/fs/xattr.c b/fs/xattr.c
48793 index 3c8c1cc..a83c398 100644
48794 --- a/fs/xattr.c
48795 +++ b/fs/xattr.c
48796 @@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
48797 * Extended attribute SET operations
48798 */
48799 static long
48800 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
48801 +setxattr(struct path *path, const char __user *name, const void __user *value,
48802 size_t size, int flags)
48803 {
48804 int error;
48805 @@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
48806 }
48807 }
48808
48809 - error = vfs_setxattr(d, kname, kvalue, size, flags);
48810 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
48811 + error = -EACCES;
48812 + goto out;
48813 + }
48814 +
48815 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
48816 out:
48817 if (vvalue)
48818 vfree(vvalue);
48819 @@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
48820 return error;
48821 error = mnt_want_write(path.mnt);
48822 if (!error) {
48823 - error = setxattr(path.dentry, name, value, size, flags);
48824 + error = setxattr(&path, name, value, size, flags);
48825 mnt_drop_write(path.mnt);
48826 }
48827 path_put(&path);
48828 @@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
48829 return error;
48830 error = mnt_want_write(path.mnt);
48831 if (!error) {
48832 - error = setxattr(path.dentry, name, value, size, flags);
48833 + error = setxattr(&path, name, value, size, flags);
48834 mnt_drop_write(path.mnt);
48835 }
48836 path_put(&path);
48837 @@ -400,17 +405,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
48838 const void __user *,value, size_t, size, int, flags)
48839 {
48840 struct file *f;
48841 - struct dentry *dentry;
48842 int error = -EBADF;
48843
48844 f = fget(fd);
48845 if (!f)
48846 return error;
48847 - dentry = f->f_path.dentry;
48848 - audit_inode(NULL, dentry);
48849 + audit_inode(NULL, f->f_path.dentry);
48850 error = mnt_want_write_file(f);
48851 if (!error) {
48852 - error = setxattr(dentry, name, value, size, flags);
48853 + error = setxattr(&f->f_path, name, value, size, flags);
48854 mnt_drop_write_file(f);
48855 }
48856 fput(f);
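The fs/xattr.c change above widens setxattr()'s first argument from a dentry to a struct path, so the added gr_acl_handle_setxattr() hook can see the vfsmount as well as the dentry before vfs_setxattr() runs. A self-contained userspace sketch of that interface shape; path_like, acl_allows_setxattr, and do_setxattr are stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

struct dentry_like { const char *name; };
struct mount_like  { bool read_only; };
struct path_like   { struct dentry_like *dentry; struct mount_like *mnt; };

/* Policy hook: with the full path it can consider the mount, not just the file. */
static bool acl_allows_setxattr(const struct path_like *p)
{
        return !p->mnt->read_only;
}

static int do_setxattr(const struct path_like *p, const char *name)
{
        if (!acl_allows_setxattr(p))
                return -13;                       /* -EACCES */
        printf("set %s on %s\n", name, p->dentry->name);
        return 0;                                 /* would call vfs_setxattr() here */
}

int main(void)
{
        struct dentry_like d = { "file.txt" };
        struct mount_like  m = { false };
        struct path_like   path = { &d, &m };
        return do_setxattr(&path, "user.comment");
}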
48857 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
48858 index 69d06b0..c0996e5 100644
48859 --- a/fs/xattr_acl.c
48860 +++ b/fs/xattr_acl.c
48861 @@ -17,8 +17,8 @@
48862 struct posix_acl *
48863 posix_acl_from_xattr(const void *value, size_t size)
48864 {
48865 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
48866 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
48867 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
48868 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
48869 int count;
48870 struct posix_acl *acl;
48871 struct posix_acl_entry *acl_e;
48872 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
48873 index 85e7e32..5344e52 100644
48874 --- a/fs/xfs/xfs_bmap.c
48875 +++ b/fs/xfs/xfs_bmap.c
48876 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
48877 int nmap,
48878 int ret_nmap);
48879 #else
48880 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
48881 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
48882 #endif /* DEBUG */
48883
48884 STATIC int
48885 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
48886 index 79d05e8..e3e5861 100644
48887 --- a/fs/xfs/xfs_dir2_sf.c
48888 +++ b/fs/xfs/xfs_dir2_sf.c
48889 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
48890 }
48891
48892 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
48893 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48894 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
48895 + char name[sfep->namelen];
48896 + memcpy(name, sfep->name, sfep->namelen);
48897 + if (filldir(dirent, name, sfep->namelen,
48898 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
48899 + *offset = off & 0x7fffffff;
48900 + return 0;
48901 + }
48902 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48903 off & 0x7fffffff, ino, DT_UNKNOWN)) {
48904 *offset = off & 0x7fffffff;
48905 return 0;
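In the xfs_dir2_sf_getdents() hunk above, when the entry name lives in the inode's inline data area the patch first copies it into an on-stack buffer and hands filldir() the copy instead of a pointer into the middle of the inode fork. A userspace sketch of that bounce-buffer pattern; emit_name and getdents_like are illustrative stand-ins, and the patch itself uses a variable-length array sized by sfep->namelen.

#include <stdio.h>
#include <string.h>

/* Stand-in for the filldir() callback. */
static int emit_name(const char *name, size_t len)
{
        printf("%.*s\n", (int)len, name);
        return 0;
}

static int getdents_like(const char *inline_area, size_t namelen)
{
        char name[256];                         /* the patch uses a VLA of namelen bytes */

        if (namelen >= sizeof(name))
                return -1;
        memcpy(name, inline_area, namelen);     /* copy out of the larger object */
        return emit_name(name, namelen);        /* callback sees only the copy   */
}

int main(void)
{
        const char blob[] = "somefile";         /* stands in for the inline fork data */
        return getdents_like(blob, strlen(blob));
}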
48906 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
48907 index 91f8ff5..0ce68f9 100644
48908 --- a/fs/xfs/xfs_ioctl.c
48909 +++ b/fs/xfs/xfs_ioctl.c
48910 @@ -128,7 +128,7 @@ xfs_find_handle(
48911 }
48912
48913 error = -EFAULT;
48914 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
48915 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
48916 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
48917 goto out_put;
48918
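The xfs_find_handle() hunk above adds a single guard: if the caller-supplied handle size is larger than the kernel's handle object, the copy to userspace is refused rather than reading past the end of the handle. The same idea in runnable userspace form; copy_handle_out and handle_like are stand-ins for the copy_to_user() call site.

#include <stdio.h>
#include <string.h>

struct handle_like { char data[24]; };

static int copy_handle_out(void *udst, const struct handle_like *h, size_t hsize)
{
        if (hsize > sizeof(*h))         /* the check the hunk adds */
                return -14;             /* -EFAULT */
        memcpy(udst, h, hsize);         /* stands in for copy_to_user() */
        return 0;
}

int main(void)
{
        struct handle_like h = { "example" };
        char out[64];

        printf("in-bounds copy: %d\n", copy_handle_out(out, &h, sizeof(h)));
        printf("oversized copy: %d\n", copy_handle_out(out, &h, sizeof(out)));
        return 0;
}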
48919 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
48920 index 3011b87..1ab03e9 100644
48921 --- a/fs/xfs/xfs_iops.c
48922 +++ b/fs/xfs/xfs_iops.c
48923 @@ -397,7 +397,7 @@ xfs_vn_put_link(
48924 struct nameidata *nd,
48925 void *p)
48926 {
48927 - char *s = nd_get_link(nd);
48928 + const char *s = nd_get_link(nd);
48929
48930 if (!IS_ERR(s))
48931 kfree(s);
48932 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
48933 new file mode 100644
48934 index 0000000..2645296
48935 --- /dev/null
48936 +++ b/grsecurity/Kconfig
48937 @@ -0,0 +1,1079 @@
48938 +#
48939 +# grsecurity configuration
48940 +#
48941 +
48942 +menu "Grsecurity"
48943 +
48944 +config GRKERNSEC
48945 + bool "Grsecurity"
48946 + select CRYPTO
48947 + select CRYPTO_SHA256
48948 + help
48949 + If you say Y here, you will be able to configure many features
48950 + that will enhance the security of your system. It is highly
48951 + recommended that you say Y here and read through the help
48952 + for each option so that you fully understand the features and
48953 + can evaluate their usefulness for your machine.
48954 +
48955 +choice
48956 + prompt "Security Level"
48957 + depends on GRKERNSEC
48958 + default GRKERNSEC_CUSTOM
48959 +
48960 +config GRKERNSEC_LOW
48961 + bool "Low"
48962 + select GRKERNSEC_LINK
48963 + select GRKERNSEC_FIFO
48964 + select GRKERNSEC_RANDNET
48965 + select GRKERNSEC_DMESG
48966 + select GRKERNSEC_CHROOT
48967 + select GRKERNSEC_CHROOT_CHDIR
48968 +
48969 + help
48970 + If you choose this option, several of the grsecurity options will
48971 + be enabled that will give you greater protection against a number
48972 + of attacks, while assuring that none of your software will have any
48973 + conflicts with the additional security measures. If you run a lot
48974 + of unusual software, or you are having problems with the higher
48975 + security levels, you should say Y here. With this option, the
48976 + following features are enabled:
48977 +
48978 + - Linking restrictions
48979 + - FIFO restrictions
48980 + - Restricted dmesg
48981 + - Enforced chdir("/") on chroot
48982 + - Runtime module disabling
48983 +
48984 +config GRKERNSEC_MEDIUM
48985 + bool "Medium"
48986 + select PAX
48987 + select PAX_EI_PAX
48988 + select PAX_PT_PAX_FLAGS
48989 + select PAX_HAVE_ACL_FLAGS
48990 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48991 + select GRKERNSEC_CHROOT
48992 + select GRKERNSEC_CHROOT_SYSCTL
48993 + select GRKERNSEC_LINK
48994 + select GRKERNSEC_FIFO
48995 + select GRKERNSEC_DMESG
48996 + select GRKERNSEC_RANDNET
48997 + select GRKERNSEC_FORKFAIL
48998 + select GRKERNSEC_TIME
48999 + select GRKERNSEC_SIGNAL
49000 + select GRKERNSEC_CHROOT
49001 + select GRKERNSEC_CHROOT_UNIX
49002 + select GRKERNSEC_CHROOT_MOUNT
49003 + select GRKERNSEC_CHROOT_PIVOT
49004 + select GRKERNSEC_CHROOT_DOUBLE
49005 + select GRKERNSEC_CHROOT_CHDIR
49006 + select GRKERNSEC_CHROOT_MKNOD
49007 + select GRKERNSEC_PROC
49008 + select GRKERNSEC_PROC_USERGROUP
49009 + select PAX_RANDUSTACK
49010 + select PAX_ASLR
49011 + select PAX_RANDMMAP
49012 + select PAX_REFCOUNT if (X86 || SPARC64)
49013 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49014 +
49015 + help
49016 + If you say Y here, several features in addition to those included
49017 + in the low additional security level will be enabled. These
49018 + features provide even more security to your system, though in rare
49019 + cases they may be incompatible with very old or poorly written
49020 + software. If you enable this option, make sure that your auth
49021 + service (identd) is running as gid 1001. With this option,
49022 + the following features (in addition to those provided in the
49023 + low additional security level) will be enabled:
49024 +
49025 + - Failed fork logging
49026 + - Time change logging
49027 + - Signal logging
49028 + - Deny mounts in chroot
49029 + - Deny double chrooting
49030 + - Deny sysctl writes in chroot
49031 + - Deny mknod in chroot
49032 + - Deny access to abstract AF_UNIX sockets out of chroot
49033 + - Deny pivot_root in chroot
49034 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49035 + - /proc restrictions with special GID set to 10 (usually wheel)
49036 + - Address Space Layout Randomization (ASLR)
49037 + - Prevent exploitation of most refcount overflows
49038 + - Bounds checking of copying between the kernel and userland
49039 +
49040 +config GRKERNSEC_HIGH
49041 + bool "High"
49042 + select GRKERNSEC_LINK
49043 + select GRKERNSEC_FIFO
49044 + select GRKERNSEC_DMESG
49045 + select GRKERNSEC_FORKFAIL
49046 + select GRKERNSEC_TIME
49047 + select GRKERNSEC_SIGNAL
49048 + select GRKERNSEC_CHROOT
49049 + select GRKERNSEC_CHROOT_SHMAT
49050 + select GRKERNSEC_CHROOT_UNIX
49051 + select GRKERNSEC_CHROOT_MOUNT
49052 + select GRKERNSEC_CHROOT_FCHDIR
49053 + select GRKERNSEC_CHROOT_PIVOT
49054 + select GRKERNSEC_CHROOT_DOUBLE
49055 + select GRKERNSEC_CHROOT_CHDIR
49056 + select GRKERNSEC_CHROOT_MKNOD
49057 + select GRKERNSEC_CHROOT_CAPS
49058 + select GRKERNSEC_CHROOT_SYSCTL
49059 + select GRKERNSEC_CHROOT_FINDTASK
49060 + select GRKERNSEC_SYSFS_RESTRICT
49061 + select GRKERNSEC_PROC
49062 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49063 + select GRKERNSEC_HIDESYM
49064 + select GRKERNSEC_BRUTE
49065 + select GRKERNSEC_PROC_USERGROUP
49066 + select GRKERNSEC_KMEM
49067 + select GRKERNSEC_RESLOG
49068 + select GRKERNSEC_RANDNET
49069 + select GRKERNSEC_PROC_ADD
49070 + select GRKERNSEC_CHROOT_CHMOD
49071 + select GRKERNSEC_CHROOT_NICE
49072 + select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS)
49073 + select GRKERNSEC_AUDIT_MOUNT
49074 + select GRKERNSEC_MODHARDEN if (MODULES)
49075 + select GRKERNSEC_HARDEN_PTRACE
49076 + select GRKERNSEC_PTRACE_READEXEC
49077 + select GRKERNSEC_VM86 if (X86_32)
49078 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49079 + select PAX
49080 + select PAX_RANDUSTACK
49081 + select PAX_ASLR
49082 + select PAX_RANDMMAP
49083 + select PAX_NOEXEC
49084 + select PAX_MPROTECT
49085 + select PAX_EI_PAX
49086 + select PAX_PT_PAX_FLAGS
49087 + select PAX_HAVE_ACL_FLAGS
49088 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49089 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
49090 + select PAX_RANDKSTACK if (X86_TSC && X86)
49091 + select PAX_SEGMEXEC if (X86_32)
49092 + select PAX_PAGEEXEC
49093 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49094 + select PAX_EMUTRAMP if (PARISC)
49095 + select PAX_EMUSIGRT if (PARISC)
49096 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49097 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49098 + select PAX_REFCOUNT if (X86 || SPARC64)
49099 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49100 + help
49101 + If you say Y here, many of the features of grsecurity will be
49102 + enabled, which will protect you against many kinds of attacks
49103 + against your system. The heightened security comes at a cost
49104 + of an increased chance of incompatibilities with rare software
49105 + on your machine. Since this security level enables PaX, you should
49106 + view <http://pax.grsecurity.net> and read about the PaX
49107 + project. While you are there, download chpax and run it on
49108 + binaries that cause problems with PaX. Also remember that
49109 + since the /proc restrictions are enabled, you must run your
49110 + identd as gid 1001. This security level enables the following
49111 + features in addition to those listed in the low and medium
49112 + security levels:
49113 +
49114 + - Additional /proc restrictions
49115 + - Chmod restrictions in chroot
49116 + - No signals, ptrace, or viewing of processes outside of chroot
49117 + - Capability restrictions in chroot
49118 + - Deny fchdir out of chroot
49119 + - Priority restrictions in chroot
49120 + - Segmentation-based implementation of PaX
49121 + - Mprotect restrictions
49122 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49123 + - Kernel stack randomization
49124 + - Mount/unmount/remount logging
49125 + - Kernel symbol hiding
49126 + - Hardening of module auto-loading
49127 + - Ptrace restrictions
49128 + - Restricted vm86 mode
49129 + - Restricted sysfs/debugfs
49130 + - Active kernel exploit response
49131 +
49132 +config GRKERNSEC_CUSTOM
49133 + bool "Custom"
49134 + help
49135 + If you say Y here, you will be able to configure every grsecurity
49136 + option, which allows you to enable many more features that aren't
49137 + covered in the basic security levels. These additional features
49138 + include TPE, socket restrictions, and the sysctl system for
49139 + grsecurity. It is advised that you read through the help for
49140 + each option to determine its usefulness in your situation.
49141 +
49142 +endchoice
49143 +
49144 +menu "Memory Protections"
49145 +depends on GRKERNSEC
49146 +
49147 +config GRKERNSEC_KMEM
49148 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49149 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49150 + help
49151 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49152 + be written to or read from to modify or leak the contents of the running
49153 + kernel. /dev/port will also not be allowed to be opened. If you have module
49154 + support disabled, enabling this will close up four ways that are
49155 + currently used to insert malicious code into the running kernel.
49156 + Even with all these features enabled, we still highly recommend that
49157 + you use the RBAC system, as it is still possible for an attacker to
49158 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49159 + If you are not using XFree86, you may be able to stop this additional
49160 + case by enabling the 'Disable privileged I/O' option. Though nothing
49161 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49162 + but only to video memory, which is the only writing we allow in this
49163 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
49164 + will not be allowed to be mprotect()ed to PROT_WRITE later.
49165 + It is highly recommended that you say Y here if you meet all the
49166 + conditions above.
49167 +
49168 +config GRKERNSEC_VM86
49169 + bool "Restrict VM86 mode"
49170 + depends on X86_32
49171 +
49172 + help
49173 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49174 + make use of a special execution mode on 32bit x86 processors called
49175 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49176 + video cards and will still work with this option enabled. The purpose
49177 + of the option is to prevent exploitation of emulation errors in
49178 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
49179 + Nearly all users should be able to enable this option.
49180 +
49181 +config GRKERNSEC_IO
49182 + bool "Disable privileged I/O"
49183 + depends on X86
49184 + select RTC_CLASS
49185 + select RTC_INTF_DEV
49186 + select RTC_DRV_CMOS
49187 +
49188 + help
49189 + If you say Y here, all ioperm and iopl calls will return an error.
49190 + Ioperm and iopl can be used to modify the running kernel.
49191 + Unfortunately, some programs need this access to operate properly,
49192 + the most notable of which are XFree86 and hwclock. hwclock can be
49193 + remedied by having RTC support in the kernel, so real-time
49194 + clock support is enabled if this option is enabled, to ensure
49195 + that hwclock operates correctly. XFree86 still will not
49196 + operate correctly with this option enabled, so DO NOT CHOOSE Y
49197 + IF YOU USE XFree86. If you use XFree86 and you still want to
49198 + protect your kernel against modification, use the RBAC system.
49199 +
49200 +config GRKERNSEC_PROC_MEMMAP
49201 + bool "Harden ASLR against information leaks and entropy reduction"
49202 + default y if (PAX_NOEXEC || PAX_ASLR)
49203 + depends on PAX_NOEXEC || PAX_ASLR
49204 + help
49205 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49206 + give no information about the addresses of the task's mappings if
49207 + PaX features that rely on random addresses are enabled on the task.
49208 + In addition to sanitizing this information and disabling other
49209 + dangerous sources of information, this option restricts reads of sensitive
49210 + /proc/<pid> entries when the file descriptor was opened in a different
49211 + task than the one performing the read. Such attempts are logged.
49212 + This option also limits argv/env strings for suid/sgid binaries
49213 + to 512KB to prevent a complete exhaustion of the stack entropy provided
49214 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49215 + binaries to prevent alternative mmap layouts from being abused.
49216 +
49217 + If you use PaX it is essential that you say Y here as it closes up
49218 + several holes that make full ASLR useless locally.
49219 +
49220 +config GRKERNSEC_BRUTE
49221 + bool "Deter exploit bruteforcing"
49222 + help
49223 + If you say Y here, attempts to bruteforce exploits against forking
49224 + daemons such as apache or sshd, as well as against suid/sgid binaries
49225 + will be deterred. When a child of a forking daemon is killed by PaX
49226 + or crashes due to an illegal instruction or other suspicious signal,
49227 + the parent process will be delayed 30 seconds upon every subsequent
49228 + fork until the administrator is able to assess the situation and
49229 + restart the daemon.
49230 + In the suid/sgid case, the attempt is logged, the user has all their
49231 + processes terminated, and they are prevented from executing any further
49232 + processes for 15 minutes.
49233 + It is recommended that you also enable signal logging in the auditing
49234 + section so that logs are generated when a process triggers a suspicious
49235 + signal.
49236 + If the sysctl option is enabled, a sysctl option with name
49237 + "deter_bruteforce" is created.
49238 +
49239 +
49240 +config GRKERNSEC_MODHARDEN
49241 + bool "Harden module auto-loading"
49242 + depends on MODULES
49243 + help
49244 + If you say Y here, module auto-loading in response to use of some
49245 + feature implemented by an unloaded module will be restricted to
49246 + root users. Enabling this option helps defend against attacks
49247 + by unprivileged users who abuse the auto-loading behavior to
49248 + cause a vulnerable module to load that is then exploited.
49249 +
49250 + If this option prevents a legitimate use of auto-loading for a
49251 + non-root user, the administrator can execute modprobe manually
49252 + with the exact name of the module mentioned in the alert log.
49253 + Alternatively, the administrator can add the module to the list
49254 + of modules loaded at boot by modifying init scripts.
49255 +
49256 + Modification of init scripts will most likely be needed on
49257 + Ubuntu servers with encrypted home directory support enabled,
49258 + as the first non-root user logging in will cause the ecb(aes),
49259 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49260 +
49261 +config GRKERNSEC_HIDESYM
49262 + bool "Hide kernel symbols"
49263 + help
49264 + If you say Y here, getting information on loaded modules, and
49265 + displaying all kernel symbols through a syscall will be restricted
49266 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49267 + /proc/kallsyms will be restricted to the root user. The RBAC
49268 + system can hide that entry even from root.
49269 +
49270 + This option also prevents leaking of kernel addresses through
49271 + several /proc entries.
49272 +
49273 + Note that this option is only effective provided the following
49274 + conditions are met:
49275 + 1) The kernel using grsecurity is not precompiled by some distribution
49276 + 2) You have also enabled GRKERNSEC_DMESG
49277 + 3) You are using the RBAC system and hiding other files such as your
49278 + kernel image and System.map. Alternatively, enabling this option
49279 + causes the permissions on /boot, /lib/modules, and the kernel
49280 + source directory to change at compile time to prevent
49281 + reading by non-root users.
49282 + If the above conditions are met, this option will aid in providing a
49283 + useful protection against local kernel exploitation of overflows
49284 + and arbitrary read/write vulnerabilities.
49285 +
49286 +config GRKERNSEC_KERN_LOCKOUT
49287 + bool "Active kernel exploit response"
49288 + depends on X86 || ARM || PPC || SPARC
49289 + help
49290 + If you say Y here, when a PaX alert is triggered due to suspicious
49291 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49292 + or an OOPs occurs due to bad memory accesses, instead of just
49293 + terminating the offending process (and potentially allowing
49294 + a subsequent exploit from the same user), we will take one of two
49295 + actions:
49296 + - If the user was root, we will panic the system.
49297 + - If the user was non-root, we will log the attempt, terminate
49298 + all processes owned by the user, and then prevent them from creating
49299 + any new processes until the system is restarted.
49300 + This deters repeated kernel exploitation/bruteforcing attempts
49301 + and is useful for later forensics.
49302 +
49303 +endmenu
49304 +menu "Role Based Access Control Options"
49305 +depends on GRKERNSEC
49306 +
49307 +config GRKERNSEC_RBAC_DEBUG
49308 + bool
49309 +
49310 +config GRKERNSEC_NO_RBAC
49311 + bool "Disable RBAC system"
49312 + help
49313 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49314 + preventing the RBAC system from being enabled. You should only say Y
49315 + here if you have no intention of using the RBAC system, so as to prevent
49316 + an attacker with root access from misusing the RBAC system to hide files
49317 + and processes when loadable module support and /dev/[k]mem have been
49318 + locked down.
49319 +
49320 +config GRKERNSEC_ACL_HIDEKERN
49321 + bool "Hide kernel processes"
49322 + help
49323 + If you say Y here, all kernel threads will be hidden to all
49324 + processes but those whose subject has the "view hidden processes"
49325 + flag.
49326 +
49327 +config GRKERNSEC_ACL_MAXTRIES
49328 + int "Maximum tries before password lockout"
49329 + default 3
49330 + help
49331 + This option enforces the maximum number of times a user can attempt
49332 + to authorize themselves with the grsecurity RBAC system before being
49333 + denied the ability to attempt authorization again for a specified time.
49334 + The lower the number, the harder it will be to brute-force a password.
49335 +
49336 +config GRKERNSEC_ACL_TIMEOUT
49337 + int "Time to wait after max password tries, in seconds"
49338 + default 30
49339 + help
49340 + This option specifies the time the user must wait after attempting to
49341 + authorize to the RBAC system with the maximum number of invalid
49342 + passwords. The higher the number, the harder it will be to brute-force
49343 + a password.
49344 +
49345 +endmenu
49346 +menu "Filesystem Protections"
49347 +depends on GRKERNSEC
49348 +
49349 +config GRKERNSEC_PROC
49350 + bool "Proc restrictions"
49351 + help
49352 + If you say Y here, the permissions of the /proc filesystem
49353 + will be altered to enhance system security and privacy. You MUST
49354 + choose either a user only restriction or a user and group restriction.
49355 + Depending upon the option you choose, you can either restrict users to
49356 + see only the processes they themselves run, or choose a group that can
49357 + view all processes and files normally restricted to root if you choose
49358 + the "restrict to user only" option. NOTE: If you're running identd or
49359 + ntpd as a non-root user, you will have to run it as the group you
49360 + specify here.
49361 +
49362 +config GRKERNSEC_PROC_USER
49363 + bool "Restrict /proc to user only"
49364 + depends on GRKERNSEC_PROC
49365 + help
49366 + If you say Y here, non-root users will only be able to view their own
49367 + processes, and will be restricted from viewing network-related information
49368 + and kernel symbol and module information.
49369 +
49370 +config GRKERNSEC_PROC_USERGROUP
49371 + bool "Allow special group"
49372 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49373 + help
49374 + If you say Y here, you will be able to select a group that will be
49375 + able to view all processes and network-related information. If you've
49376 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49377 + remain hidden. This option is useful if you want to run identd as
49378 + a non-root user.
49379 +
49380 +config GRKERNSEC_PROC_GID
49381 + int "GID for special group"
49382 + depends on GRKERNSEC_PROC_USERGROUP
49383 + default 1001
49384 +
49385 +config GRKERNSEC_PROC_ADD
49386 + bool "Additional restrictions"
49387 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49388 + help
49389 + If you say Y here, additional restrictions will be placed on
49390 + /proc that keep normal users from viewing device information and
49391 + slabinfo information that could be useful for exploits.
49392 +
49393 +config GRKERNSEC_LINK
49394 + bool "Linking restrictions"
49395 + help
49396 + If you say Y here, /tmp race exploits will be prevented, since users
49397 + will no longer be able to follow symlinks owned by other users in
49398 + world-writable +t directories (e.g. /tmp), unless the owner of the
49399 + symlink is the owner of the directory. Users will also not be
49400 + able to hardlink to files they do not own. If the sysctl option is
49401 + enabled, a sysctl option with name "linking_restrictions" is created.
49402 +
49403 +config GRKERNSEC_FIFO
49404 + bool "FIFO restrictions"
49405 + help
49406 + If you say Y here, users will not be able to write to FIFOs they don't
49407 + own in world-writable +t directories (e.g. /tmp), unless the owner of
49408 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
49409 + option is enabled, a sysctl option with name "fifo_restrictions" is
49410 + created.
49411 +
49412 +config GRKERNSEC_SYSFS_RESTRICT
49413 + bool "Sysfs/debugfs restriction"
49414 + depends on SYSFS
49415 + help
49416 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49417 + any filesystem normally mounted under it (e.g. debugfs) will be
49418 + mostly accessible only by root. These filesystems generally provide access
49419 + to hardware and debug information that isn't appropriate for unprivileged
49420 + users of the system. Sysfs and debugfs have also become a large source
49421 + of new vulnerabilities, ranging from infoleaks to local compromise.
49422 + There has been very little oversight with an eye toward security involved
49423 + in adding new exporters of information to these filesystems, so their
49424 + use is discouraged.
49425 + For reasons of compatibility, a few directories have been whitelisted
49426 + for access by non-root users:
49427 + /sys/fs/selinux
49428 + /sys/fs/fuse
49429 + /sys/devices/system/cpu
49430 +
49431 +config GRKERNSEC_ROFS
49432 + bool "Runtime read-only mount protection"
49433 + help
49434 + If you say Y here, a sysctl option with name "romount_protect" will
49435 + be created. By setting this option to 1 at runtime, filesystems
49436 + will be protected in the following ways:
49437 + * No new writable mounts will be allowed
49438 + * Existing read-only mounts won't be able to be remounted read/write
49439 + * Write operations will be denied on all block devices
49440 + This option acts independently of grsec_lock: once it is set to 1,
49441 + it cannot be turned off. Therefore, please be mindful of the resulting
49442 + behavior if this option is enabled in an init script on a read-only
49443 + filesystem. This feature is mainly intended for secure embedded systems.
49444 +
49445 +config GRKERNSEC_CHROOT
49446 + bool "Chroot jail restrictions"
49447 + help
49448 + If you say Y here, you will be able to choose several options that will
49449 + make breaking out of a chrooted jail much more difficult. If you
49450 + encounter no software incompatibilities with the following options, it
49451 + is recommended that you enable each one.
49452 +
49453 +config GRKERNSEC_CHROOT_MOUNT
49454 + bool "Deny mounts"
49455 + depends on GRKERNSEC_CHROOT
49456 + help
49457 + If you say Y here, processes inside a chroot will not be able to
49458 + mount or remount filesystems. If the sysctl option is enabled, a
49459 + sysctl option with name "chroot_deny_mount" is created.
49460 +
49461 +config GRKERNSEC_CHROOT_DOUBLE
49462 + bool "Deny double-chroots"
49463 + depends on GRKERNSEC_CHROOT
49464 + help
49465 + If you say Y here, processes inside a chroot will not be able to chroot
49466 + again outside the chroot. This is a widely used method of breaking
49467 + out of a chroot jail and should not be allowed. If the sysctl
49468 + option is enabled, a sysctl option with name
49469 + "chroot_deny_chroot" is created.
49470 +
49471 +config GRKERNSEC_CHROOT_PIVOT
49472 + bool "Deny pivot_root in chroot"
49473 + depends on GRKERNSEC_CHROOT
49474 + help
49475 + If you say Y here, processes inside a chroot will not be able to use
49476 + a function called pivot_root() that was introduced in Linux 2.3.41. It
49477 + works similarly to chroot in that it changes the root filesystem. This
49478 + function could be misused in a chrooted process to attempt to break out
49479 + of the chroot, and therefore should not be allowed. If the sysctl
49480 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
49481 + created.
49482 +
49483 +config GRKERNSEC_CHROOT_CHDIR
49484 + bool "Enforce chdir(\"/\") on all chroots"
49485 + depends on GRKERNSEC_CHROOT
49486 + help
49487 + If you say Y here, the current working directory of all newly-chrooted
49488 + applications will be set to the root directory of the chroot.
49489 + The man page on chroot(2) states:
49490 + Note that this call does not change the current working
49491 + directory, so that `.' can be outside the tree rooted at
49492 + `/'. In particular, the super-user can escape from a
49493 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
49494 +
49495 + It is recommended that you say Y here, since it's not known to break
49496 + any software. If the sysctl option is enabled, a sysctl option with
49497 + name "chroot_enforce_chdir" is created.
49498 +
49499 +config GRKERNSEC_CHROOT_CHMOD
49500 + bool "Deny (f)chmod +s"
49501 + depends on GRKERNSEC_CHROOT
49502 + help
49503 + If you say Y here, processes inside a chroot will not be able to chmod
49504 + or fchmod files to make them have suid or sgid bits. This protects
49505 + against another published method of breaking a chroot. If the sysctl
49506 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
49507 + created.
49508 +
49509 +config GRKERNSEC_CHROOT_FCHDIR
49510 + bool "Deny fchdir out of chroot"
49511 + depends on GRKERNSEC_CHROOT
49512 + help
49513 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
49514 + to a file descriptor of the chrooting process that points to a directory
49515 + outside of the chroot will be stopped. If the sysctl option
49516 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
49517 +
49518 +config GRKERNSEC_CHROOT_MKNOD
49519 + bool "Deny mknod"
49520 + depends on GRKERNSEC_CHROOT
49521 + help
49522 + If you say Y here, processes inside a chroot will not be allowed to
49523 + mknod. The problem with using mknod inside a chroot is that it
49524 + would allow an attacker to create a device entry that is the same
49525 + as one on the physical root of your system, which could be anything
49526 + from the console device to a device for your hard drive (which
49527 + they could then use to wipe the drive or steal data). It is recommended
49528 + that you say Y here, unless you run into software incompatibilities.
49529 + If the sysctl option is enabled, a sysctl option with name
49530 + "chroot_deny_mknod" is created.
49531 +
49532 +config GRKERNSEC_CHROOT_SHMAT
49533 + bool "Deny shmat() out of chroot"
49534 + depends on GRKERNSEC_CHROOT
49535 + help
49536 + If you say Y here, processes inside a chroot will not be able to attach
49537 + to shared memory segments that were created outside of the chroot jail.
49538 + It is recommended that you say Y here. If the sysctl option is enabled,
49539 + a sysctl option with name "chroot_deny_shmat" is created.
49540 +
49541 +config GRKERNSEC_CHROOT_UNIX
49542 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
49543 + depends on GRKERNSEC_CHROOT
49544 + help
49545 + If you say Y here, processes inside a chroot will not be able to
49546 + connect to abstract (meaning not belonging to a filesystem) Unix
49547 + domain sockets that were bound outside of a chroot. It is recommended
49548 + that you say Y here. If the sysctl option is enabled, a sysctl option
49549 + with name "chroot_deny_unix" is created.
49550 +
49551 +config GRKERNSEC_CHROOT_FINDTASK
49552 + bool "Protect outside processes"
49553 + depends on GRKERNSEC_CHROOT
49554 + help
49555 + If you say Y here, processes inside a chroot will not be able to
49556 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
49557 + getsid, or view any process outside of the chroot. If the sysctl
49558 + option is enabled, a sysctl option with name "chroot_findtask" is
49559 + created.
49560 +
49561 +config GRKERNSEC_CHROOT_NICE
49562 + bool "Restrict priority changes"
49563 + depends on GRKERNSEC_CHROOT
49564 + help
49565 + If you say Y here, processes inside a chroot will not be able to raise
49566 + the priority of processes in the chroot, or alter the priority of
49567 + processes outside the chroot. This provides more security than simply
49568 + removing CAP_SYS_NICE from the process' capability set. If the
49569 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
49570 + is created.
49571 +
49572 +config GRKERNSEC_CHROOT_SYSCTL
49573 + bool "Deny sysctl writes"
49574 + depends on GRKERNSEC_CHROOT
49575 + help
49576 + If you say Y here, an attacker in a chroot will not be able to
49577 + write to sysctl entries, either by sysctl(2) or through a /proc
49578 + interface. It is strongly recommended that you say Y here. If the
49579 + sysctl option is enabled, a sysctl option with name
49580 + "chroot_deny_sysctl" is created.
49581 +
49582 +config GRKERNSEC_CHROOT_CAPS
49583 + bool "Capability restrictions"
49584 + depends on GRKERNSEC_CHROOT
49585 + help
49586 + If you say Y here, the capabilities on all processes within a
49587 + chroot jail will be lowered to stop module insertion, raw i/o,
49588 + system and net admin tasks, rebooting the system, modifying immutable
49589 + files, modifying IPC owned by another, and changing the system time.
49590 + This is left as an option because it can break some apps. Disable this
49591 + if your chrooted apps are having problems performing those kinds of
49592 + tasks. If the sysctl option is enabled, a sysctl option with
49593 + name "chroot_caps" is created.
49594 +
49595 +endmenu
49596 +menu "Kernel Auditing"
49597 +depends on GRKERNSEC
49598 +
49599 +config GRKERNSEC_AUDIT_GROUP
49600 + bool "Single group for auditing"
49601 + help
49602 + If you say Y here, the exec, chdir, and (un)mount logging features
49603 + will only operate on a group you specify. This option is recommended
49604 + if you only want to watch certain users instead of having a large
49605 + amount of logs from the entire system. If the sysctl option is enabled,
49606 + a sysctl option with name "audit_group" is created.
49607 +
49608 +config GRKERNSEC_AUDIT_GID
49609 + int "GID for auditing"
49610 + depends on GRKERNSEC_AUDIT_GROUP
49611 + default 1007
49612 +
49613 +config GRKERNSEC_EXECLOG
49614 + bool "Exec logging"
49615 + help
49616 + If you say Y here, all execve() calls will be logged (since the
49617 + other exec*() calls are frontends to execve(), all execution
49618 + will be logged). Useful for shell-servers that like to keep track
49619 + of their users. If the sysctl option is enabled, a sysctl option with
49620 + name "exec_logging" is created.
49621 + WARNING: This option when enabled will produce a LOT of logs, especially
49622 + on an active system.
49623 +
49624 +config GRKERNSEC_RESLOG
49625 + bool "Resource logging"
49626 + help
49627 + If you say Y here, all attempts to overstep resource limits will
49628 + be logged with the resource name, the requested size, and the current
49629 + limit. It is highly recommended that you say Y here. If the sysctl
49630 + option is enabled, a sysctl option with name "resource_logging" is
49631 + created. If the RBAC system is enabled, the sysctl value is ignored.
49632 +
49633 +config GRKERNSEC_CHROOT_EXECLOG
49634 + bool "Log execs within chroot"
49635 + help
49636 + If you say Y here, all executions inside a chroot jail will be logged
49637 + to syslog. This can cause a large amount of logs if certain
49638 + applications (e.g. djb's daemontools) are installed on the system, and
49639 + is therefore left as an option. If the sysctl option is enabled, a
49640 + sysctl option with name "chroot_execlog" is created.
49641 +
49642 +config GRKERNSEC_AUDIT_PTRACE
49643 + bool "Ptrace logging"
49644 + help
49645 + If you say Y here, all attempts to attach to a process via ptrace
49646 + will be logged. If the sysctl option is enabled, a sysctl option
49647 + with name "audit_ptrace" is created.
49648 +
49649 +config GRKERNSEC_AUDIT_CHDIR
49650 + bool "Chdir logging"
49651 + help
49652 + If you say Y here, all chdir() calls will be logged. If the sysctl
49653 + option is enabled, a sysctl option with name "audit_chdir" is created.
49654 +
49655 +config GRKERNSEC_AUDIT_MOUNT
49656 + bool "(Un)Mount logging"
49657 + help
49658 + If you say Y here, all mounts and unmounts will be logged. If the
49659 + sysctl option is enabled, a sysctl option with name "audit_mount" is
49660 + created.
49661 +
49662 +config GRKERNSEC_SIGNAL
49663 + bool "Signal logging"
49664 + help
49665 + If you say Y here, certain important signals will be logged, such as
49666 + SIGSEGV, which will inform you when an error occurred in a program,
49667 + which in some cases could indicate a possible exploit attempt.
49668 + If the sysctl option is enabled, a sysctl option with name
49669 + "signal_logging" is created.
49670 +
49671 +config GRKERNSEC_FORKFAIL
49672 + bool "Fork failure logging"
49673 + help
49674 + If you say Y here, all failed fork() attempts will be logged.
49675 + This could suggest a fork bomb, or someone attempting to overstep
49676 + their process limit. If the sysctl option is enabled, a sysctl option
49677 + with name "forkfail_logging" is created.
49678 +
49679 +config GRKERNSEC_TIME
49680 + bool "Time change logging"
49681 + help
49682 + If you say Y here, any changes of the system clock will be logged.
49683 + If the sysctl option is enabled, a sysctl option with name
49684 + "timechange_logging" is created.
49685 +
49686 +config GRKERNSEC_PROC_IPADDR
49687 + bool "/proc/<pid>/ipaddr support"
49688 + help
49689 + If you say Y here, a new entry will be added to each /proc/<pid>
49690 + directory that contains the IP address of the person using the task.
49691 + The IP is carried across local TCP and AF_UNIX stream sockets.
49692 + This information can be useful for IDS/IPSes to perform remote response
49693 + to a local attack. The entry is readable by only the owner of the
49694 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
49695 + the RBAC system), and thus does not create privacy concerns.
49696 +
49697 +config GRKERNSEC_RWXMAP_LOG
49698 + bool 'Denied RWX mmap/mprotect logging'
49699 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
49700 + help
49701 + If you say Y here, calls to mmap() and mprotect() with explicit
49702 + usage of PROT_WRITE and PROT_EXEC together will be logged when
49703 + denied by the PAX_MPROTECT feature. If the sysctl option is
49704 + enabled, a sysctl option with name "rwxmap_logging" is created.
49705 +
49706 +config GRKERNSEC_AUDIT_TEXTREL
49707 + bool 'ELF text relocations logging (READ HELP)'
49708 + depends on PAX_MPROTECT
49709 + help
49710 + If you say Y here, text relocations will be logged with the filename
49711 + of the offending library or binary. The purpose of the feature is
49712 + to help Linux distribution developers get rid of libraries and
49713 + binaries that need text relocations which hinder the future progress
49714 + of PaX. Only Linux distribution developers should say Y here, and
49715 + never on a production machine, as this option creates an information
49716 + leak that could aid an attacker in defeating the randomization of
49717 + a single memory region. If the sysctl option is enabled, a sysctl
49718 + option with name "audit_textrel" is created.
49719 +
49720 +endmenu
49721 +
49722 +menu "Executable Protections"
49723 +depends on GRKERNSEC
49724 +
49725 +config GRKERNSEC_DMESG
49726 + bool "Dmesg(8) restriction"
49727 + help
49728 + If you say Y here, non-root users will not be able to use dmesg(8)
49729 + to view up to the last 4kb of messages in the kernel's log buffer.
49730 + The kernel's log buffer often contains kernel addresses and other
49731 + identifying information useful to an attacker in fingerprinting a
49732 + system for a targeted exploit.
49733 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
49734 + created.
49735 +
49736 +config GRKERNSEC_HARDEN_PTRACE
49737 + bool "Deter ptrace-based process snooping"
49738 + help
49739 + If you say Y here, TTY sniffers and other malicious monitoring
49740 + programs implemented through ptrace will be defeated. If you
49741 + have been using the RBAC system, this option has already been
49742 + enabled for several years for all users, with the ability to make
49743 + fine-grained exceptions.
49744 +
49745 + This option only affects the ability of non-root users to ptrace
49746 + processes that are not a descendant of the ptracing process.
49747 + This means that strace ./binary and gdb ./binary will still work,
49748 + but attaching to arbitrary processes will not. If the sysctl
49749 + option is enabled, a sysctl option with name "harden_ptrace" is
49750 + created.
49751 +
49752 +config GRKERNSEC_PTRACE_READEXEC
49753 + bool "Require read access to ptrace sensitive binaries"
49754 + help
49755 + If you say Y here, unprivileged users will not be able to ptrace unreadable
49756 + binaries. This option is useful in environments that
49757 + remove the read bits (e.g. file mode 4711) from suid binaries to
49758 + prevent infoleaking of their contents. This option adds
49759 + consistency to the use of that file mode, as otherwise the binary could
49760 + be read out by running it without privileges and ptracing it.
49761 +
49762 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
49763 + is created.
49764 +
49765 +config GRKERNSEC_SETXID
49766 + bool "Enforce consistent multithreaded privileges"
49767 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
49768 + help
49769 + If you say Y here, a change from a root uid to a non-root uid
49770 + in a multithreaded application will cause the resulting uids,
49771 + gids, supplementary groups, and capabilities in that thread
49772 + to be propagated to the other threads of the process. In most
49773 + cases this is unnecessary, as glibc will emulate this behavior
49774 + on behalf of the application. Other libcs do not act in the
49775 + same way, allowing the other threads of the process to continue
49776 + running with root privileges. If the sysctl option is enabled,
49777 + a sysctl option with name "consistent_setxid" is created.
49778 +
49779 +config GRKERNSEC_TPE
49780 + bool "Trusted Path Execution (TPE)"
49781 + help
49782 + If you say Y here, you will be able to choose a gid to add to the
49783 + supplementary groups of users you want to mark as "untrusted."
49784 + These users will not be able to execute any files that are not in
49785 + root-owned directories writable only by root. If the sysctl option
49786 + is enabled, a sysctl option with name "tpe" is created.
49787 +
49788 +config GRKERNSEC_TPE_ALL
49789 + bool "Partially restrict all non-root users"
49790 + depends on GRKERNSEC_TPE
49791 + help
49792 + If you say Y here, all non-root users will be covered under
49793 + a weaker TPE restriction. This is separate from, and in addition to,
49794 + the main TPE options that you have selected elsewhere. Thus, if a
49795 + "trusted" GID is chosen, this restriction applies to even that GID.
49796 + Under this restriction, all non-root users will only be allowed to
49797 + execute files in directories they own that are not group or
49798 + world-writable, or in directories owned by root and writable only by
49799 + root. If the sysctl option is enabled, a sysctl option with name
49800 + "tpe_restrict_all" is created.
49801 +
49802 +config GRKERNSEC_TPE_INVERT
49803 + bool "Invert GID option"
49804 + depends on GRKERNSEC_TPE
49805 + help
49806 + If you say Y here, the group you specify in the TPE configuration will
49807 + decide what group TPE restrictions will be *disabled* for. This
49808 + option is useful if you want TPE restrictions to be applied to most
49809 + users on the system. If the sysctl option is enabled, a sysctl option
49810 + with name "tpe_invert" is created. Unlike other sysctl options, this
49811 + entry will default to on for backward-compatibility.
49812 +
49813 +config GRKERNSEC_TPE_GID
49814 + int "GID for untrusted users"
49815 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
49816 + default 1005
49817 + help
49818 + Setting this GID determines what group TPE restrictions will be
49819 + *enabled* for. If the sysctl option is enabled, a sysctl option
49820 + with name "tpe_gid" is created.
49821 +
49822 +config GRKERNSEC_TPE_GID
49823 + int "GID for trusted users"
49824 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
49825 + default 1005
49826 + help
49827 + Setting this GID determines what group TPE restrictions will be
49828 + *disabled* for. If the sysctl option is enabled, a sysctl option
49829 + with name "tpe_gid" is created.
49830 +
49831 +endmenu
49832 +menu "Network Protections"
49833 +depends on GRKERNSEC
49834 +
49835 +config GRKERNSEC_RANDNET
49836 + bool "Larger entropy pools"
49837 + help
49838 + If you say Y here, the entropy pools used for many features of Linux
49839 + and grsecurity will be doubled in size. Since several grsecurity
49840 + features use additional randomness, it is recommended that you say Y
49841 + here. Saying Y here has a similar effect as modifying
49842 + /proc/sys/kernel/random/poolsize.
49843 +
49844 +config GRKERNSEC_BLACKHOLE
49845 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
49846 + depends on NET
49847 + help
49848 + If you say Y here, neither TCP resets nor ICMP
49849 + destination-unreachable packets will be sent in response to packets
49850 + sent to ports for which no associated listening process exists.
49851 + This feature supports both IPv4 and IPv6 and exempts the
49852 + loopback interface from blackholing. Enabling this feature
49853 + makes a host more resilient to DoS attacks and reduces network
49854 + visibility against scanners.
49855 +
49856 + The blackhole feature as-implemented is equivalent to the FreeBSD
49857 + blackhole feature, as it prevents RST responses to all packets, not
49858 + just SYNs. Under most application behavior this causes no
49859 + problems, but applications (like haproxy) may not close certain
49860 + connections in a way that cleanly terminates them on the remote
49861 + end, leaving the remote host in LAST_ACK state. Because of this
49862 + side-effect and to prevent intentional LAST_ACK DoSes, this
49863 + feature also adds automatic mitigation against such attacks.
49864 + The mitigation drastically reduces the amount of time a socket
49865 + can spend in LAST_ACK state. If you're using haproxy and not
49866 + all servers it connects to have this option enabled, consider
49867 + disabling this feature on the haproxy host.
49868 +
49869 + If the sysctl option is enabled, two sysctl options with names
49870 + "ip_blackhole" and "lastack_retries" will be created.
49871 + While "ip_blackhole" takes the standard zero/non-zero on/off
49872 + toggle, "lastack_retries" uses the same kinds of values as
49873 + "tcp_retries1" and "tcp_retries2". The default value of 4
49874 + prevents a socket from lasting more than 45 seconds in LAST_ACK
49875 + state.
49876 +
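Once these entries exist they can be adjusted like any other proc tunable. The sketch below simply writes the two values described above; it assumes the entries appear under /proc/sys/kernel/grsecurity, as described in the Sysctl support menu later in this file:

#include <stdio.h>

/* Minimal sketch: enable blackholing and keep the LAST_ACK retry limit
 * at its default of 4.  Assumes the entries live under
 * /proc/sys/kernel/grsecurity, as described in the Sysctl support menu. */
static int write_sysctl(const char *path, const char *value)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%s\n", value);
        return fclose(f);
}

int main(void)
{
        write_sysctl("/proc/sys/kernel/grsecurity/ip_blackhole", "1");
        write_sysctl("/proc/sys/kernel/grsecurity/lastack_retries", "4");
        return 0;
}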
49877 +config GRKERNSEC_SOCKET
49878 + bool "Socket restrictions"
49879 + depends on NET
49880 + help
49881 + If you say Y here, you will be able to choose from several options.
49882 +	  If you assign a GID on your system and add it to the supplementary
49883 +	  groups of the users whose socket access you want to restrict, this patch
49884 +	  will enforce up to three restrictions, based on the option(s) you choose.
49885 +
49886 +config GRKERNSEC_SOCKET_ALL
49887 + bool "Deny any sockets to group"
49888 + depends on GRKERNSEC_SOCKET
49889 + help
49890 +	  If you say Y here, you will be able to choose a GID whose users will
49891 + be unable to connect to other hosts from your machine or run server
49892 + applications from your machine. If the sysctl option is enabled, a
49893 + sysctl option with name "socket_all" is created.
49894 +
49895 +config GRKERNSEC_SOCKET_ALL_GID
49896 + int "GID to deny all sockets for"
49897 + depends on GRKERNSEC_SOCKET_ALL
49898 + default 1004
49899 + help
49900 + Here you can choose the GID to disable socket access for. Remember to
49901 +	  add the users for whom you want socket access disabled to the GID
49902 + specified here. If the sysctl option is enabled, a sysctl option
49903 + with name "socket_all_gid" is created.
49904 +
49905 +config GRKERNSEC_SOCKET_CLIENT
49906 + bool "Deny client sockets to group"
49907 + depends on GRKERNSEC_SOCKET
49908 + help
49909 +	  If you say Y here, you will be able to choose a GID whose users will
49910 + be unable to connect to other hosts from your machine, but will be
49911 + able to run servers. If this option is enabled, all users in the group
49912 + you specify will have to use passive mode when initiating ftp transfers
49913 + from the shell on your machine. If the sysctl option is enabled, a
49914 + sysctl option with name "socket_client" is created.
49915 +
49916 +config GRKERNSEC_SOCKET_CLIENT_GID
49917 + int "GID to deny client sockets for"
49918 + depends on GRKERNSEC_SOCKET_CLIENT
49919 + default 1003
49920 + help
49921 + Here you can choose the GID to disable client socket access for.
49922 +	  Remember to add the users for whom you want client socket access disabled
49923 + the GID specified here. If the sysctl option is enabled, a sysctl
49924 + option with name "socket_client_gid" is created.
49925 +
49926 +config GRKERNSEC_SOCKET_SERVER
49927 + bool "Deny server sockets to group"
49928 + depends on GRKERNSEC_SOCKET
49929 + help
49930 +	  If you say Y here, you will be able to choose a GID whose users will
49931 + be unable to run server applications from your machine. If the sysctl
49932 + option is enabled, a sysctl option with name "socket_server" is created.
49933 +
49934 +config GRKERNSEC_SOCKET_SERVER_GID
49935 + int "GID to deny server sockets for"
49936 + depends on GRKERNSEC_SOCKET_SERVER
49937 + default 1002
49938 + help
49939 + Here you can choose the GID to disable server socket access for.
49940 +	  Remember to add the users for whom you want server socket access disabled
49941 + the GID specified here. If the sysctl option is enabled, a sysctl
49942 + option with name "socket_server_gid" is created.
49943 +
49944 +endmenu
49945 +menu "Sysctl support"
49946 +depends on GRKERNSEC && SYSCTL
49947 +
49948 +config GRKERNSEC_SYSCTL
49949 + bool "Sysctl support"
49950 + help
49951 + If you say Y here, you will be able to change the options that
49952 + grsecurity runs with at bootup, without having to recompile your
49953 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
49954 + to enable (1) or disable (0) various features. All the sysctl entries
49955 + are mutable until the "grsec_lock" entry is set to a non-zero value.
49956 + All features enabled in the kernel configuration are disabled at boot
49957 + if you do not say Y to the "Turn on features by default" option.
49958 + All options should be set at startup, and the grsec_lock entry should
49959 + be set to a non-zero value after all the options are set.
49960 + *THIS IS EXTREMELY IMPORTANT*
49961 +
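Because the ordering matters (every tunable first, grsec_lock last), a boot-time helper might look like the sketch below. Only grsec_lock and the /proc/sys/kernel/grsecurity prefix come from the text above; the entries in options[] are merely examples of sysctls named elsewhere in this file:

#include <stdio.h>

/* Sketch of the boot-time sequence described above: set every desired
 * grsecurity sysctl first, then set grsec_lock to a non-zero value so
 * the configuration can no longer be changed.  The names in options[]
 * are only examples of entries mentioned elsewhere in this file. */
static void set_grsec(const char *name, const char *value)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
        f = fopen(path, "w");
        if (!f)
                return;
        fprintf(f, "%s\n", value);
        fclose(f);
}

int main(void)
{
        static const char *options[] = { "tpe_restrict_all", "ip_blackhole", "socket_all" };
        unsigned int i;

        for (i = 0; i < sizeof(options) / sizeof(options[0]); i++)
                set_grsec(options[i], "1");

        set_grsec("grsec_lock", "1");   /* must come last */
        return 0;
}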
49962 +config GRKERNSEC_SYSCTL_DISTRO
49963 + bool "Extra sysctl support for distro makers (READ HELP)"
49964 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
49965 + help
49966 + If you say Y here, additional sysctl options will be created
49967 + for features that affect processes running as root. Therefore,
49968 + it is critical when using this option that the grsec_lock entry be
49969 +	  enabled after boot.  Only distros that ship prebuilt kernel packages
49970 +	  with this option enabled, and that can ensure grsec_lock is set
49971 +	  after boot, should use this option.
49972 + *Failure to set grsec_lock after boot makes all grsec features
49973 + this option covers useless*
49974 +
49975 + Currently this option creates the following sysctl entries:
49976 + "Disable Privileged I/O": "disable_priv_io"
49977 +
49978 +config GRKERNSEC_SYSCTL_ON
49979 + bool "Turn on features by default"
49980 + depends on GRKERNSEC_SYSCTL
49981 + help
49982 +	  If you say Y here, the features enabled in the kernel configuration
49983 +	  will also be enabled at boot time, rather than starting out
49984 +	  disabled.  It is recommended you say Y here unless
49985 + there is some reason you would want all sysctl-tunable features to
49986 + be disabled by default. As mentioned elsewhere, it is important
49987 + to enable the grsec_lock entry once you have finished modifying
49988 + the sysctl entries.
49989 +
49990 +endmenu
49991 +menu "Logging Options"
49992 +depends on GRKERNSEC
49993 +
49994 +config GRKERNSEC_FLOODTIME
49995 + int "Seconds in between log messages (minimum)"
49996 + default 10
49997 + help
49998 +	  This option allows you to enforce a minimum number of seconds between
49999 +	  grsecurity log messages.  The default should be suitable for most
50000 +	  people; however, if you choose to change it, choose a value small enough
50001 + to allow informative logs to be produced, but large enough to
50002 + prevent flooding.
50003 +
50004 +config GRKERNSEC_FLOODBURST
50005 + int "Number of messages in a burst (maximum)"
50006 + default 6
50007 + help
50008 + This option allows you to choose the maximum number of messages allowed
50009 + within the flood time interval you chose in a separate option. The
50010 +	  default should be suitable for most people; however, if you find that
50011 + many of your logs are being interpreted as flooding, you may want to
50012 + raise this value.
50013 +
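Together the two settings amount to a simple burst limiter: at most GRKERNSEC_FLOODBURST messages per GRKERNSEC_FLOODTIME-second window. A generic C sketch of that behaviour follows; it is illustrative only and not the patch's actual logging code:

#include <stdbool.h>
#include <time.h>

/* Generic sketch of the flood control described above: allow at most
 * `burst` messages within any `floodtime`-second window and suppress
 * the rest until the window expires.  Illustrative only. */
struct flood_ctl {
        int floodtime;          /* seconds per window (GRKERNSEC_FLOODTIME) */
        int burst;              /* max messages per window (GRKERNSEC_FLOODBURST) */
        time_t window_start;
        int count;
};

static bool flood_allow(struct flood_ctl *fc)
{
        time_t now = time(NULL);

        if (now - fc->window_start >= fc->floodtime) {
                fc->window_start = now; /* start a new window */
                fc->count = 0;
        }
        if (fc->count >= fc->burst)
                return false;           /* treated as flooding, dropped */
        fc->count++;
        return true;
}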
50014 +endmenu
50015 +
50016 +endmenu
50017 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50018 new file mode 100644
50019 index 0000000..1b9afa9
50020 --- /dev/null
50021 +++ b/grsecurity/Makefile
50022 @@ -0,0 +1,38 @@
50023 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton.
50024 +# During 2001-2009 it was completely redesigned by Brad Spengler
50025 +# into an RBAC system.
50026 +#
50027 +# All code in this directory and various hooks inserted throughout the kernel
50028 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50029 +# under the GPL v2 or higher
50030 +
50031 +KBUILD_CFLAGS += -Werror
50032 +
50033 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50034 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50035 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50036 +
50037 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50038 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50039 + gracl_learn.o grsec_log.o
50040 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50041 +
50042 +ifdef CONFIG_NET
50043 +obj-y += grsec_sock.o
50044 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50045 +endif
50046 +
50047 +ifndef CONFIG_GRKERNSEC
50048 +obj-y += grsec_disabled.o
50049 +endif
50050 +
50051 +ifdef CONFIG_GRKERNSEC_HIDESYM
50052 +extra-y := grsec_hidesym.o
50053 +$(obj)/grsec_hidesym.o:
50054 + @-chmod -f 500 /boot
50055 + @-chmod -f 500 /lib/modules
50056 + @-chmod -f 500 /lib64/modules
50057 + @-chmod -f 500 /lib32/modules
50058 + @-chmod -f 700 .
50059 + @echo ' grsec: protected kernel image paths'
50060 +endif
50061 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50062 new file mode 100644
50063 index 0000000..00b6c54
50064 --- /dev/null
50065 +++ b/grsecurity/gracl.c
50066 @@ -0,0 +1,4012 @@
50067 +#include <linux/kernel.h>
50068 +#include <linux/module.h>
50069 +#include <linux/sched.h>
50070 +#include <linux/mm.h>
50071 +#include <linux/file.h>
50072 +#include <linux/fs.h>
50073 +#include <linux/namei.h>
50074 +#include <linux/mount.h>
50075 +#include <linux/tty.h>
50076 +#include <linux/proc_fs.h>
50077 +#include <linux/lglock.h>
50078 +#include <linux/slab.h>
50079 +#include <linux/vmalloc.h>
50080 +#include <linux/types.h>
50081 +#include <linux/sysctl.h>
50082 +#include <linux/netdevice.h>
50083 +#include <linux/ptrace.h>
50084 +#include <linux/gracl.h>
50085 +#include <linux/gralloc.h>
50086 +#include <linux/security.h>
50087 +#include <linux/grinternal.h>
50088 +#include <linux/pid_namespace.h>
50089 +#include <linux/fdtable.h>
50090 +#include <linux/percpu.h>
50091 +#include "../fs/mount.h"
50092 +
50093 +#include <asm/uaccess.h>
50094 +#include <asm/errno.h>
50095 +#include <asm/mman.h>
50096 +
50097 +static struct acl_role_db acl_role_set;
50098 +static struct name_db name_set;
50099 +static struct inodev_db inodev_set;
50100 +
50101 +/* for keeping track of userspace pointers used for subjects, so we
50102 + can share references in the kernel as well
50103 +*/
50104 +
50105 +static struct path real_root;
50106 +
50107 +static struct acl_subj_map_db subj_map_set;
50108 +
50109 +static struct acl_role_label *default_role;
50110 +
50111 +static struct acl_role_label *role_list;
50112 +
50113 +static u16 acl_sp_role_value;
50114 +
50115 +extern char *gr_shared_page[4];
50116 +static DEFINE_MUTEX(gr_dev_mutex);
50117 +DEFINE_RWLOCK(gr_inode_lock);
50118 +
50119 +struct gr_arg *gr_usermode;
50120 +
50121 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
50122 +
50123 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50124 +extern void gr_clear_learn_entries(void);
50125 +
50126 +#ifdef CONFIG_GRKERNSEC_RESLOG
50127 +extern void gr_log_resource(const struct task_struct *task,
50128 + const int res, const unsigned long wanted, const int gt);
50129 +#endif
50130 +
50131 +unsigned char *gr_system_salt;
50132 +unsigned char *gr_system_sum;
50133 +
50134 +static struct sprole_pw **acl_special_roles = NULL;
50135 +static __u16 num_sprole_pws = 0;
50136 +
50137 +static struct acl_role_label *kernel_role = NULL;
50138 +
50139 +static unsigned int gr_auth_attempts = 0;
50140 +static unsigned long gr_auth_expires = 0UL;
50141 +
50142 +#ifdef CONFIG_NET
50143 +extern struct vfsmount *sock_mnt;
50144 +#endif
50145 +
50146 +extern struct vfsmount *pipe_mnt;
50147 +extern struct vfsmount *shm_mnt;
50148 +#ifdef CONFIG_HUGETLBFS
50149 +extern struct vfsmount *hugetlbfs_vfsmount;
50150 +#endif
50151 +
50152 +static struct acl_object_label *fakefs_obj_rw;
50153 +static struct acl_object_label *fakefs_obj_rwx;
50154 +
50155 +extern int gr_init_uidset(void);
50156 +extern void gr_free_uidset(void);
50157 +extern void gr_remove_uid(uid_t uid);
50158 +extern int gr_find_uid(uid_t uid);
50159 +
50160 +DECLARE_BRLOCK(vfsmount_lock);
50161 +
50162 +__inline__ int
50163 +gr_acl_is_enabled(void)
50164 +{
50165 + return (gr_status & GR_READY);
50166 +}
50167 +
50168 +#ifdef CONFIG_BTRFS_FS
50169 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50170 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50171 +#endif
50172 +
50173 +static inline dev_t __get_dev(const struct dentry *dentry)
50174 +{
50175 +#ifdef CONFIG_BTRFS_FS
50176 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50177 + return get_btrfs_dev_from_inode(dentry->d_inode);
50178 + else
50179 +#endif
50180 + return dentry->d_inode->i_sb->s_dev;
50181 +}
50182 +
50183 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50184 +{
50185 + return __get_dev(dentry);
50186 +}
50187 +
50188 +static char gr_task_roletype_to_char(struct task_struct *task)
50189 +{
50190 + switch (task->role->roletype &
50191 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50192 + GR_ROLE_SPECIAL)) {
50193 + case GR_ROLE_DEFAULT:
50194 + return 'D';
50195 + case GR_ROLE_USER:
50196 + return 'U';
50197 + case GR_ROLE_GROUP:
50198 + return 'G';
50199 + case GR_ROLE_SPECIAL:
50200 + return 'S';
50201 + }
50202 +
50203 + return 'X';
50204 +}
50205 +
50206 +char gr_roletype_to_char(void)
50207 +{
50208 + return gr_task_roletype_to_char(current);
50209 +}
50210 +
50211 +__inline__ int
50212 +gr_acl_tpe_check(void)
50213 +{
50214 + if (unlikely(!(gr_status & GR_READY)))
50215 + return 0;
50216 + if (current->role->roletype & GR_ROLE_TPE)
50217 + return 1;
50218 + else
50219 + return 0;
50220 +}
50221 +
50222 +int
50223 +gr_handle_rawio(const struct inode *inode)
50224 +{
50225 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50226 + if (inode && S_ISBLK(inode->i_mode) &&
50227 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50228 + !capable(CAP_SYS_RAWIO))
50229 + return 1;
50230 +#endif
50231 + return 0;
50232 +}
50233 +
50234 +static int
50235 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50236 +{
50237 + if (likely(lena != lenb))
50238 + return 0;
50239 +
50240 + return !memcmp(a, b, lena);
50241 +}
50242 +
50243 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50244 +{
50245 + *buflen -= namelen;
50246 + if (*buflen < 0)
50247 + return -ENAMETOOLONG;
50248 + *buffer -= namelen;
50249 + memcpy(*buffer, str, namelen);
50250 + return 0;
50251 +}
50252 +
50253 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50254 +{
50255 + return prepend(buffer, buflen, name->name, name->len);
50256 +}
50257 +
50258 +static int prepend_path(const struct path *path, struct path *root,
50259 + char **buffer, int *buflen)
50260 +{
50261 + struct dentry *dentry = path->dentry;
50262 + struct vfsmount *vfsmnt = path->mnt;
50263 + struct mount *mnt = real_mount(vfsmnt);
50264 + bool slash = false;
50265 + int error = 0;
50266 +
50267 + while (dentry != root->dentry || vfsmnt != root->mnt) {
50268 + struct dentry * parent;
50269 +
50270 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50271 + /* Global root? */
50272 + if (!mnt_has_parent(mnt)) {
50273 + goto out;
50274 + }
50275 + dentry = mnt->mnt_mountpoint;
50276 + mnt = mnt->mnt_parent;
50277 + vfsmnt = &mnt->mnt;
50278 + continue;
50279 + }
50280 + parent = dentry->d_parent;
50281 + prefetch(parent);
50282 + spin_lock(&dentry->d_lock);
50283 + error = prepend_name(buffer, buflen, &dentry->d_name);
50284 + spin_unlock(&dentry->d_lock);
50285 + if (!error)
50286 + error = prepend(buffer, buflen, "/", 1);
50287 + if (error)
50288 + break;
50289 +
50290 + slash = true;
50291 + dentry = parent;
50292 + }
50293 +
50294 +out:
50295 + if (!error && !slash)
50296 + error = prepend(buffer, buflen, "/", 1);
50297 +
50298 + return error;
50299 +}
50300 +
50301 +/* this must be called with vfsmount_lock and rename_lock held */
50302 +
50303 +static char *__our_d_path(const struct path *path, struct path *root,
50304 + char *buf, int buflen)
50305 +{
50306 + char *res = buf + buflen;
50307 + int error;
50308 +
50309 + prepend(&res, &buflen, "\0", 1);
50310 + error = prepend_path(path, root, &res, &buflen);
50311 + if (error)
50312 + return ERR_PTR(error);
50313 +
50314 + return res;
50315 +}
50316 +
50317 +static char *
50318 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50319 +{
50320 + char *retval;
50321 +
50322 + retval = __our_d_path(path, root, buf, buflen);
50323 + if (unlikely(IS_ERR(retval)))
50324 + retval = strcpy(buf, "<path too long>");
50325 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50326 + retval[1] = '\0';
50327 +
50328 + return retval;
50329 +}
50330 +
50331 +static char *
50332 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50333 + char *buf, int buflen)
50334 +{
50335 + struct path path;
50336 + char *res;
50337 +
50338 + path.dentry = (struct dentry *)dentry;
50339 + path.mnt = (struct vfsmount *)vfsmnt;
50340 +
50341 + /* we can use real_root.dentry, real_root.mnt, because this is only called
50342 + by the RBAC system */
50343 + res = gen_full_path(&path, &real_root, buf, buflen);
50344 +
50345 + return res;
50346 +}
50347 +
50348 +static char *
50349 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50350 + char *buf, int buflen)
50351 +{
50352 + char *res;
50353 + struct path path;
50354 + struct path root;
50355 + struct task_struct *reaper = init_pid_ns.child_reaper;
50356 +
50357 + path.dentry = (struct dentry *)dentry;
50358 + path.mnt = (struct vfsmount *)vfsmnt;
50359 +
50360 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50361 + get_fs_root(reaper->fs, &root);
50362 +
50363 + write_seqlock(&rename_lock);
50364 + br_read_lock(vfsmount_lock);
50365 + res = gen_full_path(&path, &root, buf, buflen);
50366 + br_read_unlock(vfsmount_lock);
50367 + write_sequnlock(&rename_lock);
50368 +
50369 + path_put(&root);
50370 + return res;
50371 +}
50372 +
50373 +static char *
50374 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50375 +{
50376 + char *ret;
50377 + write_seqlock(&rename_lock);
50378 + br_read_lock(vfsmount_lock);
50379 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50380 + PAGE_SIZE);
50381 + br_read_unlock(vfsmount_lock);
50382 + write_sequnlock(&rename_lock);
50383 + return ret;
50384 +}
50385 +
50386 +static char *
50387 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50388 +{
50389 + char *ret;
50390 + char *buf;
50391 + int buflen;
50392 +
50393 + write_seqlock(&rename_lock);
50394 + br_read_lock(vfsmount_lock);
50395 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50396 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50397 + buflen = (int)(ret - buf);
50398 + if (buflen >= 5)
50399 + prepend(&ret, &buflen, "/proc", 5);
50400 + else
50401 + ret = strcpy(buf, "<path too long>");
50402 + br_read_unlock(vfsmount_lock);
50403 + write_sequnlock(&rename_lock);
50404 + return ret;
50405 +}
50406 +
50407 +char *
50408 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50409 +{
50410 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50411 + PAGE_SIZE);
50412 +}
50413 +
50414 +char *
50415 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50416 +{
50417 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50418 + PAGE_SIZE);
50419 +}
50420 +
50421 +char *
50422 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50423 +{
50424 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50425 + PAGE_SIZE);
50426 +}
50427 +
50428 +char *
50429 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50430 +{
50431 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50432 + PAGE_SIZE);
50433 +}
50434 +
50435 +char *
50436 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50437 +{
50438 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50439 + PAGE_SIZE);
50440 +}
50441 +
50442 +__inline__ __u32
50443 +to_gr_audit(const __u32 reqmode)
50444 +{
50445 + /* masks off auditable permission flags, then shifts them to create
50446 + auditing flags, and adds the special case of append auditing if
50447 + we're requesting write */
50448 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
50449 +}
50450 +
50451 +struct acl_subject_label *
50452 +lookup_subject_map(const struct acl_subject_label *userp)
50453 +{
50454 + unsigned int index = shash(userp, subj_map_set.s_size);
50455 + struct subject_map *match;
50456 +
50457 + match = subj_map_set.s_hash[index];
50458 +
50459 + while (match && match->user != userp)
50460 + match = match->next;
50461 +
50462 + if (match != NULL)
50463 + return match->kernel;
50464 + else
50465 + return NULL;
50466 +}
50467 +
50468 +static void
50469 +insert_subj_map_entry(struct subject_map *subjmap)
50470 +{
50471 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
50472 + struct subject_map **curr;
50473 +
50474 + subjmap->prev = NULL;
50475 +
50476 + curr = &subj_map_set.s_hash[index];
50477 + if (*curr != NULL)
50478 + (*curr)->prev = subjmap;
50479 +
50480 + subjmap->next = *curr;
50481 + *curr = subjmap;
50482 +
50483 + return;
50484 +}
50485 +
50486 +static struct acl_role_label *
50487 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50488 + const gid_t gid)
50489 +{
50490 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50491 + struct acl_role_label *match;
50492 + struct role_allowed_ip *ipp;
50493 + unsigned int x;
50494 + u32 curr_ip = task->signal->curr_ip;
50495 +
50496 + task->signal->saved_ip = curr_ip;
50497 +
50498 + match = acl_role_set.r_hash[index];
50499 +
50500 + while (match) {
50501 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50502 + for (x = 0; x < match->domain_child_num; x++) {
50503 + if (match->domain_children[x] == uid)
50504 + goto found;
50505 + }
50506 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
50507 + break;
50508 + match = match->next;
50509 + }
50510 +found:
50511 + if (match == NULL) {
50512 + try_group:
50513 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
50514 + match = acl_role_set.r_hash[index];
50515 +
50516 + while (match) {
50517 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
50518 + for (x = 0; x < match->domain_child_num; x++) {
50519 + if (match->domain_children[x] == gid)
50520 + goto found2;
50521 + }
50522 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
50523 + break;
50524 + match = match->next;
50525 + }
50526 +found2:
50527 + if (match == NULL)
50528 + match = default_role;
50529 + if (match->allowed_ips == NULL)
50530 + return match;
50531 + else {
50532 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50533 + if (likely
50534 + ((ntohl(curr_ip) & ipp->netmask) ==
50535 + (ntohl(ipp->addr) & ipp->netmask)))
50536 + return match;
50537 + }
50538 + match = default_role;
50539 + }
50540 + } else if (match->allowed_ips == NULL) {
50541 + return match;
50542 + } else {
50543 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50544 + if (likely
50545 + ((ntohl(curr_ip) & ipp->netmask) ==
50546 + (ntohl(ipp->addr) & ipp->netmask)))
50547 + return match;
50548 + }
50549 + goto try_group;
50550 + }
50551 +
50552 + return match;
50553 +}
50554 +
50555 +struct acl_subject_label *
50556 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
50557 + const struct acl_role_label *role)
50558 +{
50559 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50560 + struct acl_subject_label *match;
50561 +
50562 + match = role->subj_hash[index];
50563 +
50564 + while (match && (match->inode != ino || match->device != dev ||
50565 + (match->mode & GR_DELETED))) {
50566 + match = match->next;
50567 + }
50568 +
50569 + if (match && !(match->mode & GR_DELETED))
50570 + return match;
50571 + else
50572 + return NULL;
50573 +}
50574 +
50575 +struct acl_subject_label *
50576 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
50577 + const struct acl_role_label *role)
50578 +{
50579 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50580 + struct acl_subject_label *match;
50581 +
50582 + match = role->subj_hash[index];
50583 +
50584 + while (match && (match->inode != ino || match->device != dev ||
50585 + !(match->mode & GR_DELETED))) {
50586 + match = match->next;
50587 + }
50588 +
50589 + if (match && (match->mode & GR_DELETED))
50590 + return match;
50591 + else
50592 + return NULL;
50593 +}
50594 +
50595 +static struct acl_object_label *
50596 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
50597 + const struct acl_subject_label *subj)
50598 +{
50599 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50600 + struct acl_object_label *match;
50601 +
50602 + match = subj->obj_hash[index];
50603 +
50604 + while (match && (match->inode != ino || match->device != dev ||
50605 + (match->mode & GR_DELETED))) {
50606 + match = match->next;
50607 + }
50608 +
50609 + if (match && !(match->mode & GR_DELETED))
50610 + return match;
50611 + else
50612 + return NULL;
50613 +}
50614 +
50615 +static struct acl_object_label *
50616 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
50617 + const struct acl_subject_label *subj)
50618 +{
50619 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50620 + struct acl_object_label *match;
50621 +
50622 + match = subj->obj_hash[index];
50623 +
50624 + while (match && (match->inode != ino || match->device != dev ||
50625 + !(match->mode & GR_DELETED))) {
50626 + match = match->next;
50627 + }
50628 +
50629 + if (match && (match->mode & GR_DELETED))
50630 + return match;
50631 +
50632 + match = subj->obj_hash[index];
50633 +
50634 + while (match && (match->inode != ino || match->device != dev ||
50635 + (match->mode & GR_DELETED))) {
50636 + match = match->next;
50637 + }
50638 +
50639 + if (match && !(match->mode & GR_DELETED))
50640 + return match;
50641 + else
50642 + return NULL;
50643 +}
50644 +
50645 +static struct name_entry *
50646 +lookup_name_entry(const char *name)
50647 +{
50648 + unsigned int len = strlen(name);
50649 + unsigned int key = full_name_hash(name, len);
50650 + unsigned int index = key % name_set.n_size;
50651 + struct name_entry *match;
50652 +
50653 + match = name_set.n_hash[index];
50654 +
50655 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
50656 + match = match->next;
50657 +
50658 + return match;
50659 +}
50660 +
50661 +static struct name_entry *
50662 +lookup_name_entry_create(const char *name)
50663 +{
50664 + unsigned int len = strlen(name);
50665 + unsigned int key = full_name_hash(name, len);
50666 + unsigned int index = key % name_set.n_size;
50667 + struct name_entry *match;
50668 +
50669 + match = name_set.n_hash[index];
50670 +
50671 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50672 + !match->deleted))
50673 + match = match->next;
50674 +
50675 + if (match && match->deleted)
50676 + return match;
50677 +
50678 + match = name_set.n_hash[index];
50679 +
50680 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50681 + match->deleted))
50682 + match = match->next;
50683 +
50684 + if (match && !match->deleted)
50685 + return match;
50686 + else
50687 + return NULL;
50688 +}
50689 +
50690 +static struct inodev_entry *
50691 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
50692 +{
50693 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
50694 + struct inodev_entry *match;
50695 +
50696 + match = inodev_set.i_hash[index];
50697 +
50698 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
50699 + match = match->next;
50700 +
50701 + return match;
50702 +}
50703 +
50704 +static void
50705 +insert_inodev_entry(struct inodev_entry *entry)
50706 +{
50707 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
50708 + inodev_set.i_size);
50709 + struct inodev_entry **curr;
50710 +
50711 + entry->prev = NULL;
50712 +
50713 + curr = &inodev_set.i_hash[index];
50714 + if (*curr != NULL)
50715 + (*curr)->prev = entry;
50716 +
50717 + entry->next = *curr;
50718 + *curr = entry;
50719 +
50720 + return;
50721 +}
50722 +
50723 +static void
50724 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
50725 +{
50726 + unsigned int index =
50727 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
50728 + struct acl_role_label **curr;
50729 + struct acl_role_label *tmp, *tmp2;
50730 +
50731 + curr = &acl_role_set.r_hash[index];
50732 +
50733 + /* simple case, slot is empty, just set it to our role */
50734 + if (*curr == NULL) {
50735 + *curr = role;
50736 + } else {
50737 + /* example:
50738 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
50739 + 2 -> 3
50740 + */
50741 + /* first check to see if we can already be reached via this slot */
50742 + tmp = *curr;
50743 + while (tmp && tmp != role)
50744 + tmp = tmp->next;
50745 + if (tmp == role) {
50746 + /* we don't need to add ourselves to this slot's chain */
50747 + return;
50748 + }
50749 + /* we need to add ourselves to this chain, two cases */
50750 + if (role->next == NULL) {
50751 + /* simple case, append the current chain to our role */
50752 + role->next = *curr;
50753 + *curr = role;
50754 + } else {
50755 + /* 1 -> 2 -> 3 -> 4
50756 + 2 -> 3 -> 4
50757 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
50758 + */
50759 + /* trickier case: walk our role's chain until we find
50760 + the role for the start of the current slot's chain */
50761 + tmp = role;
50762 + tmp2 = *curr;
50763 + while (tmp->next && tmp->next != tmp2)
50764 + tmp = tmp->next;
50765 + if (tmp->next == tmp2) {
50766 + /* from example above, we found 3, so just
50767 + replace this slot's chain with ours */
50768 + *curr = role;
50769 + } else {
50770 + /* we didn't find a subset of our role's chain
50771 + in the current slot's chain, so append their
50772 + chain to ours, and set us as the first role in
50773 + the slot's chain
50774 +
50775 + we could fold this case with the case above,
50776 + but making it explicit for clarity
50777 + */
50778 + tmp->next = tmp2;
50779 + *curr = role;
50780 + }
50781 + }
50782 + }
50783 +
50784 + return;
50785 +}
50786 +
50787 +static void
50788 +insert_acl_role_label(struct acl_role_label *role)
50789 +{
50790 + int i;
50791 +
50792 + if (role_list == NULL) {
50793 + role_list = role;
50794 + role->prev = NULL;
50795 + } else {
50796 + role->prev = role_list;
50797 + role_list = role;
50798 + }
50799 +
50800 + /* used for hash chains */
50801 + role->next = NULL;
50802 +
50803 + if (role->roletype & GR_ROLE_DOMAIN) {
50804 + for (i = 0; i < role->domain_child_num; i++)
50805 + __insert_acl_role_label(role, role->domain_children[i]);
50806 + } else
50807 + __insert_acl_role_label(role, role->uidgid);
50808 +}
50809 +
50810 +static int
50811 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
50812 +{
50813 + struct name_entry **curr, *nentry;
50814 + struct inodev_entry *ientry;
50815 + unsigned int len = strlen(name);
50816 + unsigned int key = full_name_hash(name, len);
50817 + unsigned int index = key % name_set.n_size;
50818 +
50819 + curr = &name_set.n_hash[index];
50820 +
50821 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
50822 + curr = &((*curr)->next);
50823 +
50824 + if (*curr != NULL)
50825 + return 1;
50826 +
50827 + nentry = acl_alloc(sizeof (struct name_entry));
50828 + if (nentry == NULL)
50829 + return 0;
50830 + ientry = acl_alloc(sizeof (struct inodev_entry));
50831 + if (ientry == NULL)
50832 + return 0;
50833 + ientry->nentry = nentry;
50834 +
50835 + nentry->key = key;
50836 + nentry->name = name;
50837 + nentry->inode = inode;
50838 + nentry->device = device;
50839 + nentry->len = len;
50840 + nentry->deleted = deleted;
50841 +
50842 + nentry->prev = NULL;
50843 + curr = &name_set.n_hash[index];
50844 + if (*curr != NULL)
50845 + (*curr)->prev = nentry;
50846 + nentry->next = *curr;
50847 + *curr = nentry;
50848 +
50849 + /* insert us into the table searchable by inode/dev */
50850 + insert_inodev_entry(ientry);
50851 +
50852 + return 1;
50853 +}
50854 +
50855 +static void
50856 +insert_acl_obj_label(struct acl_object_label *obj,
50857 + struct acl_subject_label *subj)
50858 +{
50859 + unsigned int index =
50860 + fhash(obj->inode, obj->device, subj->obj_hash_size);
50861 + struct acl_object_label **curr;
50862 +
50863 +
50864 + obj->prev = NULL;
50865 +
50866 + curr = &subj->obj_hash[index];
50867 + if (*curr != NULL)
50868 + (*curr)->prev = obj;
50869 +
50870 + obj->next = *curr;
50871 + *curr = obj;
50872 +
50873 + return;
50874 +}
50875 +
50876 +static void
50877 +insert_acl_subj_label(struct acl_subject_label *obj,
50878 + struct acl_role_label *role)
50879 +{
50880 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
50881 + struct acl_subject_label **curr;
50882 +
50883 + obj->prev = NULL;
50884 +
50885 + curr = &role->subj_hash[index];
50886 + if (*curr != NULL)
50887 + (*curr)->prev = obj;
50888 +
50889 + obj->next = *curr;
50890 + *curr = obj;
50891 +
50892 + return;
50893 +}
50894 +
50895 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
50896 +
50897 +static void *
50898 +create_table(__u32 * len, int elementsize)
50899 +{
50900 + unsigned int table_sizes[] = {
50901 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
50902 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
50903 + 4194301, 8388593, 16777213, 33554393, 67108859
50904 + };
50905 + void *newtable = NULL;
50906 + unsigned int pwr = 0;
50907 +
50908 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
50909 + table_sizes[pwr] <= *len)
50910 + pwr++;
50911 +
50912 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
50913 + return newtable;
50914 +
50915 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
50916 + newtable =
50917 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
50918 + else
50919 + newtable = vmalloc(table_sizes[pwr] * elementsize);
50920 +
50921 + *len = table_sizes[pwr];
50922 +
50923 + return newtable;
50924 +}
50925 +
50926 +static int
50927 +init_variables(const struct gr_arg *arg)
50928 +{
50929 + struct task_struct *reaper = init_pid_ns.child_reaper;
50930 + unsigned int stacksize;
50931 +
50932 + subj_map_set.s_size = arg->role_db.num_subjects;
50933 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
50934 + name_set.n_size = arg->role_db.num_objects;
50935 + inodev_set.i_size = arg->role_db.num_objects;
50936 +
50937 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
50938 + !name_set.n_size || !inodev_set.i_size)
50939 + return 1;
50940 +
50941 + if (!gr_init_uidset())
50942 + return 1;
50943 +
50944 + /* set up the stack that holds allocation info */
50945 +
50946 + stacksize = arg->role_db.num_pointers + 5;
50947 +
50948 + if (!acl_alloc_stack_init(stacksize))
50949 + return 1;
50950 +
50951 + /* grab reference for the real root dentry and vfsmount */
50952 + get_fs_root(reaper->fs, &real_root);
50953 +
50954 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50955 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
50956 +#endif
50957 +
50958 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
50959 + if (fakefs_obj_rw == NULL)
50960 + return 1;
50961 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
50962 +
50963 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
50964 + if (fakefs_obj_rwx == NULL)
50965 + return 1;
50966 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
50967 +
50968 + subj_map_set.s_hash =
50969 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
50970 + acl_role_set.r_hash =
50971 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
50972 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
50973 + inodev_set.i_hash =
50974 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
50975 +
50976 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
50977 + !name_set.n_hash || !inodev_set.i_hash)
50978 + return 1;
50979 +
50980 + memset(subj_map_set.s_hash, 0,
50981 + sizeof(struct subject_map *) * subj_map_set.s_size);
50982 + memset(acl_role_set.r_hash, 0,
50983 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
50984 + memset(name_set.n_hash, 0,
50985 + sizeof (struct name_entry *) * name_set.n_size);
50986 + memset(inodev_set.i_hash, 0,
50987 + sizeof (struct inodev_entry *) * inodev_set.i_size);
50988 +
50989 + return 0;
50990 +}
50991 +
50992 +/* free information not needed after startup
50993 + currently contains user->kernel pointer mappings for subjects
50994 +*/
50995 +
50996 +static void
50997 +free_init_variables(void)
50998 +{
50999 + __u32 i;
51000 +
51001 + if (subj_map_set.s_hash) {
51002 + for (i = 0; i < subj_map_set.s_size; i++) {
51003 + if (subj_map_set.s_hash[i]) {
51004 + kfree(subj_map_set.s_hash[i]);
51005 + subj_map_set.s_hash[i] = NULL;
51006 + }
51007 + }
51008 +
51009 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51010 + PAGE_SIZE)
51011 + kfree(subj_map_set.s_hash);
51012 + else
51013 + vfree(subj_map_set.s_hash);
51014 + }
51015 +
51016 + return;
51017 +}
51018 +
51019 +static void
51020 +free_variables(void)
51021 +{
51022 + struct acl_subject_label *s;
51023 + struct acl_role_label *r;
51024 + struct task_struct *task, *task2;
51025 + unsigned int x;
51026 +
51027 + gr_clear_learn_entries();
51028 +
51029 + read_lock(&tasklist_lock);
51030 + do_each_thread(task2, task) {
51031 + task->acl_sp_role = 0;
51032 + task->acl_role_id = 0;
51033 + task->acl = NULL;
51034 + task->role = NULL;
51035 + } while_each_thread(task2, task);
51036 + read_unlock(&tasklist_lock);
51037 +
51038 + /* release the reference to the real root dentry and vfsmount */
51039 + path_put(&real_root);
51040 + memset(&real_root, 0, sizeof(real_root));
51041 +
51042 + /* free all object hash tables */
51043 +
51044 + FOR_EACH_ROLE_START(r)
51045 + if (r->subj_hash == NULL)
51046 + goto next_role;
51047 + FOR_EACH_SUBJECT_START(r, s, x)
51048 + if (s->obj_hash == NULL)
51049 + break;
51050 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51051 + kfree(s->obj_hash);
51052 + else
51053 + vfree(s->obj_hash);
51054 + FOR_EACH_SUBJECT_END(s, x)
51055 + FOR_EACH_NESTED_SUBJECT_START(r, s)
51056 + if (s->obj_hash == NULL)
51057 + break;
51058 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51059 + kfree(s->obj_hash);
51060 + else
51061 + vfree(s->obj_hash);
51062 + FOR_EACH_NESTED_SUBJECT_END(s)
51063 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51064 + kfree(r->subj_hash);
51065 + else
51066 + vfree(r->subj_hash);
51067 + r->subj_hash = NULL;
51068 +next_role:
51069 + FOR_EACH_ROLE_END(r)
51070 +
51071 + acl_free_all();
51072 +
51073 + if (acl_role_set.r_hash) {
51074 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51075 + PAGE_SIZE)
51076 + kfree(acl_role_set.r_hash);
51077 + else
51078 + vfree(acl_role_set.r_hash);
51079 + }
51080 + if (name_set.n_hash) {
51081 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
51082 + PAGE_SIZE)
51083 + kfree(name_set.n_hash);
51084 + else
51085 + vfree(name_set.n_hash);
51086 + }
51087 +
51088 + if (inodev_set.i_hash) {
51089 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51090 + PAGE_SIZE)
51091 + kfree(inodev_set.i_hash);
51092 + else
51093 + vfree(inodev_set.i_hash);
51094 + }
51095 +
51096 + gr_free_uidset();
51097 +
51098 + memset(&name_set, 0, sizeof (struct name_db));
51099 + memset(&inodev_set, 0, sizeof (struct inodev_db));
51100 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51101 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51102 +
51103 + default_role = NULL;
51104 + kernel_role = NULL;
51105 + role_list = NULL;
51106 +
51107 + return;
51108 +}
51109 +
51110 +static __u32
51111 +count_user_objs(struct acl_object_label *userp)
51112 +{
51113 + struct acl_object_label o_tmp;
51114 + __u32 num = 0;
51115 +
51116 + while (userp) {
51117 + if (copy_from_user(&o_tmp, userp,
51118 + sizeof (struct acl_object_label)))
51119 + break;
51120 +
51121 + userp = o_tmp.prev;
51122 + num++;
51123 + }
51124 +
51125 + return num;
51126 +}
51127 +
51128 +static struct acl_subject_label *
51129 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51130 +
51131 +static int
51132 +copy_user_glob(struct acl_object_label *obj)
51133 +{
51134 + struct acl_object_label *g_tmp, **guser;
51135 + unsigned int len;
51136 + char *tmp;
51137 +
51138 + if (obj->globbed == NULL)
51139 + return 0;
51140 +
51141 + guser = &obj->globbed;
51142 + while (*guser) {
51143 + g_tmp = (struct acl_object_label *)
51144 + acl_alloc(sizeof (struct acl_object_label));
51145 + if (g_tmp == NULL)
51146 + return -ENOMEM;
51147 +
51148 + if (copy_from_user(g_tmp, *guser,
51149 + sizeof (struct acl_object_label)))
51150 + return -EFAULT;
51151 +
51152 + len = strnlen_user(g_tmp->filename, PATH_MAX);
51153 +
51154 + if (!len || len >= PATH_MAX)
51155 + return -EINVAL;
51156 +
51157 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51158 + return -ENOMEM;
51159 +
51160 + if (copy_from_user(tmp, g_tmp->filename, len))
51161 + return -EFAULT;
51162 + tmp[len-1] = '\0';
51163 + g_tmp->filename = tmp;
51164 +
51165 + *guser = g_tmp;
51166 + guser = &(g_tmp->next);
51167 + }
51168 +
51169 + return 0;
51170 +}
51171 +
51172 +static int
51173 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51174 + struct acl_role_label *role)
51175 +{
51176 + struct acl_object_label *o_tmp;
51177 + unsigned int len;
51178 + int ret;
51179 + char *tmp;
51180 +
51181 + while (userp) {
51182 + if ((o_tmp = (struct acl_object_label *)
51183 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
51184 + return -ENOMEM;
51185 +
51186 + if (copy_from_user(o_tmp, userp,
51187 + sizeof (struct acl_object_label)))
51188 + return -EFAULT;
51189 +
51190 + userp = o_tmp->prev;
51191 +
51192 + len = strnlen_user(o_tmp->filename, PATH_MAX);
51193 +
51194 + if (!len || len >= PATH_MAX)
51195 + return -EINVAL;
51196 +
51197 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51198 + return -ENOMEM;
51199 +
51200 + if (copy_from_user(tmp, o_tmp->filename, len))
51201 + return -EFAULT;
51202 + tmp[len-1] = '\0';
51203 + o_tmp->filename = tmp;
51204 +
51205 + insert_acl_obj_label(o_tmp, subj);
51206 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51207 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51208 + return -ENOMEM;
51209 +
51210 + ret = copy_user_glob(o_tmp);
51211 + if (ret)
51212 + return ret;
51213 +
51214 + if (o_tmp->nested) {
51215 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51216 + if (IS_ERR(o_tmp->nested))
51217 + return PTR_ERR(o_tmp->nested);
51218 +
51219 + /* insert into nested subject list */
51220 + o_tmp->nested->next = role->hash->first;
51221 + role->hash->first = o_tmp->nested;
51222 + }
51223 + }
51224 +
51225 + return 0;
51226 +}
51227 +
51228 +static __u32
51229 +count_user_subjs(struct acl_subject_label *userp)
51230 +{
51231 + struct acl_subject_label s_tmp;
51232 + __u32 num = 0;
51233 +
51234 + while (userp) {
51235 + if (copy_from_user(&s_tmp, userp,
51236 + sizeof (struct acl_subject_label)))
51237 + break;
51238 +
51239 + userp = s_tmp.prev;
51240 + /* do not count nested subjects against this count, since
51241 + they are not included in the hash table, but are
51242 + attached to objects. We have already counted
51243 + the subjects in userspace for the allocation
51244 + stack
51245 + */
51246 + if (!(s_tmp.mode & GR_NESTED))
51247 + num++;
51248 + }
51249 +
51250 + return num;
51251 +}
51252 +
51253 +static int
51254 +copy_user_allowedips(struct acl_role_label *rolep)
51255 +{
51256 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51257 +
51258 + ruserip = rolep->allowed_ips;
51259 +
51260 + while (ruserip) {
51261 + rlast = rtmp;
51262 +
51263 + if ((rtmp = (struct role_allowed_ip *)
51264 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51265 + return -ENOMEM;
51266 +
51267 + if (copy_from_user(rtmp, ruserip,
51268 + sizeof (struct role_allowed_ip)))
51269 + return -EFAULT;
51270 +
51271 + ruserip = rtmp->prev;
51272 +
51273 + if (!rlast) {
51274 + rtmp->prev = NULL;
51275 + rolep->allowed_ips = rtmp;
51276 + } else {
51277 + rlast->next = rtmp;
51278 + rtmp->prev = rlast;
51279 + }
51280 +
51281 + if (!ruserip)
51282 + rtmp->next = NULL;
51283 + }
51284 +
51285 + return 0;
51286 +}
51287 +
51288 +static int
51289 +copy_user_transitions(struct acl_role_label *rolep)
51290 +{
51291 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
51292 +
51293 + unsigned int len;
51294 + char *tmp;
51295 +
51296 + rusertp = rolep->transitions;
51297 +
51298 + while (rusertp) {
51299 + rlast = rtmp;
51300 +
51301 + if ((rtmp = (struct role_transition *)
51302 + acl_alloc(sizeof (struct role_transition))) == NULL)
51303 + return -ENOMEM;
51304 +
51305 + if (copy_from_user(rtmp, rusertp,
51306 + sizeof (struct role_transition)))
51307 + return -EFAULT;
51308 +
51309 + rusertp = rtmp->prev;
51310 +
51311 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51312 +
51313 + if (!len || len >= GR_SPROLE_LEN)
51314 + return -EINVAL;
51315 +
51316 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51317 + return -ENOMEM;
51318 +
51319 + if (copy_from_user(tmp, rtmp->rolename, len))
51320 + return -EFAULT;
51321 + tmp[len-1] = '\0';
51322 + rtmp->rolename = tmp;
51323 +
51324 + if (!rlast) {
51325 + rtmp->prev = NULL;
51326 + rolep->transitions = rtmp;
51327 + } else {
51328 + rlast->next = rtmp;
51329 + rtmp->prev = rlast;
51330 + }
51331 +
51332 + if (!rusertp)
51333 + rtmp->next = NULL;
51334 + }
51335 +
51336 + return 0;
51337 +}
51338 +
51339 +static struct acl_subject_label *
51340 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51341 +{
51342 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51343 + unsigned int len;
51344 + char *tmp;
51345 + __u32 num_objs;
51346 + struct acl_ip_label **i_tmp, *i_utmp2;
51347 + struct gr_hash_struct ghash;
51348 + struct subject_map *subjmap;
51349 + unsigned int i_num;
51350 + int err;
51351 +
51352 + s_tmp = lookup_subject_map(userp);
51353 +
51354 + /* we've already copied this subject into the kernel, just return
51355 + the reference to it, and don't copy it over again
51356 + */
51357 + if (s_tmp)
51358 + return(s_tmp);
51359 +
51360 + if ((s_tmp = (struct acl_subject_label *)
51361 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51362 + return ERR_PTR(-ENOMEM);
51363 +
51364 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51365 + if (subjmap == NULL)
51366 + return ERR_PTR(-ENOMEM);
51367 +
51368 + subjmap->user = userp;
51369 + subjmap->kernel = s_tmp;
51370 + insert_subj_map_entry(subjmap);
51371 +
51372 + if (copy_from_user(s_tmp, userp,
51373 + sizeof (struct acl_subject_label)))
51374 + return ERR_PTR(-EFAULT);
51375 +
51376 + len = strnlen_user(s_tmp->filename, PATH_MAX);
51377 +
51378 + if (!len || len >= PATH_MAX)
51379 + return ERR_PTR(-EINVAL);
51380 +
51381 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51382 + return ERR_PTR(-ENOMEM);
51383 +
51384 + if (copy_from_user(tmp, s_tmp->filename, len))
51385 + return ERR_PTR(-EFAULT);
51386 + tmp[len-1] = '\0';
51387 + s_tmp->filename = tmp;
51388 +
51389 + if (!strcmp(s_tmp->filename, "/"))
51390 + role->root_label = s_tmp;
51391 +
51392 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51393 + return ERR_PTR(-EFAULT);
51394 +
51395 + /* copy user and group transition tables */
51396 +
51397 + if (s_tmp->user_trans_num) {
51398 + uid_t *uidlist;
51399 +
51400 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51401 + if (uidlist == NULL)
51402 + return ERR_PTR(-ENOMEM);
51403 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51404 + return ERR_PTR(-EFAULT);
51405 +
51406 + s_tmp->user_transitions = uidlist;
51407 + }
51408 +
51409 + if (s_tmp->group_trans_num) {
51410 + gid_t *gidlist;
51411 +
51412 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51413 + if (gidlist == NULL)
51414 + return ERR_PTR(-ENOMEM);
51415 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51416 + return ERR_PTR(-EFAULT);
51417 +
51418 + s_tmp->group_transitions = gidlist;
51419 + }
51420 +
51421 + /* set up object hash table */
51422 + num_objs = count_user_objs(ghash.first);
51423 +
51424 + s_tmp->obj_hash_size = num_objs;
51425 + s_tmp->obj_hash =
51426 + (struct acl_object_label **)
51427 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51428 +
51429 + if (!s_tmp->obj_hash)
51430 + return ERR_PTR(-ENOMEM);
51431 +
51432 + memset(s_tmp->obj_hash, 0,
51433 + s_tmp->obj_hash_size *
51434 + sizeof (struct acl_object_label *));
51435 +
51436 + /* add in objects */
51437 + err = copy_user_objs(ghash.first, s_tmp, role);
51438 +
51439 + if (err)
51440 + return ERR_PTR(err);
51441 +
51442 + /* set pointer for parent subject */
51443 + if (s_tmp->parent_subject) {
51444 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51445 +
51446 + if (IS_ERR(s_tmp2))
51447 + return s_tmp2;
51448 +
51449 + s_tmp->parent_subject = s_tmp2;
51450 + }
51451 +
51452 + /* add in ip acls */
51453 +
51454 + if (!s_tmp->ip_num) {
51455 + s_tmp->ips = NULL;
51456 + goto insert;
51457 + }
51458 +
51459 + i_tmp =
51460 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51461 + sizeof (struct acl_ip_label *));
51462 +
51463 + if (!i_tmp)
51464 + return ERR_PTR(-ENOMEM);
51465 +
51466 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
51467 + *(i_tmp + i_num) =
51468 + (struct acl_ip_label *)
51469 + acl_alloc(sizeof (struct acl_ip_label));
51470 + if (!*(i_tmp + i_num))
51471 + return ERR_PTR(-ENOMEM);
51472 +
51473 + if (copy_from_user
51474 + (&i_utmp2, s_tmp->ips + i_num,
51475 + sizeof (struct acl_ip_label *)))
51476 + return ERR_PTR(-EFAULT);
51477 +
51478 + if (copy_from_user
51479 + (*(i_tmp + i_num), i_utmp2,
51480 + sizeof (struct acl_ip_label)))
51481 + return ERR_PTR(-EFAULT);
51482 +
51483 + if ((*(i_tmp + i_num))->iface == NULL)
51484 + continue;
51485 +
51486 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51487 + if (!len || len >= IFNAMSIZ)
51488 + return ERR_PTR(-EINVAL);
51489 + tmp = acl_alloc(len);
51490 + if (tmp == NULL)
51491 + return ERR_PTR(-ENOMEM);
51492 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51493 + return ERR_PTR(-EFAULT);
51494 + (*(i_tmp + i_num))->iface = tmp;
51495 + }
51496 +
51497 + s_tmp->ips = i_tmp;
51498 +
51499 +insert:
51500 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51501 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51502 + return ERR_PTR(-ENOMEM);
51503 +
51504 + return s_tmp;
51505 +}
51506 +
51507 +static int
51508 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51509 +{
51510 + struct acl_subject_label s_pre;
51511 + struct acl_subject_label * ret;
51512 + int err;
51513 +
51514 + while (userp) {
51515 + if (copy_from_user(&s_pre, userp,
51516 + sizeof (struct acl_subject_label)))
51517 + return -EFAULT;
51518 +
51519 + /* do not add nested subjects here, add
51520 + while parsing objects
51521 + */
51522 +
51523 + if (s_pre.mode & GR_NESTED) {
51524 + userp = s_pre.prev;
51525 + continue;
51526 + }
51527 +
51528 + ret = do_copy_user_subj(userp, role);
51529 +
51530 + err = PTR_ERR(ret);
51531 + if (IS_ERR(ret))
51532 + return err;
51533 +
51534 + insert_acl_subj_label(ret, role);
51535 +
51536 + userp = s_pre.prev;
51537 + }
51538 +
51539 + return 0;
51540 +}
51541 +
51542 +static int
51543 +copy_user_acl(struct gr_arg *arg)
51544 +{
51545 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51546 + struct sprole_pw *sptmp;
51547 + struct gr_hash_struct *ghash;
51548 + uid_t *domainlist;
51549 + unsigned int r_num;
51550 + unsigned int len;
51551 + char *tmp;
51552 + int err = 0;
51553 + __u16 i;
51554 + __u32 num_subjs;
51555 +
51556 + /* we need a default and kernel role */
51557 + if (arg->role_db.num_roles < 2)
51558 + return -EINVAL;
51559 +
51560 + /* copy special role authentication info from userspace */
51561 +
51562 + num_sprole_pws = arg->num_sprole_pws;
51563 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
51564 +
51565 + if (!acl_special_roles && num_sprole_pws)
51566 + return -ENOMEM;
51567 +
51568 + for (i = 0; i < num_sprole_pws; i++) {
51569 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
51570 + if (!sptmp)
51571 + return -ENOMEM;
51572 + if (copy_from_user(sptmp, arg->sprole_pws + i,
51573 + sizeof (struct sprole_pw)))
51574 + return -EFAULT;
51575 +
51576 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
51577 +
51578 + if (!len || len >= GR_SPROLE_LEN)
51579 + return -EINVAL;
51580 +
51581 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51582 + return -ENOMEM;
51583 +
51584 + if (copy_from_user(tmp, sptmp->rolename, len))
51585 + return -EFAULT;
51586 +
51587 + tmp[len-1] = '\0';
51588 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51589 + printk(KERN_ALERT "Copying special role %s\n", tmp);
51590 +#endif
51591 + sptmp->rolename = tmp;
51592 + acl_special_roles[i] = sptmp;
51593 + }
51594 +
51595 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
51596 +
51597 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
51598 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
51599 +
51600 + if (!r_tmp)
51601 + return -ENOMEM;
51602 +
51603 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
51604 + sizeof (struct acl_role_label *)))
51605 + return -EFAULT;
51606 +
51607 + if (copy_from_user(r_tmp, r_utmp2,
51608 + sizeof (struct acl_role_label)))
51609 + return -EFAULT;
51610 +
51611 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
51612 +
51613 +		if (!len || len >= GR_SPROLE_LEN)
51614 + return -EINVAL;
51615 +
51616 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51617 + return -ENOMEM;
51618 +
51619 + if (copy_from_user(tmp, r_tmp->rolename, len))
51620 + return -EFAULT;
51621 +
51622 + tmp[len-1] = '\0';
51623 + r_tmp->rolename = tmp;
51624 +
51625 + if (!strcmp(r_tmp->rolename, "default")
51626 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
51627 + default_role = r_tmp;
51628 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
51629 + kernel_role = r_tmp;
51630 + }
51631 +
51632 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
51633 + return -ENOMEM;
51634 +
51635 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
51636 + return -EFAULT;
51637 +
51638 + r_tmp->hash = ghash;
51639 +
51640 + num_subjs = count_user_subjs(r_tmp->hash->first);
51641 +
51642 + r_tmp->subj_hash_size = num_subjs;
51643 + r_tmp->subj_hash =
51644 + (struct acl_subject_label **)
51645 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
51646 +
51647 + if (!r_tmp->subj_hash)
51648 + return -ENOMEM;
51649 +
51650 + err = copy_user_allowedips(r_tmp);
51651 + if (err)
51652 + return err;
51653 +
51654 + /* copy domain info */
51655 + if (r_tmp->domain_children != NULL) {
51656 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
51657 + if (domainlist == NULL)
51658 + return -ENOMEM;
51659 +
51660 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
51661 + return -EFAULT;
51662 +
51663 + r_tmp->domain_children = domainlist;
51664 + }
51665 +
51666 + err = copy_user_transitions(r_tmp);
51667 + if (err)
51668 + return err;
51669 +
51670 + memset(r_tmp->subj_hash, 0,
51671 + r_tmp->subj_hash_size *
51672 + sizeof (struct acl_subject_label *));
51673 +
51674 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
51675 +
51676 + if (err)
51677 + return err;
51678 +
51679 + /* set nested subject list to null */
51680 + r_tmp->hash->first = NULL;
51681 +
51682 + insert_acl_role_label(r_tmp);
51683 + }
51684 +
51685 + if (default_role == NULL || kernel_role == NULL)
51686 + return -EINVAL;
51687 +
51688 + return err;
51689 +}
51690 +
51691 +static int
51692 +gracl_init(struct gr_arg *args)
51693 +{
51694 + int error = 0;
51695 +
51696 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
51697 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
51698 +
51699 + if (init_variables(args)) {
51700 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
51701 + error = -ENOMEM;
51702 + free_variables();
51703 + goto out;
51704 + }
51705 +
51706 + error = copy_user_acl(args);
51707 + free_init_variables();
51708 + if (error) {
51709 + free_variables();
51710 + goto out;
51711 + }
51712 +
51713 + if ((error = gr_set_acls(0))) {
51714 + free_variables();
51715 + goto out;
51716 + }
51717 +
51718 + pax_open_kernel();
51719 + gr_status |= GR_READY;
51720 + pax_close_kernel();
51721 +
51722 + out:
51723 + return error;
51724 +}
51725 +
51726 +/* derived from glibc fnmatch() 0: match, 1: no match*/
51727 +
51728 +static int
51729 +glob_match(const char *p, const char *n)
51730 +{
51731 + char c;
51732 +
51733 + while ((c = *p++) != '\0') {
51734 + switch (c) {
51735 + case '?':
51736 + if (*n == '\0')
51737 + return 1;
51738 + else if (*n == '/')
51739 + return 1;
51740 + break;
51741 + case '\\':
51742 + if (*n != c)
51743 + return 1;
51744 + break;
51745 + case '*':
51746 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
51747 + if (*n == '/')
51748 + return 1;
51749 + else if (c == '?') {
51750 + if (*n == '\0')
51751 + return 1;
51752 + else
51753 + ++n;
51754 + }
51755 + }
51756 + if (c == '\0') {
51757 + return 0;
51758 + } else {
51759 + const char *endp;
51760 +
51761 + if ((endp = strchr(n, '/')) == NULL)
51762 + endp = n + strlen(n);
51763 +
51764 + if (c == '[') {
51765 + for (--p; n < endp; ++n)
51766 + if (!glob_match(p, n))
51767 + return 0;
51768 + } else if (c == '/') {
51769 + while (*n != '\0' && *n != '/')
51770 + ++n;
51771 + if (*n == '/' && !glob_match(p, n + 1))
51772 + return 0;
51773 + } else {
51774 + for (--p; n < endp; ++n)
51775 + if (*n == c && !glob_match(p, n))
51776 + return 0;
51777 + }
51778 +
51779 + return 1;
51780 + }
51781 + case '[':
51782 + {
51783 + int not;
51784 + char cold;
51785 +
51786 + if (*n == '\0' || *n == '/')
51787 + return 1;
51788 +
51789 + not = (*p == '!' || *p == '^');
51790 + if (not)
51791 + ++p;
51792 +
51793 + c = *p++;
51794 + for (;;) {
51795 + unsigned char fn = (unsigned char)*n;
51796 +
51797 + if (c == '\0')
51798 + return 1;
51799 + else {
51800 + if (c == fn)
51801 + goto matched;
51802 + cold = c;
51803 + c = *p++;
51804 +
51805 + if (c == '-' && *p != ']') {
51806 + unsigned char cend = *p++;
51807 +
51808 + if (cend == '\0')
51809 + return 1;
51810 +
51811 + if (cold <= fn && fn <= cend)
51812 + goto matched;
51813 +
51814 + c = *p++;
51815 + }
51816 + }
51817 +
51818 + if (c == ']')
51819 + break;
51820 + }
51821 + if (!not)
51822 + return 1;
51823 + break;
51824 + matched:
51825 + while (c != ']') {
51826 + if (c == '\0')
51827 + return 1;
51828 +
51829 + c = *p++;
51830 + }
51831 + if (not)
51832 + return 1;
51833 + }
51834 + break;
51835 + default:
51836 + if (c != *n)
51837 + return 1;
51838 + }
51839 +
51840 + ++n;
51841 + }
51842 +
51843 + if (*n == '\0')
51844 + return 0;
51845 +
51846 + if (*n == '/')
51847 + return 0;
51848 +
51849 + return 1;
51850 +}
51851 +
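/* A minimal userspace sketch (illustrative only, not taken from the patch) of
 * the matching semantics glob_match() above implements: 0 means match,
 * non-zero means no match, and wildcards never cross a '/' boundary. Since the
 * routine is described as derived from glibc fnmatch(), the sketch uses the
 * real fnmatch(3) API with FNM_PATHNAME as an analogy; the example paths are
 * assumptions chosen for illustration. */
#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	/* 0 == match, FNM_NOMATCH == no match */
	printf("%d\n", fnmatch("/home/*", "/home/user", FNM_PATHNAME));           /* 0: '*' matches "user" */
	printf("%d\n", fnmatch("/home/*", "/home/user/file", FNM_PATHNAME));      /* no match: '*' won't cross '/' */
	printf("%d\n", fnmatch("/home/*/file", "/home/user/file", FNM_PATHNAME)); /* 0: each component matched */
	return 0;
}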
51852 +static struct acl_object_label *
51853 +chk_glob_label(struct acl_object_label *globbed,
51854 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
51855 +{
51856 + struct acl_object_label *tmp;
51857 +
51858 + if (*path == NULL)
51859 + *path = gr_to_filename_nolock(dentry, mnt);
51860 +
51861 + tmp = globbed;
51862 +
51863 + while (tmp) {
51864 + if (!glob_match(tmp->filename, *path))
51865 + return tmp;
51866 + tmp = tmp->next;
51867 + }
51868 +
51869 + return NULL;
51870 +}
51871 +
51872 +static struct acl_object_label *
51873 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51874 + const ino_t curr_ino, const dev_t curr_dev,
51875 + const struct acl_subject_label *subj, char **path, const int checkglob)
51876 +{
51877 + struct acl_subject_label *tmpsubj;
51878 + struct acl_object_label *retval;
51879 + struct acl_object_label *retval2;
51880 +
51881 + tmpsubj = (struct acl_subject_label *) subj;
51882 + read_lock(&gr_inode_lock);
51883 + do {
51884 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
51885 + if (retval) {
51886 + if (checkglob && retval->globbed) {
51887 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
51888 + if (retval2)
51889 + retval = retval2;
51890 + }
51891 + break;
51892 + }
51893 + } while ((tmpsubj = tmpsubj->parent_subject));
51894 + read_unlock(&gr_inode_lock);
51895 +
51896 + return retval;
51897 +}
51898 +
51899 +static __inline__ struct acl_object_label *
51900 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51901 + struct dentry *curr_dentry,
51902 + const struct acl_subject_label *subj, char **path, const int checkglob)
51903 +{
51904 + int newglob = checkglob;
51905 + ino_t inode;
51906 + dev_t device;
51907 +
51908 +	/* if we aren't yet checking a subdirectory of the original path, skip glob
51909 +	   checking, as we don't want a / * rule to match instead of the / object;
51910 +	   don't skip it for create lookups that call this function though, since they look up
51911 +	   on the parent and thus need globbing checks on all paths
51912 +	*/
51913 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
51914 + newglob = GR_NO_GLOB;
51915 +
51916 + spin_lock(&curr_dentry->d_lock);
51917 + inode = curr_dentry->d_inode->i_ino;
51918 + device = __get_dev(curr_dentry);
51919 + spin_unlock(&curr_dentry->d_lock);
51920 +
51921 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
51922 +}
51923 +
51924 +static struct acl_object_label *
51925 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51926 + const struct acl_subject_label *subj, char *path, const int checkglob)
51927 +{
51928 + struct dentry *dentry = (struct dentry *) l_dentry;
51929 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51930 + struct mount *real_mnt = real_mount(mnt);
51931 + struct acl_object_label *retval;
51932 + struct dentry *parent;
51933 +
51934 + write_seqlock(&rename_lock);
51935 + br_read_lock(vfsmount_lock);
51936 +
51937 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
51938 +#ifdef CONFIG_NET
51939 + mnt == sock_mnt ||
51940 +#endif
51941 +#ifdef CONFIG_HUGETLBFS
51942 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
51943 +#endif
51944 + /* ignore Eric Biederman */
51945 + IS_PRIVATE(l_dentry->d_inode))) {
51946 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
51947 + goto out;
51948 + }
51949 +
51950 + for (;;) {
51951 + if (dentry == real_root.dentry && mnt == real_root.mnt)
51952 + break;
51953 +
51954 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51955 + if (!mnt_has_parent(real_mnt))
51956 + break;
51957 +
51958 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51959 + if (retval != NULL)
51960 + goto out;
51961 +
51962 + dentry = real_mnt->mnt_mountpoint;
51963 + real_mnt = real_mnt->mnt_parent;
51964 + mnt = &real_mnt->mnt;
51965 + continue;
51966 + }
51967 +
51968 + parent = dentry->d_parent;
51969 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51970 + if (retval != NULL)
51971 + goto out;
51972 +
51973 + dentry = parent;
51974 + }
51975 +
51976 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51977 +
51978 + /* real_root is pinned so we don't have to hold a reference */
51979 + if (retval == NULL)
51980 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
51981 +out:
51982 + br_read_unlock(vfsmount_lock);
51983 + write_sequnlock(&rename_lock);
51984 +
51985 + BUG_ON(retval == NULL);
51986 +
51987 + return retval;
51988 +}
51989 +
51990 +static __inline__ struct acl_object_label *
51991 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51992 + const struct acl_subject_label *subj)
51993 +{
51994 + char *path = NULL;
51995 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
51996 +}
51997 +
51998 +static __inline__ struct acl_object_label *
51999 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52000 + const struct acl_subject_label *subj)
52001 +{
52002 + char *path = NULL;
52003 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52004 +}
52005 +
52006 +static __inline__ struct acl_object_label *
52007 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52008 + const struct acl_subject_label *subj, char *path)
52009 +{
52010 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52011 +}
52012 +
52013 +static struct acl_subject_label *
52014 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52015 + const struct acl_role_label *role)
52016 +{
52017 + struct dentry *dentry = (struct dentry *) l_dentry;
52018 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52019 + struct mount *real_mnt = real_mount(mnt);
52020 + struct acl_subject_label *retval;
52021 + struct dentry *parent;
52022 +
52023 + write_seqlock(&rename_lock);
52024 + br_read_lock(vfsmount_lock);
52025 +
52026 + for (;;) {
52027 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52028 + break;
52029 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52030 + if (!mnt_has_parent(real_mnt))
52031 + break;
52032 +
52033 + spin_lock(&dentry->d_lock);
52034 + read_lock(&gr_inode_lock);
52035 + retval =
52036 + lookup_acl_subj_label(dentry->d_inode->i_ino,
52037 + __get_dev(dentry), role);
52038 + read_unlock(&gr_inode_lock);
52039 + spin_unlock(&dentry->d_lock);
52040 + if (retval != NULL)
52041 + goto out;
52042 +
52043 + dentry = real_mnt->mnt_mountpoint;
52044 + real_mnt = real_mnt->mnt_parent;
52045 + mnt = &real_mnt->mnt;
52046 + continue;
52047 + }
52048 +
52049 + spin_lock(&dentry->d_lock);
52050 + read_lock(&gr_inode_lock);
52051 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52052 + __get_dev(dentry), role);
52053 + read_unlock(&gr_inode_lock);
52054 + parent = dentry->d_parent;
52055 + spin_unlock(&dentry->d_lock);
52056 +
52057 + if (retval != NULL)
52058 + goto out;
52059 +
52060 + dentry = parent;
52061 + }
52062 +
52063 + spin_lock(&dentry->d_lock);
52064 + read_lock(&gr_inode_lock);
52065 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52066 + __get_dev(dentry), role);
52067 + read_unlock(&gr_inode_lock);
52068 + spin_unlock(&dentry->d_lock);
52069 +
52070 + if (unlikely(retval == NULL)) {
52071 + /* real_root is pinned, we don't need to hold a reference */
52072 + read_lock(&gr_inode_lock);
52073 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52074 + __get_dev(real_root.dentry), role);
52075 + read_unlock(&gr_inode_lock);
52076 + }
52077 +out:
52078 + br_read_unlock(vfsmount_lock);
52079 + write_sequnlock(&rename_lock);
52080 +
52081 + BUG_ON(retval == NULL);
52082 +
52083 + return retval;
52084 +}
52085 +
52086 +static void
52087 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52088 +{
52089 + struct task_struct *task = current;
52090 + const struct cred *cred = current_cred();
52091 +
52092 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52093 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52094 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52095 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52096 +
52097 + return;
52098 +}
52099 +
52100 +static void
52101 +gr_log_learn_id_change(const char type, const unsigned int real,
52102 + const unsigned int effective, const unsigned int fs)
52103 +{
52104 + struct task_struct *task = current;
52105 + const struct cred *cred = current_cred();
52106 +
52107 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52108 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52109 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52110 + type, real, effective, fs, &task->signal->saved_ip);
52111 +
52112 + return;
52113 +}
52114 +
52115 +__u32
52116 +gr_search_file(const struct dentry * dentry, const __u32 mode,
52117 + const struct vfsmount * mnt)
52118 +{
52119 + __u32 retval = mode;
52120 + struct acl_subject_label *curracl;
52121 + struct acl_object_label *currobj;
52122 +
52123 + if (unlikely(!(gr_status & GR_READY)))
52124 + return (mode & ~GR_AUDITS);
52125 +
52126 + curracl = current->acl;
52127 +
52128 + currobj = chk_obj_label(dentry, mnt, curracl);
52129 + retval = currobj->mode & mode;
52130 +
52131 + /* if we're opening a specified transfer file for writing
52132 + (e.g. /dev/initctl), then transfer our role to init
52133 + */
52134 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52135 + current->role->roletype & GR_ROLE_PERSIST)) {
52136 + struct task_struct *task = init_pid_ns.child_reaper;
52137 +
52138 + if (task->role != current->role) {
52139 + task->acl_sp_role = 0;
52140 + task->acl_role_id = current->acl_role_id;
52141 + task->role = current->role;
52142 + rcu_read_lock();
52143 + read_lock(&grsec_exec_file_lock);
52144 + gr_apply_subject_to_task(task);
52145 + read_unlock(&grsec_exec_file_lock);
52146 + rcu_read_unlock();
52147 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52148 + }
52149 + }
52150 +
52151 + if (unlikely
52152 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52153 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52154 + __u32 new_mode = mode;
52155 +
52156 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52157 +
52158 + retval = new_mode;
52159 +
52160 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52161 + new_mode |= GR_INHERIT;
52162 +
52163 + if (!(mode & GR_NOLEARN))
52164 + gr_log_learn(dentry, mnt, new_mode);
52165 + }
52166 +
52167 + return retval;
52168 +}
52169 +
52170 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52171 + const struct dentry *parent,
52172 + const struct vfsmount *mnt)
52173 +{
52174 + struct name_entry *match;
52175 + struct acl_object_label *matchpo;
52176 + struct acl_subject_label *curracl;
52177 + char *path;
52178 +
52179 + if (unlikely(!(gr_status & GR_READY)))
52180 + return NULL;
52181 +
52182 + preempt_disable();
52183 + path = gr_to_filename_rbac(new_dentry, mnt);
52184 + match = lookup_name_entry_create(path);
52185 +
52186 + curracl = current->acl;
52187 +
52188 + if (match) {
52189 + read_lock(&gr_inode_lock);
52190 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52191 + read_unlock(&gr_inode_lock);
52192 +
52193 + if (matchpo) {
52194 + preempt_enable();
52195 + return matchpo;
52196 + }
52197 + }
52198 +
52199 + // lookup parent
52200 +
52201 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52202 +
52203 + preempt_enable();
52204 + return matchpo;
52205 +}
52206 +
52207 +__u32
52208 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52209 + const struct vfsmount * mnt, const __u32 mode)
52210 +{
52211 + struct acl_object_label *matchpo;
52212 + __u32 retval;
52213 +
52214 + if (unlikely(!(gr_status & GR_READY)))
52215 + return (mode & ~GR_AUDITS);
52216 +
52217 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
52218 +
52219 + retval = matchpo->mode & mode;
52220 +
52221 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52222 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52223 + __u32 new_mode = mode;
52224 +
52225 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52226 +
52227 + gr_log_learn(new_dentry, mnt, new_mode);
52228 + return new_mode;
52229 + }
52230 +
52231 + return retval;
52232 +}
52233 +
52234 +__u32
52235 +gr_check_link(const struct dentry * new_dentry,
52236 + const struct dentry * parent_dentry,
52237 + const struct vfsmount * parent_mnt,
52238 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52239 +{
52240 + struct acl_object_label *obj;
52241 + __u32 oldmode, newmode;
52242 + __u32 needmode;
52243 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52244 + GR_DELETE | GR_INHERIT;
52245 +
52246 + if (unlikely(!(gr_status & GR_READY)))
52247 + return (GR_CREATE | GR_LINK);
52248 +
52249 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52250 + oldmode = obj->mode;
52251 +
52252 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52253 + newmode = obj->mode;
52254 +
52255 + needmode = newmode & checkmodes;
52256 +
52257 + // old name for hardlink must have at least the permissions of the new name
52258 + if ((oldmode & needmode) != needmode)
52259 + goto bad;
52260 +
52261 + // if old name had restrictions/auditing, make sure the new name does as well
52262 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52263 +
52264 + // don't allow hardlinking of suid/sgid files without permission
52265 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52266 + needmode |= GR_SETID;
52267 +
52268 + if ((newmode & needmode) != needmode)
52269 + goto bad;
52270 +
52271 + // enforce minimum permissions
52272 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52273 + return newmode;
52274 +bad:
52275 + needmode = oldmode;
52276 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52277 + needmode |= GR_SETID;
52278 +
52279 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52280 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52281 + return (GR_CREATE | GR_LINK);
52282 + } else if (newmode & GR_SUPPRESS)
52283 + return GR_SUPPRESS;
52284 + else
52285 + return 0;
52286 +}
52287 +
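/* A small bit-arithmetic sketch (illustrative only, not taken from the patch)
 * of the hardlink check gr_check_link() performs above: every permission
 * granted through the new name must already be granted on the old name, tested
 * with (oldmode & needmode) != needmode. The flag values below are hypothetical
 * stand-ins; the real GR_* constants are defined elsewhere in grsecurity. */
#include <stdio.h>

#define X_READ  0x1	/* hypothetical stand-in for a read flag  */
#define X_WRITE 0x2	/* hypothetical stand-in for a write flag */

int main(void)
{
	unsigned int oldmode  = X_READ;           /* rule for the link target */
	unsigned int newmode  = X_READ | X_WRITE; /* rule for the new name */
	unsigned int needmode = newmode;          /* already masked by checkmodes */

	if ((oldmode & needmode) != needmode)
		printf("link denied: the new name would widen access\n");
	else
		printf("link allowed\n");
	return 0;
}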
52288 +int
52289 +gr_check_hidden_task(const struct task_struct *task)
52290 +{
52291 + if (unlikely(!(gr_status & GR_READY)))
52292 + return 0;
52293 +
52294 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52295 + return 1;
52296 +
52297 + return 0;
52298 +}
52299 +
52300 +int
52301 +gr_check_protected_task(const struct task_struct *task)
52302 +{
52303 + if (unlikely(!(gr_status & GR_READY) || !task))
52304 + return 0;
52305 +
52306 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52307 + task->acl != current->acl)
52308 + return 1;
52309 +
52310 + return 0;
52311 +}
52312 +
52313 +int
52314 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52315 +{
52316 + struct task_struct *p;
52317 + int ret = 0;
52318 +
52319 + if (unlikely(!(gr_status & GR_READY) || !pid))
52320 + return ret;
52321 +
52322 + read_lock(&tasklist_lock);
52323 + do_each_pid_task(pid, type, p) {
52324 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52325 + p->acl != current->acl) {
52326 + ret = 1;
52327 + goto out;
52328 + }
52329 + } while_each_pid_task(pid, type, p);
52330 +out:
52331 + read_unlock(&tasklist_lock);
52332 +
52333 + return ret;
52334 +}
52335 +
52336 +void
52337 +gr_copy_label(struct task_struct *tsk)
52338 +{
52339 + /* plain copying of fields is already done by dup_task_struct */
52340 + tsk->signal->used_accept = 0;
52341 + tsk->acl_sp_role = 0;
52342 + //tsk->acl_role_id = current->acl_role_id;
52343 + //tsk->acl = current->acl;
52344 + //tsk->role = current->role;
52345 + tsk->signal->curr_ip = current->signal->curr_ip;
52346 + tsk->signal->saved_ip = current->signal->saved_ip;
52347 + if (current->exec_file)
52348 + get_file(current->exec_file);
52349 + //tsk->exec_file = current->exec_file;
52350 + //tsk->is_writable = current->is_writable;
52351 + if (unlikely(current->signal->used_accept)) {
52352 + current->signal->curr_ip = 0;
52353 + current->signal->saved_ip = 0;
52354 + }
52355 +
52356 + return;
52357 +}
52358 +
52359 +static void
52360 +gr_set_proc_res(struct task_struct *task)
52361 +{
52362 + struct acl_subject_label *proc;
52363 + unsigned short i;
52364 +
52365 + proc = task->acl;
52366 +
52367 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52368 + return;
52369 +
52370 + for (i = 0; i < RLIM_NLIMITS; i++) {
52371 + if (!(proc->resmask & (1 << i)))
52372 + continue;
52373 +
52374 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52375 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52376 + }
52377 +
52378 + return;
52379 +}
52380 +
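/* A userspace sketch (illustrative only, not taken from the patch) of the
 * pattern used by gr_set_proc_res() above: walk the resource indices and apply
 * a limit only when its bit is set in a mask, the same way
 * proc->resmask & (1 << i) gates each rlimit copy. Uses the real setrlimit(2)
 * API; the mask and limit values are assumptions chosen for illustration. */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct { int res; struct rlimit lim; } overrides[] = {
		{ RLIMIT_NOFILE, { 1024, 4096 } },
		{ RLIMIT_CORE,   { 0,    0    } },
	};
	unsigned int resmask = (1u << RLIMIT_NOFILE) | (1u << RLIMIT_CORE);
	unsigned int i;

	for (i = 0; i < sizeof(overrides) / sizeof(overrides[0]); i++) {
		if (!(resmask & (1u << overrides[i].res)))
			continue;	/* no override recorded for this resource */
		if (setrlimit(overrides[i].res, &overrides[i].lim))
			perror("setrlimit");
	}
	return 0;
}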
52381 +extern int __gr_process_user_ban(struct user_struct *user);
52382 +
52383 +int
52384 +gr_check_user_change(int real, int effective, int fs)
52385 +{
52386 + unsigned int i;
52387 + __u16 num;
52388 + uid_t *uidlist;
52389 + int curuid;
52390 + int realok = 0;
52391 + int effectiveok = 0;
52392 + int fsok = 0;
52393 +
52394 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52395 + struct user_struct *user;
52396 +
52397 + if (real == -1)
52398 + goto skipit;
52399 +
52400 + user = find_user(real);
52401 + if (user == NULL)
52402 + goto skipit;
52403 +
52404 + if (__gr_process_user_ban(user)) {
52405 + /* for find_user */
52406 + free_uid(user);
52407 + return 1;
52408 + }
52409 +
52410 + /* for find_user */
52411 + free_uid(user);
52412 +
52413 +skipit:
52414 +#endif
52415 +
52416 + if (unlikely(!(gr_status & GR_READY)))
52417 + return 0;
52418 +
52419 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52420 + gr_log_learn_id_change('u', real, effective, fs);
52421 +
52422 + num = current->acl->user_trans_num;
52423 + uidlist = current->acl->user_transitions;
52424 +
52425 + if (uidlist == NULL)
52426 + return 0;
52427 +
52428 + if (real == -1)
52429 + realok = 1;
52430 + if (effective == -1)
52431 + effectiveok = 1;
52432 + if (fs == -1)
52433 + fsok = 1;
52434 +
52435 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
52436 + for (i = 0; i < num; i++) {
52437 + curuid = (int)uidlist[i];
52438 + if (real == curuid)
52439 + realok = 1;
52440 + if (effective == curuid)
52441 + effectiveok = 1;
52442 + if (fs == curuid)
52443 + fsok = 1;
52444 + }
52445 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
52446 + for (i = 0; i < num; i++) {
52447 + curuid = (int)uidlist[i];
52448 + if (real == curuid)
52449 + break;
52450 + if (effective == curuid)
52451 + break;
52452 + if (fs == curuid)
52453 + break;
52454 + }
52455 + /* not in deny list */
52456 + if (i == num) {
52457 + realok = 1;
52458 + effectiveok = 1;
52459 + fsok = 1;
52460 + }
52461 + }
52462 +
52463 + if (realok && effectiveok && fsok)
52464 + return 0;
52465 + else {
52466 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52467 + return 1;
52468 + }
52469 +}
52470 +
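/* A minimal sketch (illustrative only, not taken from the patch) of the
 * id-transition test in gr_check_user_change() above for the GR_ID_ALLOW case:
 * an id of -1 means "not being changed" and is always acceptable, otherwise
 * the id must appear in the subject's transition list. The uids below are
 * hypothetical. */
#include <stdio.h>

static int id_allowed(int id, const int *list, int n)
{
	int i;

	if (id == -1)		/* this id is not being changed */
		return 1;
	for (i = 0; i < n; i++)
		if (list[i] == id)
			return 1;
	return 0;
}

int main(void)
{
	const int allow[] = { 33, 1000 };	/* hypothetical permitted uids */

	/* e.g. setresuid(1000, 1000, -1): all three ids pass */
	printf("%d\n", id_allowed(1000, allow, 2) &&
		       id_allowed(1000, allow, 2) &&
		       id_allowed(-1, allow, 2));
	/* e.g. setresuid(0, 0, 0): rejected, 0 is not listed */
	printf("%d\n", id_allowed(0, allow, 2));
	return 0;
}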
52471 +int
52472 +gr_check_group_change(int real, int effective, int fs)
52473 +{
52474 + unsigned int i;
52475 + __u16 num;
52476 + gid_t *gidlist;
52477 + int curgid;
52478 + int realok = 0;
52479 + int effectiveok = 0;
52480 + int fsok = 0;
52481 +
52482 + if (unlikely(!(gr_status & GR_READY)))
52483 + return 0;
52484 +
52485 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52486 + gr_log_learn_id_change('g', real, effective, fs);
52487 +
52488 + num = current->acl->group_trans_num;
52489 + gidlist = current->acl->group_transitions;
52490 +
52491 + if (gidlist == NULL)
52492 + return 0;
52493 +
52494 + if (real == -1)
52495 + realok = 1;
52496 + if (effective == -1)
52497 + effectiveok = 1;
52498 + if (fs == -1)
52499 + fsok = 1;
52500 +
52501 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
52502 + for (i = 0; i < num; i++) {
52503 + curgid = (int)gidlist[i];
52504 + if (real == curgid)
52505 + realok = 1;
52506 + if (effective == curgid)
52507 + effectiveok = 1;
52508 + if (fs == curgid)
52509 + fsok = 1;
52510 + }
52511 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
52512 + for (i = 0; i < num; i++) {
52513 + curgid = (int)gidlist[i];
52514 + if (real == curgid)
52515 + break;
52516 + if (effective == curgid)
52517 + break;
52518 + if (fs == curgid)
52519 + break;
52520 + }
52521 + /* not in deny list */
52522 + if (i == num) {
52523 + realok = 1;
52524 + effectiveok = 1;
52525 + fsok = 1;
52526 + }
52527 + }
52528 +
52529 + if (realok && effectiveok && fsok)
52530 + return 0;
52531 + else {
52532 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52533 + return 1;
52534 + }
52535 +}
52536 +
52537 +extern int gr_acl_is_capable(const int cap);
52538 +
52539 +void
52540 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
52541 +{
52542 + struct acl_role_label *role = task->role;
52543 + struct acl_subject_label *subj = NULL;
52544 + struct acl_object_label *obj;
52545 + struct file *filp;
52546 +
52547 + if (unlikely(!(gr_status & GR_READY)))
52548 + return;
52549 +
52550 + filp = task->exec_file;
52551 +
52552 + /* kernel process, we'll give them the kernel role */
52553 + if (unlikely(!filp)) {
52554 + task->role = kernel_role;
52555 + task->acl = kernel_role->root_label;
52556 + return;
52557 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
52558 + role = lookup_acl_role_label(task, uid, gid);
52559 +
52560 + /* don't change the role if we're not a privileged process */
52561 + if (role && task->role != role &&
52562 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
52563 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
52564 + return;
52565 +
52566 + /* perform subject lookup in possibly new role
52567 + we can use this result below in the case where role == task->role
52568 + */
52569 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
52570 +
52571 +	/* if we changed uid/gid but ended up in the same role
52572 +	   and are using inheritance, don't lose the inherited subject:
52573 +	   if the current subject is other than what a normal lookup
52574 +	   would produce, we arrived at it via inheritance, so don't
52575 +	   lose that subject
52576 +	*/
52577 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
52578 + (subj == task->acl)))
52579 + task->acl = subj;
52580 +
52581 + task->role = role;
52582 +
52583 + task->is_writable = 0;
52584 +
52585 + /* ignore additional mmap checks for processes that are writable
52586 + by the default ACL */
52587 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52588 + if (unlikely(obj->mode & GR_WRITE))
52589 + task->is_writable = 1;
52590 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52591 + if (unlikely(obj->mode & GR_WRITE))
52592 + task->is_writable = 1;
52593 +
52594 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52595 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52596 +#endif
52597 +
52598 + gr_set_proc_res(task);
52599 +
52600 + return;
52601 +}
52602 +
52603 +int
52604 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52605 + const int unsafe_flags)
52606 +{
52607 + struct task_struct *task = current;
52608 + struct acl_subject_label *newacl;
52609 + struct acl_object_label *obj;
52610 + __u32 retmode;
52611 +
52612 + if (unlikely(!(gr_status & GR_READY)))
52613 + return 0;
52614 +
52615 + newacl = chk_subj_label(dentry, mnt, task->role);
52616 +
52617 +	/* special handling for the case where we did an strace -f -p <pid> from an admin role,
52618 +	   and that pid then did an exec
52619 +	*/
52620 + rcu_read_lock();
52621 + read_lock(&tasklist_lock);
52622 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
52623 + (task->parent->acl->mode & GR_POVERRIDE))) {
52624 + read_unlock(&tasklist_lock);
52625 + rcu_read_unlock();
52626 + goto skip_check;
52627 + }
52628 + read_unlock(&tasklist_lock);
52629 + rcu_read_unlock();
52630 +
52631 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
52632 + !(task->role->roletype & GR_ROLE_GOD) &&
52633 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
52634 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52635 + if (unsafe_flags & LSM_UNSAFE_SHARE)
52636 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
52637 + else
52638 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
52639 + return -EACCES;
52640 + }
52641 +
52642 +skip_check:
52643 +
52644 + obj = chk_obj_label(dentry, mnt, task->acl);
52645 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
52646 +
52647 + if (!(task->acl->mode & GR_INHERITLEARN) &&
52648 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
52649 + if (obj->nested)
52650 + task->acl = obj->nested;
52651 + else
52652 + task->acl = newacl;
52653 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
52654 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
52655 +
52656 + task->is_writable = 0;
52657 +
52658 + /* ignore additional mmap checks for processes that are writable
52659 + by the default ACL */
52660 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
52661 + if (unlikely(obj->mode & GR_WRITE))
52662 + task->is_writable = 1;
52663 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
52664 + if (unlikely(obj->mode & GR_WRITE))
52665 + task->is_writable = 1;
52666 +
52667 + gr_set_proc_res(task);
52668 +
52669 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52670 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52671 +#endif
52672 + return 0;
52673 +}
52674 +
52675 +/* always called with valid inodev ptr */
52676 +static void
52677 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
52678 +{
52679 + struct acl_object_label *matchpo;
52680 + struct acl_subject_label *matchps;
52681 + struct acl_subject_label *subj;
52682 + struct acl_role_label *role;
52683 + unsigned int x;
52684 +
52685 + FOR_EACH_ROLE_START(role)
52686 + FOR_EACH_SUBJECT_START(role, subj, x)
52687 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
52688 + matchpo->mode |= GR_DELETED;
52689 + FOR_EACH_SUBJECT_END(subj,x)
52690 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52691 + if (subj->inode == ino && subj->device == dev)
52692 + subj->mode |= GR_DELETED;
52693 + FOR_EACH_NESTED_SUBJECT_END(subj)
52694 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
52695 + matchps->mode |= GR_DELETED;
52696 + FOR_EACH_ROLE_END(role)
52697 +
52698 + inodev->nentry->deleted = 1;
52699 +
52700 + return;
52701 +}
52702 +
52703 +void
52704 +gr_handle_delete(const ino_t ino, const dev_t dev)
52705 +{
52706 + struct inodev_entry *inodev;
52707 +
52708 + if (unlikely(!(gr_status & GR_READY)))
52709 + return;
52710 +
52711 + write_lock(&gr_inode_lock);
52712 + inodev = lookup_inodev_entry(ino, dev);
52713 + if (inodev != NULL)
52714 + do_handle_delete(inodev, ino, dev);
52715 + write_unlock(&gr_inode_lock);
52716 +
52717 + return;
52718 +}
52719 +
52720 +static void
52721 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
52722 + const ino_t newinode, const dev_t newdevice,
52723 + struct acl_subject_label *subj)
52724 +{
52725 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
52726 + struct acl_object_label *match;
52727 +
52728 + match = subj->obj_hash[index];
52729 +
52730 + while (match && (match->inode != oldinode ||
52731 + match->device != olddevice ||
52732 + !(match->mode & GR_DELETED)))
52733 + match = match->next;
52734 +
52735 + if (match && (match->inode == oldinode)
52736 + && (match->device == olddevice)
52737 + && (match->mode & GR_DELETED)) {
52738 + if (match->prev == NULL) {
52739 + subj->obj_hash[index] = match->next;
52740 + if (match->next != NULL)
52741 + match->next->prev = NULL;
52742 + } else {
52743 + match->prev->next = match->next;
52744 + if (match->next != NULL)
52745 + match->next->prev = match->prev;
52746 + }
52747 + match->prev = NULL;
52748 + match->next = NULL;
52749 + match->inode = newinode;
52750 + match->device = newdevice;
52751 + match->mode &= ~GR_DELETED;
52752 +
52753 + insert_acl_obj_label(match, subj);
52754 + }
52755 +
52756 + return;
52757 +}
52758 +
52759 +static void
52760 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
52761 + const ino_t newinode, const dev_t newdevice,
52762 + struct acl_role_label *role)
52763 +{
52764 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
52765 + struct acl_subject_label *match;
52766 +
52767 + match = role->subj_hash[index];
52768 +
52769 + while (match && (match->inode != oldinode ||
52770 + match->device != olddevice ||
52771 + !(match->mode & GR_DELETED)))
52772 + match = match->next;
52773 +
52774 + if (match && (match->inode == oldinode)
52775 + && (match->device == olddevice)
52776 + && (match->mode & GR_DELETED)) {
52777 + if (match->prev == NULL) {
52778 + role->subj_hash[index] = match->next;
52779 + if (match->next != NULL)
52780 + match->next->prev = NULL;
52781 + } else {
52782 + match->prev->next = match->next;
52783 + if (match->next != NULL)
52784 + match->next->prev = match->prev;
52785 + }
52786 + match->prev = NULL;
52787 + match->next = NULL;
52788 + match->inode = newinode;
52789 + match->device = newdevice;
52790 + match->mode &= ~GR_DELETED;
52791 +
52792 + insert_acl_subj_label(match, role);
52793 + }
52794 +
52795 + return;
52796 +}
52797 +
52798 +static void
52799 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
52800 + const ino_t newinode, const dev_t newdevice)
52801 +{
52802 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
52803 + struct inodev_entry *match;
52804 +
52805 + match = inodev_set.i_hash[index];
52806 +
52807 + while (match && (match->nentry->inode != oldinode ||
52808 + match->nentry->device != olddevice || !match->nentry->deleted))
52809 + match = match->next;
52810 +
52811 + if (match && (match->nentry->inode == oldinode)
52812 + && (match->nentry->device == olddevice) &&
52813 + match->nentry->deleted) {
52814 + if (match->prev == NULL) {
52815 + inodev_set.i_hash[index] = match->next;
52816 + if (match->next != NULL)
52817 + match->next->prev = NULL;
52818 + } else {
52819 + match->prev->next = match->next;
52820 + if (match->next != NULL)
52821 + match->next->prev = match->prev;
52822 + }
52823 + match->prev = NULL;
52824 + match->next = NULL;
52825 + match->nentry->inode = newinode;
52826 + match->nentry->device = newdevice;
52827 + match->nentry->deleted = 0;
52828 +
52829 + insert_inodev_entry(match);
52830 + }
52831 +
52832 + return;
52833 +}
52834 +
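/* update_acl_obj_label(), update_acl_subj_label() and update_inodev_entry()
 * above all perform the same operation: find the GR_DELETED entry keyed by the
 * old inode/device, unlink it from its doubly linked hash chain, rewrite the
 * key with the new inode/device, clear the deleted flag and re-insert it.
 * Below is a generic sketch (illustrative only, not taken from the patch) of
 * just the unlink step; struct node is a stand-in for the patch's structures. */
#include <assert.h>
#include <stddef.h>

struct node {
	struct node *prev, *next;
};

static void unlink_from_bucket(struct node **head, struct node *match)
{
	if (match->prev == NULL) {
		*head = match->next;		/* match was the bucket head */
		if (match->next != NULL)
			match->next->prev = NULL;
	} else {
		match->prev->next = match->next;
		if (match->next != NULL)
			match->next->prev = match->prev;
	}
	match->prev = NULL;
	match->next = NULL;
	/* the caller then rewrites the key and re-inserts into the new bucket */
}

int main(void)
{
	struct node a = { NULL, NULL }, b = { NULL, NULL }, c = { NULL, NULL };
	struct node *head = &a;

	a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
	unlink_from_bucket(&head, &b);
	assert(head == &a && a.next == &c && c.prev == &a);
	return 0;
}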
52835 +static void
52836 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
52837 +{
52838 + struct acl_subject_label *subj;
52839 + struct acl_role_label *role;
52840 + unsigned int x;
52841 +
52842 + FOR_EACH_ROLE_START(role)
52843 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
52844 +
52845 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52846 +			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
52847 + subj->inode = ino;
52848 + subj->device = dev;
52849 + }
52850 + FOR_EACH_NESTED_SUBJECT_END(subj)
52851 + FOR_EACH_SUBJECT_START(role, subj, x)
52852 + update_acl_obj_label(matchn->inode, matchn->device,
52853 + ino, dev, subj);
52854 + FOR_EACH_SUBJECT_END(subj,x)
52855 + FOR_EACH_ROLE_END(role)
52856 +
52857 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
52858 +
52859 + return;
52860 +}
52861 +
52862 +static void
52863 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
52864 + const struct vfsmount *mnt)
52865 +{
52866 + ino_t ino = dentry->d_inode->i_ino;
52867 + dev_t dev = __get_dev(dentry);
52868 +
52869 + __do_handle_create(matchn, ino, dev);
52870 +
52871 + return;
52872 +}
52873 +
52874 +void
52875 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52876 +{
52877 + struct name_entry *matchn;
52878 +
52879 + if (unlikely(!(gr_status & GR_READY)))
52880 + return;
52881 +
52882 + preempt_disable();
52883 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
52884 +
52885 + if (unlikely((unsigned long)matchn)) {
52886 + write_lock(&gr_inode_lock);
52887 + do_handle_create(matchn, dentry, mnt);
52888 + write_unlock(&gr_inode_lock);
52889 + }
52890 + preempt_enable();
52891 +
52892 + return;
52893 +}
52894 +
52895 +void
52896 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
52897 +{
52898 + struct name_entry *matchn;
52899 +
52900 + if (unlikely(!(gr_status & GR_READY)))
52901 + return;
52902 +
52903 + preempt_disable();
52904 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
52905 +
52906 + if (unlikely((unsigned long)matchn)) {
52907 + write_lock(&gr_inode_lock);
52908 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
52909 + write_unlock(&gr_inode_lock);
52910 + }
52911 + preempt_enable();
52912 +
52913 + return;
52914 +}
52915 +
52916 +void
52917 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52918 + struct dentry *old_dentry,
52919 + struct dentry *new_dentry,
52920 + struct vfsmount *mnt, const __u8 replace)
52921 +{
52922 + struct name_entry *matchn;
52923 + struct inodev_entry *inodev;
52924 + struct inode *inode = new_dentry->d_inode;
52925 + ino_t old_ino = old_dentry->d_inode->i_ino;
52926 + dev_t old_dev = __get_dev(old_dentry);
52927 +
52928 +	/* vfs_rename swaps the name and parent link for old_dentry and
52929 +	   new_dentry.
52930 +	   At this point, old_dentry has the new name, parent link, and inode
52931 +	   for the renamed file.
52932 +	   If a file is being replaced by a rename, new_dentry has the inode
52933 +	   and name for the replaced file.
52934 +	*/
52935 +
52936 + if (unlikely(!(gr_status & GR_READY)))
52937 + return;
52938 +
52939 + preempt_disable();
52940 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
52941 +
52942 + /* we wouldn't have to check d_inode if it weren't for
52943 + NFS silly-renaming
52944 + */
52945 +
52946 + write_lock(&gr_inode_lock);
52947 + if (unlikely(replace && inode)) {
52948 + ino_t new_ino = inode->i_ino;
52949 + dev_t new_dev = __get_dev(new_dentry);
52950 +
52951 + inodev = lookup_inodev_entry(new_ino, new_dev);
52952 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
52953 + do_handle_delete(inodev, new_ino, new_dev);
52954 + }
52955 +
52956 + inodev = lookup_inodev_entry(old_ino, old_dev);
52957 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
52958 + do_handle_delete(inodev, old_ino, old_dev);
52959 +
52960 + if (unlikely((unsigned long)matchn))
52961 + do_handle_create(matchn, old_dentry, mnt);
52962 +
52963 + write_unlock(&gr_inode_lock);
52964 + preempt_enable();
52965 +
52966 + return;
52967 +}
52968 +
52969 +static int
52970 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
52971 + unsigned char **sum)
52972 +{
52973 + struct acl_role_label *r;
52974 + struct role_allowed_ip *ipp;
52975 + struct role_transition *trans;
52976 + unsigned int i;
52977 + int found = 0;
52978 + u32 curr_ip = current->signal->curr_ip;
52979 +
52980 + current->signal->saved_ip = curr_ip;
52981 +
52982 + /* check transition table */
52983 +
52984 + for (trans = current->role->transitions; trans; trans = trans->next) {
52985 + if (!strcmp(rolename, trans->rolename)) {
52986 + found = 1;
52987 + break;
52988 + }
52989 + }
52990 +
52991 + if (!found)
52992 + return 0;
52993 +
52994 + /* handle special roles that do not require authentication
52995 + and check ip */
52996 +
52997 + FOR_EACH_ROLE_START(r)
52998 + if (!strcmp(rolename, r->rolename) &&
52999 + (r->roletype & GR_ROLE_SPECIAL)) {
53000 + found = 0;
53001 + if (r->allowed_ips != NULL) {
53002 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53003 + if ((ntohl(curr_ip) & ipp->netmask) ==
53004 + (ntohl(ipp->addr) & ipp->netmask))
53005 + found = 1;
53006 + }
53007 + } else
53008 + found = 2;
53009 + if (!found)
53010 + return 0;
53011 +
53012 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53013 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53014 + *salt = NULL;
53015 + *sum = NULL;
53016 + return 1;
53017 + }
53018 + }
53019 + FOR_EACH_ROLE_END(r)
53020 +
53021 + for (i = 0; i < num_sprole_pws; i++) {
53022 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53023 + *salt = acl_special_roles[i]->salt;
53024 + *sum = acl_special_roles[i]->sum;
53025 + return 1;
53026 + }
53027 + }
53028 +
53029 + return 0;
53030 +}
53031 +
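/* A userspace sketch (illustrative only, not taken from the patch) of the
 * source-IP restriction applied by lookup_special_role_auth() above: an
 * address is permitted when (ntohl(curr_ip) & netmask) == (ntohl(allowed) &
 * netmask). The addresses and the /24 mask below are assumptions chosen for
 * illustration. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t curr_ip = inet_addr("192.168.1.42");	/* network byte order */
	uint32_t allowed = inet_addr("192.168.1.0");	/* network byte order */
	uint32_t netmask = 0xffffff00u;			/* /24, host byte order */

	if ((ntohl(curr_ip) & netmask) == (ntohl(allowed) & netmask))
		printf("ip permitted for this special role\n");
	else
		printf("ip rejected\n");
	return 0;
}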
53032 +static void
53033 +assign_special_role(char *rolename)
53034 +{
53035 + struct acl_object_label *obj;
53036 + struct acl_role_label *r;
53037 + struct acl_role_label *assigned = NULL;
53038 + struct task_struct *tsk;
53039 + struct file *filp;
53040 +
53041 + FOR_EACH_ROLE_START(r)
53042 + if (!strcmp(rolename, r->rolename) &&
53043 + (r->roletype & GR_ROLE_SPECIAL)) {
53044 + assigned = r;
53045 + break;
53046 + }
53047 + FOR_EACH_ROLE_END(r)
53048 +
53049 + if (!assigned)
53050 + return;
53051 +
53052 + read_lock(&tasklist_lock);
53053 + read_lock(&grsec_exec_file_lock);
53054 +
53055 + tsk = current->real_parent;
53056 + if (tsk == NULL)
53057 + goto out_unlock;
53058 +
53059 + filp = tsk->exec_file;
53060 + if (filp == NULL)
53061 + goto out_unlock;
53062 +
53063 + tsk->is_writable = 0;
53064 +
53065 + tsk->acl_sp_role = 1;
53066 + tsk->acl_role_id = ++acl_sp_role_value;
53067 + tsk->role = assigned;
53068 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53069 +
53070 + /* ignore additional mmap checks for processes that are writable
53071 + by the default ACL */
53072 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53073 + if (unlikely(obj->mode & GR_WRITE))
53074 + tsk->is_writable = 1;
53075 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53076 + if (unlikely(obj->mode & GR_WRITE))
53077 + tsk->is_writable = 1;
53078 +
53079 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53080 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53081 +#endif
53082 +
53083 +out_unlock:
53084 + read_unlock(&grsec_exec_file_lock);
53085 + read_unlock(&tasklist_lock);
53086 + return;
53087 +}
53088 +
53089 +int gr_check_secure_terminal(struct task_struct *task)
53090 +{
53091 + struct task_struct *p, *p2, *p3;
53092 + struct files_struct *files;
53093 + struct fdtable *fdt;
53094 + struct file *our_file = NULL, *file;
53095 + int i;
53096 +
53097 + if (task->signal->tty == NULL)
53098 + return 1;
53099 +
53100 + files = get_files_struct(task);
53101 + if (files != NULL) {
53102 + rcu_read_lock();
53103 + fdt = files_fdtable(files);
53104 + for (i=0; i < fdt->max_fds; i++) {
53105 + file = fcheck_files(files, i);
53106 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53107 + get_file(file);
53108 + our_file = file;
53109 + }
53110 + }
53111 + rcu_read_unlock();
53112 + put_files_struct(files);
53113 + }
53114 +
53115 + if (our_file == NULL)
53116 + return 1;
53117 +
53118 + read_lock(&tasklist_lock);
53119 + do_each_thread(p2, p) {
53120 + files = get_files_struct(p);
53121 + if (files == NULL ||
53122 + (p->signal && p->signal->tty == task->signal->tty)) {
53123 + if (files != NULL)
53124 + put_files_struct(files);
53125 + continue;
53126 + }
53127 + rcu_read_lock();
53128 + fdt = files_fdtable(files);
53129 + for (i=0; i < fdt->max_fds; i++) {
53130 + file = fcheck_files(files, i);
53131 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53132 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53133 + p3 = task;
53134 + while (p3->pid > 0) {
53135 + if (p3 == p)
53136 + break;
53137 + p3 = p3->real_parent;
53138 + }
53139 + if (p3 == p)
53140 + break;
53141 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53142 + gr_handle_alertkill(p);
53143 + rcu_read_unlock();
53144 + put_files_struct(files);
53145 + read_unlock(&tasklist_lock);
53146 + fput(our_file);
53147 + return 0;
53148 + }
53149 + }
53150 + rcu_read_unlock();
53151 + put_files_struct(files);
53152 + } while_each_thread(p2, p);
53153 + read_unlock(&tasklist_lock);
53154 +
53155 + fput(our_file);
53156 + return 1;
53157 +}
53158 +
53159 +ssize_t
53160 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53161 +{
53162 + struct gr_arg_wrapper uwrap;
53163 + unsigned char *sprole_salt = NULL;
53164 + unsigned char *sprole_sum = NULL;
53165 + int error = sizeof (struct gr_arg_wrapper);
53166 + int error2 = 0;
53167 +
53168 + mutex_lock(&gr_dev_mutex);
53169 +
53170 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53171 + error = -EPERM;
53172 + goto out;
53173 + }
53174 +
53175 + if (count != sizeof (struct gr_arg_wrapper)) {
53176 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53177 + error = -EINVAL;
53178 + goto out;
53179 + }
53180 +
53181 +
53182 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53183 + gr_auth_expires = 0;
53184 + gr_auth_attempts = 0;
53185 + }
53186 +
53187 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53188 + error = -EFAULT;
53189 + goto out;
53190 + }
53191 +
53192 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53193 + error = -EINVAL;
53194 + goto out;
53195 + }
53196 +
53197 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53198 + error = -EFAULT;
53199 + goto out;
53200 + }
53201 +
53202 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53203 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53204 + time_after(gr_auth_expires, get_seconds())) {
53205 + error = -EBUSY;
53206 + goto out;
53207 + }
53208 +
53209 +	/* if a non-root user is trying to do anything other than use a special role,
53210 +	   do not attempt authentication and do not count the attempt toward
53211 +	   the authentication lockout
53212 +	*/
53213 +
53214 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53215 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53216 + current_uid()) {
53217 + error = -EPERM;
53218 + goto out;
53219 + }
53220 +
53221 + /* ensure pw and special role name are null terminated */
53222 +
53223 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53224 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53225 +
53226 +	/* Okay.
53227 +	 * We have enough of the argument structure (we have yet
53228 +	 * to copy_from_user the tables themselves). Copy the tables
53229 +	 * only if we need them, i.e. for loading operations. */
53230 +
53231 + switch (gr_usermode->mode) {
53232 + case GR_STATUS:
53233 + if (gr_status & GR_READY) {
53234 + error = 1;
53235 + if (!gr_check_secure_terminal(current))
53236 + error = 3;
53237 + } else
53238 + error = 2;
53239 + goto out;
53240 + case GR_SHUTDOWN:
53241 + if ((gr_status & GR_READY)
53242 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53243 + pax_open_kernel();
53244 + gr_status &= ~GR_READY;
53245 + pax_close_kernel();
53246 +
53247 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53248 + free_variables();
53249 + memset(gr_usermode, 0, sizeof (struct gr_arg));
53250 + memset(gr_system_salt, 0, GR_SALT_LEN);
53251 + memset(gr_system_sum, 0, GR_SHA_LEN);
53252 + } else if (gr_status & GR_READY) {
53253 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53254 + error = -EPERM;
53255 + } else {
53256 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53257 + error = -EAGAIN;
53258 + }
53259 + break;
53260 + case GR_ENABLE:
53261 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53262 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53263 + else {
53264 + if (gr_status & GR_READY)
53265 + error = -EAGAIN;
53266 + else
53267 + error = error2;
53268 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53269 + }
53270 + break;
53271 + case GR_RELOAD:
53272 + if (!(gr_status & GR_READY)) {
53273 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53274 + error = -EAGAIN;
53275 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53276 + preempt_disable();
53277 +
53278 + pax_open_kernel();
53279 + gr_status &= ~GR_READY;
53280 + pax_close_kernel();
53281 +
53282 + free_variables();
53283 + if (!(error2 = gracl_init(gr_usermode))) {
53284 + preempt_enable();
53285 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53286 + } else {
53287 + preempt_enable();
53288 + error = error2;
53289 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53290 + }
53291 + } else {
53292 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53293 + error = -EPERM;
53294 + }
53295 + break;
53296 + case GR_SEGVMOD:
53297 + if (unlikely(!(gr_status & GR_READY))) {
53298 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53299 + error = -EAGAIN;
53300 + break;
53301 + }
53302 +
53303 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53304 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53305 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53306 + struct acl_subject_label *segvacl;
53307 + segvacl =
53308 + lookup_acl_subj_label(gr_usermode->segv_inode,
53309 + gr_usermode->segv_device,
53310 + current->role);
53311 + if (segvacl) {
53312 + segvacl->crashes = 0;
53313 + segvacl->expires = 0;
53314 + }
53315 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53316 + gr_remove_uid(gr_usermode->segv_uid);
53317 + }
53318 + } else {
53319 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53320 + error = -EPERM;
53321 + }
53322 + break;
53323 + case GR_SPROLE:
53324 + case GR_SPROLEPAM:
53325 + if (unlikely(!(gr_status & GR_READY))) {
53326 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53327 + error = -EAGAIN;
53328 + break;
53329 + }
53330 +
53331 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53332 + current->role->expires = 0;
53333 + current->role->auth_attempts = 0;
53334 + }
53335 +
53336 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53337 + time_after(current->role->expires, get_seconds())) {
53338 + error = -EBUSY;
53339 + goto out;
53340 + }
53341 +
53342 + if (lookup_special_role_auth
53343 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53344 + && ((!sprole_salt && !sprole_sum)
53345 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53346 + char *p = "";
53347 + assign_special_role(gr_usermode->sp_role);
53348 + read_lock(&tasklist_lock);
53349 + if (current->real_parent)
53350 + p = current->real_parent->role->rolename;
53351 + read_unlock(&tasklist_lock);
53352 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53353 + p, acl_sp_role_value);
53354 + } else {
53355 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53356 + error = -EPERM;
53357 + if(!(current->role->auth_attempts++))
53358 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53359 +
53360 + goto out;
53361 + }
53362 + break;
53363 + case GR_UNSPROLE:
53364 + if (unlikely(!(gr_status & GR_READY))) {
53365 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53366 + error = -EAGAIN;
53367 + break;
53368 + }
53369 +
53370 + if (current->role->roletype & GR_ROLE_SPECIAL) {
53371 + char *p = "";
53372 + int i = 0;
53373 +
53374 + read_lock(&tasklist_lock);
53375 + if (current->real_parent) {
53376 + p = current->real_parent->role->rolename;
53377 + i = current->real_parent->acl_role_id;
53378 + }
53379 + read_unlock(&tasklist_lock);
53380 +
53381 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53382 + gr_set_acls(1);
53383 + } else {
53384 + error = -EPERM;
53385 + goto out;
53386 + }
53387 + break;
53388 + default:
53389 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53390 + error = -EINVAL;
53391 + break;
53392 + }
53393 +
53394 + if (error != -EPERM)
53395 + goto out;
53396 +
53397 + if(!(gr_auth_attempts++))
53398 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53399 +
53400 + out:
53401 + mutex_unlock(&gr_dev_mutex);
53402 + return error;
53403 +}
53404 +
53405 +/* must be called with
53406 + rcu_read_lock();
53407 + read_lock(&tasklist_lock);
53408 + read_lock(&grsec_exec_file_lock);
53409 +*/
53410 +int gr_apply_subject_to_task(struct task_struct *task)
53411 +{
53412 + struct acl_object_label *obj;
53413 + char *tmpname;
53414 + struct acl_subject_label *tmpsubj;
53415 + struct file *filp;
53416 + struct name_entry *nmatch;
53417 +
53418 + filp = task->exec_file;
53419 + if (filp == NULL)
53420 + return 0;
53421 +
53422 + /* the following is to apply the correct subject
53423 + on binaries running when the RBAC system
53424 + is enabled, when the binaries have been
53425 + replaced or deleted since their execution
53426 + -----
53427 + when the RBAC system starts, the inode/dev
53428 + from exec_file will be one the RBAC system
53429 + is unaware of. It only knows the inode/dev
53430 + of the present file on disk, or the absence
53431 + of it.
53432 + */
53433 + preempt_disable();
53434 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53435 +
53436 + nmatch = lookup_name_entry(tmpname);
53437 + preempt_enable();
53438 + tmpsubj = NULL;
53439 + if (nmatch) {
53440 + if (nmatch->deleted)
53441 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53442 + else
53443 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53444 + if (tmpsubj != NULL)
53445 + task->acl = tmpsubj;
53446 + }
53447 + if (tmpsubj == NULL)
53448 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53449 + task->role);
53450 + if (task->acl) {
53451 + task->is_writable = 0;
53452 + /* ignore additional mmap checks for processes that are writable
53453 + by the default ACL */
53454 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53455 + if (unlikely(obj->mode & GR_WRITE))
53456 + task->is_writable = 1;
53457 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53458 + if (unlikely(obj->mode & GR_WRITE))
53459 + task->is_writable = 1;
53460 +
53461 + gr_set_proc_res(task);
53462 +
53463 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53464 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53465 +#endif
53466 + } else {
53467 + return 1;
53468 + }
53469 +
53470 + return 0;
53471 +}
53472 +
53473 +int
53474 +gr_set_acls(const int type)
53475 +{
53476 + struct task_struct *task, *task2;
53477 + struct acl_role_label *role = current->role;
53478 + __u16 acl_role_id = current->acl_role_id;
53479 + const struct cred *cred;
53480 + int ret;
53481 +
53482 + rcu_read_lock();
53483 + read_lock(&tasklist_lock);
53484 + read_lock(&grsec_exec_file_lock);
53485 + do_each_thread(task2, task) {
53486 + /* check to see if we're called from the exit handler,
53487 + if so, only replace ACLs that have inherited the admin
53488 + ACL */
53489 +
53490 + if (type && (task->role != role ||
53491 + task->acl_role_id != acl_role_id))
53492 + continue;
53493 +
53494 + task->acl_role_id = 0;
53495 + task->acl_sp_role = 0;
53496 +
53497 + if (task->exec_file) {
53498 + cred = __task_cred(task);
53499 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53500 + ret = gr_apply_subject_to_task(task);
53501 + if (ret) {
53502 + read_unlock(&grsec_exec_file_lock);
53503 + read_unlock(&tasklist_lock);
53504 + rcu_read_unlock();
53505 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53506 + return ret;
53507 + }
53508 + } else {
53509 + // it's a kernel process
53510 + task->role = kernel_role;
53511 + task->acl = kernel_role->root_label;
53512 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53513 + task->acl->mode &= ~GR_PROCFIND;
53514 +#endif
53515 + }
53516 + } while_each_thread(task2, task);
53517 + read_unlock(&grsec_exec_file_lock);
53518 + read_unlock(&tasklist_lock);
53519 + rcu_read_unlock();
53520 +
53521 + return 0;
53522 +}
53523 +
53524 +void
53525 +gr_learn_resource(const struct task_struct *task,
53526 + const int res, const unsigned long wanted, const int gt)
53527 +{
53528 + struct acl_subject_label *acl;
53529 + const struct cred *cred;
53530 +
53531 + if (unlikely((gr_status & GR_READY) &&
53532 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
53533 + goto skip_reslog;
53534 +
53535 +#ifdef CONFIG_GRKERNSEC_RESLOG
53536 + gr_log_resource(task, res, wanted, gt);
53537 +#endif
53538 + skip_reslog:
53539 +
53540 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
53541 + return;
53542 +
53543 + acl = task->acl;
53544 +
53545 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
53546 + !(acl->resmask & (1 << (unsigned short) res))))
53547 + return;
53548 +
53549 + if (wanted >= acl->res[res].rlim_cur) {
53550 + unsigned long res_add;
53551 +
53552 + res_add = wanted;
53553 + switch (res) {
53554 + case RLIMIT_CPU:
53555 + res_add += GR_RLIM_CPU_BUMP;
53556 + break;
53557 + case RLIMIT_FSIZE:
53558 + res_add += GR_RLIM_FSIZE_BUMP;
53559 + break;
53560 + case RLIMIT_DATA:
53561 + res_add += GR_RLIM_DATA_BUMP;
53562 + break;
53563 + case RLIMIT_STACK:
53564 + res_add += GR_RLIM_STACK_BUMP;
53565 + break;
53566 + case RLIMIT_CORE:
53567 + res_add += GR_RLIM_CORE_BUMP;
53568 + break;
53569 + case RLIMIT_RSS:
53570 + res_add += GR_RLIM_RSS_BUMP;
53571 + break;
53572 + case RLIMIT_NPROC:
53573 + res_add += GR_RLIM_NPROC_BUMP;
53574 + break;
53575 + case RLIMIT_NOFILE:
53576 + res_add += GR_RLIM_NOFILE_BUMP;
53577 + break;
53578 + case RLIMIT_MEMLOCK:
53579 + res_add += GR_RLIM_MEMLOCK_BUMP;
53580 + break;
53581 + case RLIMIT_AS:
53582 + res_add += GR_RLIM_AS_BUMP;
53583 + break;
53584 + case RLIMIT_LOCKS:
53585 + res_add += GR_RLIM_LOCKS_BUMP;
53586 + break;
53587 + case RLIMIT_SIGPENDING:
53588 + res_add += GR_RLIM_SIGPENDING_BUMP;
53589 + break;
53590 + case RLIMIT_MSGQUEUE:
53591 + res_add += GR_RLIM_MSGQUEUE_BUMP;
53592 + break;
53593 + case RLIMIT_NICE:
53594 + res_add += GR_RLIM_NICE_BUMP;
53595 + break;
53596 + case RLIMIT_RTPRIO:
53597 + res_add += GR_RLIM_RTPRIO_BUMP;
53598 + break;
53599 + case RLIMIT_RTTIME:
53600 + res_add += GR_RLIM_RTTIME_BUMP;
53601 + break;
53602 + }
53603 +
53604 + acl->res[res].rlim_cur = res_add;
53605 +
53606 + if (wanted > acl->res[res].rlim_max)
53607 + acl->res[res].rlim_max = res_add;
53608 +
53609 + /* only log the subject filename, since resource logging is supported for
53610 + single-subject learning only */
53611 + rcu_read_lock();
53612 + cred = __task_cred(task);
53613 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53614 + task->role->roletype, cred->uid, cred->gid, acl->filename,
53615 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
53616 + "", (unsigned long) res, &task->signal->saved_ip);
53617 + rcu_read_unlock();
53618 + }
53619 +
53620 + return;
53621 +}
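The switch above grows a learned resource limit by a per-resource bump so the learned policy does not have to be rewritten on every marginal request. A minimal standalone sketch of that idea, assuming a made-up bump value (the real GR_RLIM_*_BUMP constants are defined elsewhere in this patch):

#include <stdio.h>

#define EXAMPLE_NOFILE_BUMP 5   /* hypothetical bump value, not the kernel's GR_RLIM_NOFILE_BUMP */

static unsigned long learn_bump(unsigned long cur, unsigned long wanted,
                                unsigned long bump)
{
        /* mirror of the kernel logic above: only grow when the request reaches
         * the currently learned soft limit, and overshoot by a small margin so
         * the learned value is not rewritten on every incremental request */
        if (wanted >= cur)
                cur = wanted + bump;
        return cur;
}

int main(void)
{
        unsigned long cur = 1024;

        cur = learn_bump(cur, 1024, EXAMPLE_NOFILE_BUMP);   /* grows to 1029 */
        cur = learn_bump(cur, 1025, EXAMPLE_NOFILE_BUMP);   /* already covered, unchanged */
        printf("learned RLIMIT_NOFILE soft limit: %lu\n", cur);
        return 0;
}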
53622 +
53623 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
53624 +void
53625 +pax_set_initial_flags(struct linux_binprm *bprm)
53626 +{
53627 + struct task_struct *task = current;
53628 + struct acl_subject_label *proc;
53629 + unsigned long flags;
53630 +
53631 + if (unlikely(!(gr_status & GR_READY)))
53632 + return;
53633 +
53634 + flags = pax_get_flags(task);
53635 +
53636 + proc = task->acl;
53637 +
53638 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
53639 + flags &= ~MF_PAX_PAGEEXEC;
53640 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
53641 + flags &= ~MF_PAX_SEGMEXEC;
53642 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
53643 + flags &= ~MF_PAX_RANDMMAP;
53644 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
53645 + flags &= ~MF_PAX_EMUTRAMP;
53646 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
53647 + flags &= ~MF_PAX_MPROTECT;
53648 +
53649 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
53650 + flags |= MF_PAX_PAGEEXEC;
53651 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
53652 + flags |= MF_PAX_SEGMEXEC;
53653 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
53654 + flags |= MF_PAX_RANDMMAP;
53655 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
53656 + flags |= MF_PAX_EMUTRAMP;
53657 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
53658 + flags |= MF_PAX_MPROTECT;
53659 +
53660 + pax_set_flags(task, flags);
53661 +
53662 + return;
53663 +}
53664 +#endif
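pax_set_initial_flags() above applies the subject's PaX policy as a clear-then-set pass over the task's flags. A standalone sketch of that pattern, using local stand-in bit values rather than the kernel's MF_PAX_ definitions:

#include <stdio.h>

#define F_PAGEEXEC (1u << 0)    /* stand-in for MF_PAX_PAGEEXEC */
#define F_MPROTECT (1u << 1)    /* stand-in for MF_PAX_MPROTECT */

int main(void)
{
        unsigned int flags = F_PAGEEXEC;        /* what the binary's markings gave us */
        unsigned int disable = F_PAGEEXEC;      /* subject policy: turn PAGEEXEC off */
        unsigned int enable = F_MPROTECT;       /* subject policy: force MPROTECT on */

        /* clear the subject's disabled bits first, then set its enabled bits */
        flags = (flags & ~disable) | enable;
        printf("%#x\n", flags);                 /* 0x2: PAGEEXEC cleared, MPROTECT set */
        return 0;
}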
53665 +
53666 +int
53667 +gr_handle_proc_ptrace(struct task_struct *task)
53668 +{
53669 + struct file *filp;
53670 + struct task_struct *tmp = task;
53671 + struct task_struct *curtemp = current;
53672 + __u32 retmode;
53673 +
53674 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53675 + if (unlikely(!(gr_status & GR_READY)))
53676 + return 0;
53677 +#endif
53678 +
53679 + read_lock(&tasklist_lock);
53680 + read_lock(&grsec_exec_file_lock);
53681 + filp = task->exec_file;
53682 +
53683 + while (tmp->pid > 0) {
53684 + if (tmp == curtemp)
53685 + break;
53686 + tmp = tmp->real_parent;
53687 + }
53688 +
53689 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53690 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
53691 + read_unlock(&grsec_exec_file_lock);
53692 + read_unlock(&tasklist_lock);
53693 + return 1;
53694 + }
53695 +
53696 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53697 + if (!(gr_status & GR_READY)) {
53698 + read_unlock(&grsec_exec_file_lock);
53699 + read_unlock(&tasklist_lock);
53700 + return 0;
53701 + }
53702 +#endif
53703 +
53704 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
53705 + read_unlock(&grsec_exec_file_lock);
53706 + read_unlock(&tasklist_lock);
53707 +
53708 + if (retmode & GR_NOPTRACE)
53709 + return 1;
53710 +
53711 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
53712 + && (current->acl != task->acl || (current->acl != current->role->root_label
53713 + && current->pid != task->pid)))
53714 + return 1;
53715 +
53716 + return 0;
53717 +}
53718 +
53719 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
53720 +{
53721 + if (unlikely(!(gr_status & GR_READY)))
53722 + return;
53723 +
53724 + if (!(current->role->roletype & GR_ROLE_GOD))
53725 + return;
53726 +
53727 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
53728 + p->role->rolename, gr_task_roletype_to_char(p),
53729 + p->acl->filename);
53730 +}
53731 +
53732 +int
53733 +gr_handle_ptrace(struct task_struct *task, const long request)
53734 +{
53735 + struct task_struct *tmp = task;
53736 + struct task_struct *curtemp = current;
53737 + __u32 retmode;
53738 +
53739 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53740 + if (unlikely(!(gr_status & GR_READY)))
53741 + return 0;
53742 +#endif
53743 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
53744 + read_lock(&tasklist_lock);
53745 + while (tmp->pid > 0) {
53746 + if (tmp == curtemp)
53747 + break;
53748 + tmp = tmp->real_parent;
53749 + }
53750 +
53751 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53752 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
53753 + read_unlock(&tasklist_lock);
53754 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53755 + return 1;
53756 + }
53757 + read_unlock(&tasklist_lock);
53758 + }
53759 +
53760 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53761 + if (!(gr_status & GR_READY))
53762 + return 0;
53763 +#endif
53764 +
53765 + read_lock(&grsec_exec_file_lock);
53766 + if (unlikely(!task->exec_file)) {
53767 + read_unlock(&grsec_exec_file_lock);
53768 + return 0;
53769 + }
53770 +
53771 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
53772 + read_unlock(&grsec_exec_file_lock);
53773 +
53774 + if (retmode & GR_NOPTRACE) {
53775 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53776 + return 1;
53777 + }
53778 +
53779 + if (retmode & GR_PTRACERD) {
53780 + switch (request) {
53781 + case PTRACE_SEIZE:
53782 + case PTRACE_POKETEXT:
53783 + case PTRACE_POKEDATA:
53784 + case PTRACE_POKEUSR:
53785 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
53786 + case PTRACE_SETREGS:
53787 + case PTRACE_SETFPREGS:
53788 +#endif
53789 +#ifdef CONFIG_X86
53790 + case PTRACE_SETFPXREGS:
53791 +#endif
53792 +#ifdef CONFIG_ALTIVEC
53793 + case PTRACE_SETVRREGS:
53794 +#endif
53795 + return 1;
53796 + default:
53797 + return 0;
53798 + }
53799 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
53800 + !(current->role->roletype & GR_ROLE_GOD) &&
53801 + (current->acl != task->acl)) {
53802 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53803 + return 1;
53804 + }
53805 +
53806 + return 0;
53807 +}
53808 +
53809 +static int is_writable_mmap(const struct file *filp)
53810 +{
53811 + struct task_struct *task = current;
53812 + struct acl_object_label *obj, *obj2;
53813 +
53814 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
53815 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
53816 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53817 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
53818 + task->role->root_label);
53819 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
53820 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
53821 + return 1;
53822 + }
53823 + }
53824 + return 0;
53825 +}
53826 +
53827 +int
53828 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
53829 +{
53830 + __u32 mode;
53831 +
53832 + if (unlikely(!file || !(prot & PROT_EXEC)))
53833 + return 1;
53834 +
53835 + if (is_writable_mmap(file))
53836 + return 0;
53837 +
53838 + mode =
53839 + gr_search_file(file->f_path.dentry,
53840 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53841 + file->f_path.mnt);
53842 +
53843 + if (!gr_tpe_allow(file))
53844 + return 0;
53845 +
53846 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53847 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53848 + return 0;
53849 + } else if (unlikely(!(mode & GR_EXEC))) {
53850 + return 0;
53851 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53852 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53853 + return 1;
53854 + }
53855 +
53856 + return 1;
53857 +}
53858 +
53859 +int
53860 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53861 +{
53862 + __u32 mode;
53863 +
53864 + if (unlikely(!file || !(prot & PROT_EXEC)))
53865 + return 1;
53866 +
53867 + if (is_writable_mmap(file))
53868 + return 0;
53869 +
53870 + mode =
53871 + gr_search_file(file->f_path.dentry,
53872 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53873 + file->f_path.mnt);
53874 +
53875 + if (!gr_tpe_allow(file))
53876 + return 0;
53877 +
53878 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53879 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53880 + return 0;
53881 + } else if (unlikely(!(mode & GR_EXEC))) {
53882 + return 0;
53883 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53884 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53885 + return 1;
53886 + }
53887 +
53888 + return 1;
53889 +}
53890 +
53891 +void
53892 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53893 +{
53894 + unsigned long runtime;
53895 + unsigned long cputime;
53896 + unsigned int wday, cday;
53897 + __u8 whr, chr;
53898 + __u8 wmin, cmin;
53899 + __u8 wsec, csec;
53900 + struct timespec timeval;
53901 +
53902 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
53903 + !(task->acl->mode & GR_PROCACCT)))
53904 + return;
53905 +
53906 + do_posix_clock_monotonic_gettime(&timeval);
53907 + runtime = timeval.tv_sec - task->start_time.tv_sec;
53908 + wday = runtime / (3600 * 24);
53909 + runtime -= wday * (3600 * 24);
53910 + whr = runtime / 3600;
53911 + runtime -= whr * 3600;
53912 + wmin = runtime / 60;
53913 + runtime -= wmin * 60;
53914 + wsec = runtime;
53915 +
53916 + cputime = (task->utime + task->stime) / HZ;
53917 + cday = cputime / (3600 * 24);
53918 + cputime -= cday * (3600 * 24);
53919 + chr = cputime / 3600;
53920 + cputime -= chr * 3600;
53921 + cmin = cputime / 60;
53922 + cputime -= cmin * 60;
53923 + csec = cputime;
53924 +
53925 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
53926 +
53927 + return;
53928 +}
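The process-accounting hook above decomposes both the wall-clock runtime and the consumed CPU time into days, hours, minutes, and seconds before logging. A standalone sketch of the same arithmetic on an arbitrary example value:

#include <stdio.h>

int main(void)
{
        unsigned long runtime = 93784;  /* arbitrary example: wall-clock seconds since exec */
        unsigned int days, hours, mins;

        days = runtime / (3600 * 24);
        runtime -= days * (3600 * 24);
        hours = runtime / 3600;
        runtime -= hours * 3600;
        mins = runtime / 60;
        runtime -= mins * 60;

        /* 93784 s -> 1 day, 2 h, 3 min, 4 s */
        printf("%u day(s) %02u:%02u:%02lu\n", days, hours, mins, runtime);
        return 0;
}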
53929 +
53930 +void gr_set_kernel_label(struct task_struct *task)
53931 +{
53932 + if (gr_status & GR_READY) {
53933 + task->role = kernel_role;
53934 + task->acl = kernel_role->root_label;
53935 + }
53936 + return;
53937 +}
53938 +
53939 +#ifdef CONFIG_TASKSTATS
53940 +int gr_is_taskstats_denied(int pid)
53941 +{
53942 + struct task_struct *task;
53943 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53944 + const struct cred *cred;
53945 +#endif
53946 + int ret = 0;
53947 +
53948 + /* restrict taskstats viewing to un-chrooted root users
53949 + who have the 'view' subject flag if the RBAC system is enabled
53950 + */
53951 +
53952 + rcu_read_lock();
53953 + read_lock(&tasklist_lock);
53954 + task = find_task_by_vpid(pid);
53955 + if (task) {
53956 +#ifdef CONFIG_GRKERNSEC_CHROOT
53957 + if (proc_is_chrooted(task))
53958 + ret = -EACCES;
53959 +#endif
53960 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53961 + cred = __task_cred(task);
53962 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53963 + if (cred->uid != 0)
53964 + ret = -EACCES;
53965 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53966 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
53967 + ret = -EACCES;
53968 +#endif
53969 +#endif
53970 + if (gr_status & GR_READY) {
53971 + if (!(task->acl->mode & GR_VIEW))
53972 + ret = -EACCES;
53973 + }
53974 + } else
53975 + ret = -ENOENT;
53976 +
53977 + read_unlock(&tasklist_lock);
53978 + rcu_read_unlock();
53979 +
53980 + return ret;
53981 +}
53982 +#endif
53983 +
53984 +/* AUXV entries are filled via a descendant of search_binary_handler
53985 + after we've already applied the subject for the target
53986 +*/
53987 +int gr_acl_enable_at_secure(void)
53988 +{
53989 + if (unlikely(!(gr_status & GR_READY)))
53990 + return 0;
53991 +
53992 + if (current->acl->mode & GR_ATSECURE)
53993 + return 1;
53994 +
53995 + return 0;
53996 +}
53997 +
53998 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
53999 +{
54000 + struct task_struct *task = current;
54001 + struct dentry *dentry = file->f_path.dentry;
54002 + struct vfsmount *mnt = file->f_path.mnt;
54003 + struct acl_object_label *obj, *tmp;
54004 + struct acl_subject_label *subj;
54005 + unsigned int bufsize;
54006 + int is_not_root;
54007 + char *path;
54008 + dev_t dev = __get_dev(dentry);
54009 +
54010 + if (unlikely(!(gr_status & GR_READY)))
54011 + return 1;
54012 +
54013 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54014 + return 1;
54015 +
54016 + /* ignore Eric Biederman */
54017 + if (IS_PRIVATE(dentry->d_inode))
54018 + return 1;
54019 +
54020 + subj = task->acl;
54021 + do {
54022 + obj = lookup_acl_obj_label(ino, dev, subj);
54023 + if (obj != NULL)
54024 + return (obj->mode & GR_FIND) ? 1 : 0;
54025 + } while ((subj = subj->parent_subject));
54026 +
54027 + /* this is purely an optimization since we're looking for an object
54028 + for the directory we're doing a readdir on;
54029 + if it's possible for any globbed object to match the entry we're
54030 + filling into the directory, then the object we find here will be
54031 + an anchor point with attached globbed objects
54032 + */
54033 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54034 + if (obj->globbed == NULL)
54035 + return (obj->mode & GR_FIND) ? 1 : 0;
54036 +
54037 + is_not_root = ((obj->filename[0] == '/') &&
54038 + (obj->filename[1] == '\0')) ? 0 : 1;
54039 + bufsize = PAGE_SIZE - namelen - is_not_root;
54040 +
54041 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
54042 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54043 + return 1;
54044 +
54045 + preempt_disable();
54046 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54047 + bufsize);
54048 +
54049 + bufsize = strlen(path);
54050 +
54051 + /* if base is "/", don't append an additional slash */
54052 + if (is_not_root)
54053 + *(path + bufsize) = '/';
54054 + memcpy(path + bufsize + is_not_root, name, namelen);
54055 + *(path + bufsize + namelen + is_not_root) = '\0';
54056 +
54057 + tmp = obj->globbed;
54058 + while (tmp) {
54059 + if (!glob_match(tmp->filename, path)) {
54060 + preempt_enable();
54061 + return (tmp->mode & GR_FIND) ? 1 : 0;
54062 + }
54063 + tmp = tmp->next;
54064 + }
54065 + preempt_enable();
54066 + return (obj->mode & GR_FIND) ? 1 : 0;
54067 +}
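gr_acl_handle_filldir() above falls back to the directory's own object as an anchor and then tests each attached glob pattern against the would-be entry path. A standalone sketch of that matching step, with fnmatch() standing in for the kernel's glob_match() and made-up paths and patterns:

#include <stdio.h>
#include <fnmatch.h>

int main(void)
{
        const char *dir = "/var/log";           /* directory object we anchored on */
        const char *entry = "secure.1";         /* name being filled into the listing */
        const char *globs[] = { "/var/log/wtmp*", "/var/log/secure*", NULL };
        char path[4096];

        snprintf(path, sizeof(path), "%s/%s", dir, entry);
        for (int i = 0; globs[i] != NULL; i++) {
                /* fnmatch(), like the glob_match() usage above, returns 0 on a match */
                if (fnmatch(globs[i], path, 0) == 0) {
                        printf("matched %s: apply that object's GR_FIND bit\n", globs[i]);
                        return 0;
                }
        }
        printf("no glob matched: fall back to the anchor object's mode\n");
        return 0;
}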
54068 +
54069 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54070 +EXPORT_SYMBOL(gr_acl_is_enabled);
54071 +#endif
54072 +EXPORT_SYMBOL(gr_learn_resource);
54073 +EXPORT_SYMBOL(gr_set_kernel_label);
54074 +#ifdef CONFIG_SECURITY
54075 +EXPORT_SYMBOL(gr_check_user_change);
54076 +EXPORT_SYMBOL(gr_check_group_change);
54077 +#endif
54078 +
54079 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54080 new file mode 100644
54081 index 0000000..34fefda
54082 --- /dev/null
54083 +++ b/grsecurity/gracl_alloc.c
54084 @@ -0,0 +1,105 @@
54085 +#include <linux/kernel.h>
54086 +#include <linux/mm.h>
54087 +#include <linux/slab.h>
54088 +#include <linux/vmalloc.h>
54089 +#include <linux/gracl.h>
54090 +#include <linux/grsecurity.h>
54091 +
54092 +static unsigned long alloc_stack_next = 1;
54093 +static unsigned long alloc_stack_size = 1;
54094 +static void **alloc_stack;
54095 +
54096 +static __inline__ int
54097 +alloc_pop(void)
54098 +{
54099 + if (alloc_stack_next == 1)
54100 + return 0;
54101 +
54102 + kfree(alloc_stack[alloc_stack_next - 2]);
54103 +
54104 + alloc_stack_next--;
54105 +
54106 + return 1;
54107 +}
54108 +
54109 +static __inline__ int
54110 +alloc_push(void *buf)
54111 +{
54112 + if (alloc_stack_next >= alloc_stack_size)
54113 + return 1;
54114 +
54115 + alloc_stack[alloc_stack_next - 1] = buf;
54116 +
54117 + alloc_stack_next++;
54118 +
54119 + return 0;
54120 +}
54121 +
54122 +void *
54123 +acl_alloc(unsigned long len)
54124 +{
54125 + void *ret = NULL;
54126 +
54127 + if (!len || len > PAGE_SIZE)
54128 + goto out;
54129 +
54130 + ret = kmalloc(len, GFP_KERNEL);
54131 +
54132 + if (ret) {
54133 + if (alloc_push(ret)) {
54134 + kfree(ret);
54135 + ret = NULL;
54136 + }
54137 + }
54138 +
54139 +out:
54140 + return ret;
54141 +}
54142 +
54143 +void *
54144 +acl_alloc_num(unsigned long num, unsigned long len)
54145 +{
54146 + if (!len || (num > (PAGE_SIZE / len)))
54147 + return NULL;
54148 +
54149 + return acl_alloc(num * len);
54150 +}
54151 +
54152 +void
54153 +acl_free_all(void)
54154 +{
54155 + if (gr_acl_is_enabled() || !alloc_stack)
54156 + return;
54157 +
54158 + while (alloc_pop()) ;
54159 +
54160 + if (alloc_stack) {
54161 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54162 + kfree(alloc_stack);
54163 + else
54164 + vfree(alloc_stack);
54165 + }
54166 +
54167 + alloc_stack = NULL;
54168 + alloc_stack_size = 1;
54169 + alloc_stack_next = 1;
54170 +
54171 + return;
54172 +}
54173 +
54174 +int
54175 +acl_alloc_stack_init(unsigned long size)
54176 +{
54177 + if ((size * sizeof (void *)) <= PAGE_SIZE)
54178 + alloc_stack =
54179 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54180 + else
54181 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
54182 +
54183 + alloc_stack_size = size;
54184 +
54185 + if (!alloc_stack)
54186 + return 0;
54187 + else
54188 + return 1;
54189 +}
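gracl_alloc.c above implements a one-shot allocator: every policy allocation is pushed on a stack so the whole policy can be released in a single sweep when the RBAC system is disabled or reloaded. A standalone userspace model of that idea, with malloc()/free() standing in for kmalloc()/kfree() and the same "next starts at 1" bookkeeping:

#include <stdlib.h>

static void **stack;
static unsigned long next = 1, size = 1;

static int stack_init(unsigned long n)
{
        stack = calloc(n, sizeof(void *));
        size = n;
        return stack != NULL;
}

static void *stack_alloc(size_t len)
{
        void *p;

        /* fail once the stack is full or the underlying allocation fails */
        if (next >= size || (p = malloc(len)) == NULL)
                return NULL;
        stack[next - 1] = p;
        next++;
        return p;
}

static void stack_free_all(void)
{
        /* one sweep releases every tracked allocation, newest first */
        while (next > 1) {
                next--;
                free(stack[next - 1]);
        }
        free(stack);
        stack = NULL;
}

int main(void)
{
        if (!stack_init(8))
                return 1;
        stack_alloc(64);
        stack_alloc(128);
        stack_free_all();
        return 0;
}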
54190 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54191 new file mode 100644
54192 index 0000000..6d21049
54193 --- /dev/null
54194 +++ b/grsecurity/gracl_cap.c
54195 @@ -0,0 +1,110 @@
54196 +#include <linux/kernel.h>
54197 +#include <linux/module.h>
54198 +#include <linux/sched.h>
54199 +#include <linux/gracl.h>
54200 +#include <linux/grsecurity.h>
54201 +#include <linux/grinternal.h>
54202 +
54203 +extern const char *captab_log[];
54204 +extern int captab_log_entries;
54205 +
54206 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
54207 +{
54208 + struct acl_subject_label *curracl;
54209 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54210 + kernel_cap_t cap_audit = __cap_empty_set;
54211 +
54212 + if (!gr_acl_is_enabled())
54213 + return 1;
54214 +
54215 + curracl = task->acl;
54216 +
54217 + cap_drop = curracl->cap_lower;
54218 + cap_mask = curracl->cap_mask;
54219 + cap_audit = curracl->cap_invert_audit;
54220 +
54221 + while ((curracl = curracl->parent_subject)) {
54222 + /* walk up the parent subjects: the nearest subject whose cap_mask
54223 + mentions a capability not yet covered by the computed mask decides it.
54224 + Fold its bit into the computed mask, and if that subject also lowers
54225 + the capability, add it to the set of dropped capabilities
54226 + */
54227 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54228 + cap_raise(cap_mask, cap);
54229 + if (cap_raised(curracl->cap_lower, cap))
54230 + cap_raise(cap_drop, cap);
54231 + if (cap_raised(curracl->cap_invert_audit, cap))
54232 + cap_raise(cap_audit, cap);
54233 + }
54234 + }
54235 +
54236 + if (!cap_raised(cap_drop, cap)) {
54237 + if (cap_raised(cap_audit, cap))
54238 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54239 + return 1;
54240 + }
54241 +
54242 + curracl = task->acl;
54243 +
54244 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54245 + && cap_raised(cred->cap_effective, cap)) {
54246 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54247 + task->role->roletype, cred->uid,
54248 + cred->gid, task->exec_file ?
54249 + gr_to_filename(task->exec_file->f_path.dentry,
54250 + task->exec_file->f_path.mnt) : curracl->filename,
54251 + curracl->filename, 0UL,
54252 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54253 + return 1;
54254 + }
54255 +
54256 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54257 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54258 +
54259 + return 0;
54260 +}
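The parent-subject walk above computes which capabilities are effectively dropped for a nested subject: the nearest subject in the chain whose cap_mask covers a capability decides whether it is lowered. A standalone model with plain 32-bit masks standing in for kernel_cap_t and an invented three-level chain:

#include <stdio.h>

struct subj { unsigned int cap_mask, cap_lower; struct subj *parent; };

static int cap_is_dropped(const struct subj *s, int cap)
{
        unsigned int bit = 1u << cap;
        unsigned int mask = s->cap_mask, drop = s->cap_lower;

        while ((s = s->parent)) {
                /* the first subject in the chain that mentions the cap decides it */
                if (!(mask & bit) && (s->cap_mask & bit)) {
                        mask |= bit;
                        if (s->cap_lower & bit)
                                drop |= bit;
                }
        }
        return !!(drop & bit);
}

int main(void)
{
        /* grandparent lowers cap 3, parent and child say nothing about it */
        struct subj gp = { .cap_mask = 1u << 3, .cap_lower = 1u << 3, .parent = NULL };
        struct subj p  = { .cap_mask = 0, .cap_lower = 0, .parent = &gp };
        struct subj c  = { .cap_mask = 0, .cap_lower = 0, .parent = &p };

        printf("cap 3 dropped: %d\n", cap_is_dropped(&c, 3));   /* 1 */
        return 0;
}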
54261 +
54262 +int
54263 +gr_acl_is_capable(const int cap)
54264 +{
54265 + return gr_task_acl_is_capable(current, current_cred(), cap);
54266 +}
54267 +
54268 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
54269 +{
54270 + struct acl_subject_label *curracl;
54271 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54272 +
54273 + if (!gr_acl_is_enabled())
54274 + return 1;
54275 +
54276 + curracl = task->acl;
54277 +
54278 + cap_drop = curracl->cap_lower;
54279 + cap_mask = curracl->cap_mask;
54280 +
54281 + while ((curracl = curracl->parent_subject)) {
54282 + /* walk up the parent subjects: the nearest subject whose cap_mask
54283 + mentions a capability not yet covered by the computed mask decides it.
54284 + Fold its bit into the computed mask, and if that subject also lowers
54285 + the capability, add it to the set of dropped capabilities
54286 + */
54287 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54288 + cap_raise(cap_mask, cap);
54289 + if (cap_raised(curracl->cap_lower, cap))
54290 + cap_raise(cap_drop, cap);
54291 + }
54292 + }
54293 +
54294 + if (!cap_raised(cap_drop, cap))
54295 + return 1;
54296 +
54297 + return 0;
54298 +}
54299 +
54300 +int
54301 +gr_acl_is_capable_nolog(const int cap)
54302 +{
54303 + return gr_task_acl_is_capable_nolog(current, cap);
54304 +}
54305 +
54306 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54307 new file mode 100644
54308 index 0000000..88d0e87
54309 --- /dev/null
54310 +++ b/grsecurity/gracl_fs.c
54311 @@ -0,0 +1,435 @@
54312 +#include <linux/kernel.h>
54313 +#include <linux/sched.h>
54314 +#include <linux/types.h>
54315 +#include <linux/fs.h>
54316 +#include <linux/file.h>
54317 +#include <linux/stat.h>
54318 +#include <linux/grsecurity.h>
54319 +#include <linux/grinternal.h>
54320 +#include <linux/gracl.h>
54321 +
54322 +umode_t
54323 +gr_acl_umask(void)
54324 +{
54325 + if (unlikely(!gr_acl_is_enabled()))
54326 + return 0;
54327 +
54328 + return current->role->umask;
54329 +}
54330 +
54331 +__u32
54332 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54333 + const struct vfsmount * mnt)
54334 +{
54335 + __u32 mode;
54336 +
54337 + if (unlikely(!dentry->d_inode))
54338 + return GR_FIND;
54339 +
54340 + mode =
54341 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54342 +
54343 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54344 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54345 + return mode;
54346 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54347 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54348 + return 0;
54349 + } else if (unlikely(!(mode & GR_FIND)))
54350 + return 0;
54351 +
54352 + return GR_FIND;
54353 +}
54354 +
54355 +__u32
54356 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54357 + int acc_mode)
54358 +{
54359 + __u32 reqmode = GR_FIND;
54360 + __u32 mode;
54361 +
54362 + if (unlikely(!dentry->d_inode))
54363 + return reqmode;
54364 +
54365 + if (acc_mode & MAY_APPEND)
54366 + reqmode |= GR_APPEND;
54367 + else if (acc_mode & MAY_WRITE)
54368 + reqmode |= GR_WRITE;
54369 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54370 + reqmode |= GR_READ;
54371 +
54372 + mode =
54373 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54374 + mnt);
54375 +
54376 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54377 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54378 + reqmode & GR_READ ? " reading" : "",
54379 + reqmode & GR_WRITE ? " writing" : reqmode &
54380 + GR_APPEND ? " appending" : "");
54381 + return reqmode;
54382 + } else
54383 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54384 + {
54385 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54386 + reqmode & GR_READ ? " reading" : "",
54387 + reqmode & GR_WRITE ? " writing" : reqmode &
54388 + GR_APPEND ? " appending" : "");
54389 + return 0;
54390 + } else if (unlikely((mode & reqmode) != reqmode))
54391 + return 0;
54392 +
54393 + return reqmode;
54394 +}
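gr_acl_handle_open() above translates the VFS access mode into a required RBAC mode: MAY_APPEND takes precedence over MAY_WRITE, and a read of a directory does not add GR_READ (directory listing is policed by the filldir/GR_FIND path instead). A standalone sketch of that translation; the MAY_ and GR_ values below are local stand-ins, not the kernel's definitions:

#include <stdio.h>

#define MAY_WRITE  0x2
#define MAY_READ   0x4
#define MAY_APPEND 0x8

#define GR_FIND    0x1
#define GR_READ    0x2
#define GR_WRITE   0x4
#define GR_APPEND  0x8

static unsigned int open_reqmode(int acc_mode, int is_dir)
{
        unsigned int reqmode = GR_FIND;

        if (acc_mode & MAY_APPEND)              /* append wins over plain write */
                reqmode |= GR_APPEND;
        else if (acc_mode & MAY_WRITE)
                reqmode |= GR_WRITE;
        if ((acc_mode & MAY_READ) && !is_dir)   /* directory reads need no GR_READ */
                reqmode |= GR_READ;
        return reqmode;
}

int main(void)
{
        printf("%#x\n", open_reqmode(MAY_READ | MAY_WRITE | MAY_APPEND, 0)); /* 0xb: FIND|READ|APPEND */
        printf("%#x\n", open_reqmode(MAY_READ, 1));                          /* 0x1: FIND only */
        return 0;
}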
54395 +
54396 +__u32
54397 +gr_acl_handle_creat(const struct dentry * dentry,
54398 + const struct dentry * p_dentry,
54399 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54400 + const int imode)
54401 +{
54402 + __u32 reqmode = GR_WRITE | GR_CREATE;
54403 + __u32 mode;
54404 +
54405 + if (acc_mode & MAY_APPEND)
54406 + reqmode |= GR_APPEND;
54407 + // if a directory was required or the directory already exists, then
54408 + // don't count this open as a read
54409 + if ((acc_mode & MAY_READ) &&
54410 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54411 + reqmode |= GR_READ;
54412 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54413 + reqmode |= GR_SETID;
54414 +
54415 + mode =
54416 + gr_check_create(dentry, p_dentry, p_mnt,
54417 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54418 +
54419 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54420 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54421 + reqmode & GR_READ ? " reading" : "",
54422 + reqmode & GR_WRITE ? " writing" : reqmode &
54423 + GR_APPEND ? " appending" : "");
54424 + return reqmode;
54425 + } else
54426 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54427 + {
54428 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54429 + reqmode & GR_READ ? " reading" : "",
54430 + reqmode & GR_WRITE ? " writing" : reqmode &
54431 + GR_APPEND ? " appending" : "");
54432 + return 0;
54433 + } else if (unlikely((mode & reqmode) != reqmode))
54434 + return 0;
54435 +
54436 + return reqmode;
54437 +}
54438 +
54439 +__u32
54440 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
54441 + const int fmode)
54442 +{
54443 + __u32 mode, reqmode = GR_FIND;
54444 +
54445 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
54446 + reqmode |= GR_EXEC;
54447 + if (fmode & S_IWOTH)
54448 + reqmode |= GR_WRITE;
54449 + if (fmode & S_IROTH)
54450 + reqmode |= GR_READ;
54451 +
54452 + mode =
54453 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54454 + mnt);
54455 +
54456 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54457 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54458 + reqmode & GR_READ ? " reading" : "",
54459 + reqmode & GR_WRITE ? " writing" : "",
54460 + reqmode & GR_EXEC ? " executing" : "");
54461 + return reqmode;
54462 + } else
54463 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54464 + {
54465 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54466 + reqmode & GR_READ ? " reading" : "",
54467 + reqmode & GR_WRITE ? " writing" : "",
54468 + reqmode & GR_EXEC ? " executing" : "");
54469 + return 0;
54470 + } else if (unlikely((mode & reqmode) != reqmode))
54471 + return 0;
54472 +
54473 + return reqmode;
54474 +}
54475 +
54476 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
54477 +{
54478 + __u32 mode;
54479 +
54480 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
54481 +
54482 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54483 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
54484 + return mode;
54485 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54486 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
54487 + return 0;
54488 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54489 + return 0;
54490 +
54491 + return (reqmode);
54492 +}
54493 +
54494 +__u32
54495 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54496 +{
54497 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
54498 +}
54499 +
54500 +__u32
54501 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
54502 +{
54503 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
54504 +}
54505 +
54506 +__u32
54507 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
54508 +{
54509 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
54510 +}
54511 +
54512 +__u32
54513 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
54514 +{
54515 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
54516 +}
54517 +
54518 +__u32
54519 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
54520 + umode_t *modeptr)
54521 +{
54522 + umode_t mode;
54523 +
54524 + *modeptr &= ~gr_acl_umask();
54525 + mode = *modeptr;
54526 +
54527 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
54528 + return 1;
54529 +
54530 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
54531 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
54532 + GR_CHMOD_ACL_MSG);
54533 + } else {
54534 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
54535 + }
54536 +}
54537 +
54538 +__u32
54539 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
54540 +{
54541 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
54542 +}
54543 +
54544 +__u32
54545 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
54546 +{
54547 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
54548 +}
54549 +
54550 +__u32
54551 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
54552 +{
54553 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
54554 +}
54555 +
54556 +__u32
54557 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
54558 +{
54559 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
54560 + GR_UNIXCONNECT_ACL_MSG);
54561 +}
54562 +
54563 +/* hardlinks require at minimum create and link permission,
54564 + any additional privilege required is based on the
54565 + privilege of the file being linked to
54566 +*/
54567 +__u32
54568 +gr_acl_handle_link(const struct dentry * new_dentry,
54569 + const struct dentry * parent_dentry,
54570 + const struct vfsmount * parent_mnt,
54571 + const struct dentry * old_dentry,
54572 + const struct vfsmount * old_mnt, const char *to)
54573 +{
54574 + __u32 mode;
54575 + __u32 needmode = GR_CREATE | GR_LINK;
54576 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
54577 +
54578 + mode =
54579 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
54580 + old_mnt);
54581 +
54582 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
54583 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54584 + return mode;
54585 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54586 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54587 + return 0;
54588 + } else if (unlikely((mode & needmode) != needmode))
54589 + return 0;
54590 +
54591 + return 1;
54592 +}
54593 +
54594 +__u32
54595 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54596 + const struct dentry * parent_dentry,
54597 + const struct vfsmount * parent_mnt, const char *from)
54598 +{
54599 + __u32 needmode = GR_WRITE | GR_CREATE;
54600 + __u32 mode;
54601 +
54602 + mode =
54603 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
54604 + GR_CREATE | GR_AUDIT_CREATE |
54605 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
54606 +
54607 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
54608 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54609 + return mode;
54610 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54611 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54612 + return 0;
54613 + } else if (unlikely((mode & needmode) != needmode))
54614 + return 0;
54615 +
54616 + return (GR_WRITE | GR_CREATE);
54617 +}
54618 +
54619 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
54620 +{
54621 + __u32 mode;
54622 +
54623 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54624 +
54625 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54626 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
54627 + return mode;
54628 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54629 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
54630 + return 0;
54631 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54632 + return 0;
54633 +
54634 + return (reqmode);
54635 +}
54636 +
54637 +__u32
54638 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54639 + const struct dentry * parent_dentry,
54640 + const struct vfsmount * parent_mnt,
54641 + const int mode)
54642 +{
54643 + __u32 reqmode = GR_WRITE | GR_CREATE;
54644 + if (unlikely(mode & (S_ISUID | S_ISGID)))
54645 + reqmode |= GR_SETID;
54646 +
54647 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54648 + reqmode, GR_MKNOD_ACL_MSG);
54649 +}
54650 +
54651 +__u32
54652 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
54653 + const struct dentry *parent_dentry,
54654 + const struct vfsmount *parent_mnt)
54655 +{
54656 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54657 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
54658 +}
54659 +
54660 +#define RENAME_CHECK_SUCCESS(old, new) \
54661 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
54662 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
54663 +
54664 +int
54665 +gr_acl_handle_rename(struct dentry *new_dentry,
54666 + struct dentry *parent_dentry,
54667 + const struct vfsmount *parent_mnt,
54668 + struct dentry *old_dentry,
54669 + struct inode *old_parent_inode,
54670 + struct vfsmount *old_mnt, const char *newname)
54671 +{
54672 + __u32 comp1, comp2;
54673 + int error = 0;
54674 +
54675 + if (unlikely(!gr_acl_is_enabled()))
54676 + return 0;
54677 +
54678 + if (!new_dentry->d_inode) {
54679 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
54680 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
54681 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
54682 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
54683 + GR_DELETE | GR_AUDIT_DELETE |
54684 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54685 + GR_SUPPRESS, old_mnt);
54686 + } else {
54687 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
54688 + GR_CREATE | GR_DELETE |
54689 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
54690 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54691 + GR_SUPPRESS, parent_mnt);
54692 + comp2 =
54693 + gr_search_file(old_dentry,
54694 + GR_READ | GR_WRITE | GR_AUDIT_READ |
54695 + GR_DELETE | GR_AUDIT_DELETE |
54696 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
54697 + }
54698 +
54699 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
54700 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
54701 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54702 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
54703 + && !(comp2 & GR_SUPPRESS)) {
54704 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54705 + error = -EACCES;
54706 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
54707 + error = -EACCES;
54708 +
54709 + return error;
54710 +}
54711 +
54712 +void
54713 +gr_acl_handle_exit(void)
54714 +{
54715 + u16 id;
54716 + char *rolename;
54717 + struct file *exec_file;
54718 +
54719 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
54720 + !(current->role->roletype & GR_ROLE_PERSIST))) {
54721 + id = current->acl_role_id;
54722 + rolename = current->role->rolename;
54723 + gr_set_acls(1);
54724 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
54725 + }
54726 +
54727 + write_lock(&grsec_exec_file_lock);
54728 + exec_file = current->exec_file;
54729 + current->exec_file = NULL;
54730 + write_unlock(&grsec_exec_file_lock);
54731 +
54732 + if (exec_file)
54733 + fput(exec_file);
54734 +}
54735 +
54736 +int
54737 +gr_acl_handle_procpidmem(const struct task_struct *task)
54738 +{
54739 + if (unlikely(!gr_acl_is_enabled()))
54740 + return 0;
54741 +
54742 + if (task != current && task->acl->mode & GR_PROTPROCFD)
54743 + return -EACCES;
54744 +
54745 + return 0;
54746 +}
54747 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
54748 new file mode 100644
54749 index 0000000..58800a7
54750 --- /dev/null
54751 +++ b/grsecurity/gracl_ip.c
54752 @@ -0,0 +1,384 @@
54753 +#include <linux/kernel.h>
54754 +#include <asm/uaccess.h>
54755 +#include <asm/errno.h>
54756 +#include <net/sock.h>
54757 +#include <linux/file.h>
54758 +#include <linux/fs.h>
54759 +#include <linux/net.h>
54760 +#include <linux/in.h>
54761 +#include <linux/skbuff.h>
54762 +#include <linux/ip.h>
54763 +#include <linux/udp.h>
54764 +#include <linux/types.h>
54765 +#include <linux/sched.h>
54766 +#include <linux/netdevice.h>
54767 +#include <linux/inetdevice.h>
54768 +#include <linux/gracl.h>
54769 +#include <linux/grsecurity.h>
54770 +#include <linux/grinternal.h>
54771 +
54772 +#define GR_BIND 0x01
54773 +#define GR_CONNECT 0x02
54774 +#define GR_INVERT 0x04
54775 +#define GR_BINDOVERRIDE 0x08
54776 +#define GR_CONNECTOVERRIDE 0x10
54777 +#define GR_SOCK_FAMILY 0x20
54778 +
54779 +static const char * gr_protocols[IPPROTO_MAX] = {
54780 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
54781 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
54782 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
54783 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
54784 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
54785 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
54786 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
54787 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
54788 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
54789 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
54790 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
54791 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
54792 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
54793 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
54794 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
54795 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
54796 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
54797 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
54798 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
54799 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
54800 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
54801 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
54802 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
54803 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
54804 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
54805 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
54806 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
54807 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
54808 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
54809 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
54810 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
54811 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
54812 + };
54813 +
54814 +static const char * gr_socktypes[SOCK_MAX] = {
54815 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
54816 + "unknown:7", "unknown:8", "unknown:9", "packet"
54817 + };
54818 +
54819 +static const char * gr_sockfamilies[AF_MAX+1] = {
54820 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
54821 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
54822 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
54823 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
54824 + };
54825 +
54826 +const char *
54827 +gr_proto_to_name(unsigned char proto)
54828 +{
54829 + return gr_protocols[proto];
54830 +}
54831 +
54832 +const char *
54833 +gr_socktype_to_name(unsigned char type)
54834 +{
54835 + return gr_socktypes[type];
54836 +}
54837 +
54838 +const char *
54839 +gr_sockfamily_to_name(unsigned char family)
54840 +{
54841 + return gr_sockfamilies[family];
54842 +}
54843 +
54844 +int
54845 +gr_search_socket(const int domain, const int type, const int protocol)
54846 +{
54847 + struct acl_subject_label *curr;
54848 + const struct cred *cred = current_cred();
54849 +
54850 + if (unlikely(!gr_acl_is_enabled()))
54851 + goto exit;
54852 +
54853 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
54854 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
54855 + goto exit; // let the kernel handle it
54856 +
54857 + curr = current->acl;
54858 +
54859 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
54860 + /* the family is allowed, if this is PF_INET allow it only if
54861 + the extra sock type/protocol checks pass */
54862 + if (domain == PF_INET)
54863 + goto inet_check;
54864 + goto exit;
54865 + } else {
54866 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54867 + __u32 fakeip = 0;
54868 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54869 + current->role->roletype, cred->uid,
54870 + cred->gid, current->exec_file ?
54871 + gr_to_filename(current->exec_file->f_path.dentry,
54872 + current->exec_file->f_path.mnt) :
54873 + curr->filename, curr->filename,
54874 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
54875 + &current->signal->saved_ip);
54876 + goto exit;
54877 + }
54878 + goto exit_fail;
54879 + }
54880 +
54881 +inet_check:
54882 + /* the rest of this checking is for IPv4 only */
54883 + if (!curr->ips)
54884 + goto exit;
54885 +
54886 + if ((curr->ip_type & (1 << type)) &&
54887 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
54888 + goto exit;
54889 +
54890 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54891 + /* we don't place acls on raw sockets, and sometimes
54892 + dgram/ip sockets are opened for ioctl and not
54893 + bind/connect, so we'll fake a bind learn log */
54894 + if (type == SOCK_RAW || type == SOCK_PACKET) {
54895 + __u32 fakeip = 0;
54896 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54897 + current->role->roletype, cred->uid,
54898 + cred->gid, current->exec_file ?
54899 + gr_to_filename(current->exec_file->f_path.dentry,
54900 + current->exec_file->f_path.mnt) :
54901 + curr->filename, curr->filename,
54902 + &fakeip, 0, type,
54903 + protocol, GR_CONNECT, &current->signal->saved_ip);
54904 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
54905 + __u32 fakeip = 0;
54906 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54907 + current->role->roletype, cred->uid,
54908 + cred->gid, current->exec_file ?
54909 + gr_to_filename(current->exec_file->f_path.dentry,
54910 + current->exec_file->f_path.mnt) :
54911 + curr->filename, curr->filename,
54912 + &fakeip, 0, type,
54913 + protocol, GR_BIND, &current->signal->saved_ip);
54914 + }
54915 + /* we'll log when they use connect or bind */
54916 + goto exit;
54917 + }
54918 +
54919 +exit_fail:
54920 + if (domain == PF_INET)
54921 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
54922 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
54923 + else
54924 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
54925 + gr_socktype_to_name(type), protocol);
54926 +
54927 + return 0;
54928 +exit:
54929 + return 1;
54930 +}
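gr_search_socket() above consults per-subject bitmaps: one word-indexed bitmap for allowed socket families and, for PF_INET, another for allowed IP protocols plus a bitmask of socket types. A standalone sketch of the value/32 word index and value%32 bit test, with an invented policy that only enables PF_INET and TCP:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

static unsigned int sock_families[8];   /* 256 bits, more than covers AF_MAX */
static unsigned int ip_proto[8];        /* 256 bits, covers IPPROTO_MAX */

static void allow(unsigned int *bm, int val)
{
        bm[val / 32] |= 1u << (val % 32);
}

static int allowed(const unsigned int *bm, int val)
{
        return !!(bm[val / 32] & (1u << (val % 32)));
}

int main(void)
{
        allow(sock_families, PF_INET);
        allow(ip_proto, IPPROTO_TCP);

        printf("PF_INET allowed:     %d\n", allowed(sock_families, PF_INET)); /* 1 */
        printf("IPPROTO_UDP allowed: %d\n", allowed(ip_proto, IPPROTO_UDP));  /* 0 */
        return 0;
}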
54931 +
54932 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
54933 +{
54934 + if ((ip->mode & mode) &&
54935 + (ip_port >= ip->low) &&
54936 + (ip_port <= ip->high) &&
54937 + ((ntohl(ip_addr) & our_netmask) ==
54938 + (ntohl(our_addr) & our_netmask))
54939 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
54940 + && (ip->type & (1 << type))) {
54941 + if (ip->mode & GR_INVERT)
54942 + return 2; // specifically denied
54943 + else
54944 + return 1; // allowed
54945 + }
54946 +
54947 + return 0; // not specifically allowed, may continue parsing
54948 +}
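check_ip_policy() above returns a three-way result: 0 when the rule says nothing (keep scanning the remaining rules), 1 when the address, port, and protocol match an allow rule, and 2 when they match a rule marked GR_INVERT, an explicit deny. A simplified standalone model covering just the address mask and port range, with made-up rule values:

#include <stdio.h>
#include <arpa/inet.h>

struct rule { unsigned int addr, netmask; unsigned short lo, hi; int invert; };

/* 0 = rule says nothing, 1 = allowed, 2 = explicitly denied */
static int match(const struct rule *r, unsigned int ip, unsigned short port)
{
        if (port >= r->lo && port <= r->hi &&
            (ntohl(ip) & r->netmask) == (ntohl(r->addr) & r->netmask))
                return r->invert ? 2 : 1;
        return 0;
}

int main(void)
{
        /* made-up rule: allow 192.168.1.0/24, ports 1-1023 */
        struct rule r = { inet_addr("192.168.1.0"), 0xffffff00, 1, 1023, 0 };

        printf("%d\n", match(&r, inet_addr("192.168.1.42"), 80));   /* 1: allowed */
        printf("%d\n", match(&r, inet_addr("10.0.0.1"), 80));       /* 0: no statement */
        return 0;
}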
54949 +
54950 +static int
54951 +gr_search_connectbind(const int full_mode, struct sock *sk,
54952 + struct sockaddr_in *addr, const int type)
54953 +{
54954 + char iface[IFNAMSIZ] = {0};
54955 + struct acl_subject_label *curr;
54956 + struct acl_ip_label *ip;
54957 + struct inet_sock *isk;
54958 + struct net_device *dev;
54959 + struct in_device *idev;
54960 + unsigned long i;
54961 + int ret;
54962 + int mode = full_mode & (GR_BIND | GR_CONNECT);
54963 + __u32 ip_addr = 0;
54964 + __u32 our_addr;
54965 + __u32 our_netmask;
54966 + char *p;
54967 + __u16 ip_port = 0;
54968 + const struct cred *cred = current_cred();
54969 +
54970 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
54971 + return 0;
54972 +
54973 + curr = current->acl;
54974 + isk = inet_sk(sk);
54975 +
54976 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
54977 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
54978 + addr->sin_addr.s_addr = curr->inaddr_any_override;
54979 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
54980 + struct sockaddr_in saddr;
54981 + int err;
54982 +
54983 + saddr.sin_family = AF_INET;
54984 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
54985 + saddr.sin_port = isk->inet_sport;
54986 +
54987 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54988 + if (err)
54989 + return err;
54990 +
54991 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54992 + if (err)
54993 + return err;
54994 + }
54995 +
54996 + if (!curr->ips)
54997 + return 0;
54998 +
54999 + ip_addr = addr->sin_addr.s_addr;
55000 + ip_port = ntohs(addr->sin_port);
55001 +
55002 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55003 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55004 + current->role->roletype, cred->uid,
55005 + cred->gid, current->exec_file ?
55006 + gr_to_filename(current->exec_file->f_path.dentry,
55007 + current->exec_file->f_path.mnt) :
55008 + curr->filename, curr->filename,
55009 + &ip_addr, ip_port, type,
55010 + sk->sk_protocol, mode, &current->signal->saved_ip);
55011 + return 0;
55012 + }
55013 +
55014 + for (i = 0; i < curr->ip_num; i++) {
55015 + ip = *(curr->ips + i);
55016 + if (ip->iface != NULL) {
55017 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
55018 + p = strchr(iface, ':');
55019 + if (p != NULL)
55020 + *p = '\0';
55021 + dev = dev_get_by_name(sock_net(sk), iface);
55022 + if (dev == NULL)
55023 + continue;
55024 + idev = in_dev_get(dev);
55025 + if (idev == NULL) {
55026 + dev_put(dev);
55027 + continue;
55028 + }
55029 + rcu_read_lock();
55030 + for_ifa(idev) {
55031 + if (!strcmp(ip->iface, ifa->ifa_label)) {
55032 + our_addr = ifa->ifa_address;
55033 + our_netmask = 0xffffffff;
55034 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55035 + if (ret == 1) {
55036 + rcu_read_unlock();
55037 + in_dev_put(idev);
55038 + dev_put(dev);
55039 + return 0;
55040 + } else if (ret == 2) {
55041 + rcu_read_unlock();
55042 + in_dev_put(idev);
55043 + dev_put(dev);
55044 + goto denied;
55045 + }
55046 + }
55047 + } endfor_ifa(idev);
55048 + rcu_read_unlock();
55049 + in_dev_put(idev);
55050 + dev_put(dev);
55051 + } else {
55052 + our_addr = ip->addr;
55053 + our_netmask = ip->netmask;
55054 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55055 + if (ret == 1)
55056 + return 0;
55057 + else if (ret == 2)
55058 + goto denied;
55059 + }
55060 + }
55061 +
55062 +denied:
55063 + if (mode == GR_BIND)
55064 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55065 + else if (mode == GR_CONNECT)
55066 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55067 +
55068 + return -EACCES;
55069 +}
55070 +
55071 +int
55072 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55073 +{
55074 + /* always allow disconnection of dgram sockets with connect */
55075 + if (addr->sin_family == AF_UNSPEC)
55076 + return 0;
55077 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55078 +}
55079 +
55080 +int
55081 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55082 +{
55083 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55084 +}
55085 +
55086 +int gr_search_listen(struct socket *sock)
55087 +{
55088 + struct sock *sk = sock->sk;
55089 + struct sockaddr_in addr;
55090 +
55091 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55092 + addr.sin_port = inet_sk(sk)->inet_sport;
55093 +
55094 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55095 +}
55096 +
55097 +int gr_search_accept(struct socket *sock)
55098 +{
55099 + struct sock *sk = sock->sk;
55100 + struct sockaddr_in addr;
55101 +
55102 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55103 + addr.sin_port = inet_sk(sk)->inet_sport;
55104 +
55105 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55106 +}
55107 +
55108 +int
55109 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55110 +{
55111 + if (addr)
55112 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55113 + else {
55114 + struct sockaddr_in sin;
55115 + const struct inet_sock *inet = inet_sk(sk);
55116 +
55117 + sin.sin_addr.s_addr = inet->inet_daddr;
55118 + sin.sin_port = inet->inet_dport;
55119 +
55120 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55121 + }
55122 +}
55123 +
55124 +int
55125 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55126 +{
55127 + struct sockaddr_in sin;
55128 +
55129 + if (unlikely(skb->len < sizeof (struct udphdr)))
55130 + return 0; // skip this packet
55131 +
55132 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55133 + sin.sin_port = udp_hdr(skb)->source;
55134 +
55135 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55136 +}
55137 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55138 new file mode 100644
55139 index 0000000..25f54ef
55140 --- /dev/null
55141 +++ b/grsecurity/gracl_learn.c
55142 @@ -0,0 +1,207 @@
55143 +#include <linux/kernel.h>
55144 +#include <linux/mm.h>
55145 +#include <linux/sched.h>
55146 +#include <linux/poll.h>
55147 +#include <linux/string.h>
55148 +#include <linux/file.h>
55149 +#include <linux/types.h>
55150 +#include <linux/vmalloc.h>
55151 +#include <linux/grinternal.h>
55152 +
55153 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55154 + size_t count, loff_t *ppos);
55155 +extern int gr_acl_is_enabled(void);
55156 +
55157 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55158 +static int gr_learn_attached;
55159 +
55160 +/* use a 512k buffer */
55161 +#define LEARN_BUFFER_SIZE (512 * 1024)
55162 +
55163 +static DEFINE_SPINLOCK(gr_learn_lock);
55164 +static DEFINE_MUTEX(gr_learn_user_mutex);
55165 +
55166 + /* we need to maintain two buffers, so that the kernel context of grlearn
55167 + uses a mutex around the userspace copying, and the other kernel contexts
55168 + use a spinlock when copying into the buffer, since they cannot sleep
55169 +*/
55170 +static char *learn_buffer;
55171 +static char *learn_buffer_user;
55172 +static int learn_buffer_len;
55173 +static int learn_buffer_user_len;
55174 +
55175 +static ssize_t
55176 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55177 +{
55178 + DECLARE_WAITQUEUE(wait, current);
55179 + ssize_t retval = 0;
55180 +
55181 + add_wait_queue(&learn_wait, &wait);
55182 + set_current_state(TASK_INTERRUPTIBLE);
55183 + do {
55184 + mutex_lock(&gr_learn_user_mutex);
55185 + spin_lock(&gr_learn_lock);
55186 + if (learn_buffer_len)
55187 + break;
55188 + spin_unlock(&gr_learn_lock);
55189 + mutex_unlock(&gr_learn_user_mutex);
55190 + if (file->f_flags & O_NONBLOCK) {
55191 + retval = -EAGAIN;
55192 + goto out;
55193 + }
55194 + if (signal_pending(current)) {
55195 + retval = -ERESTARTSYS;
55196 + goto out;
55197 + }
55198 +
55199 + schedule();
55200 + } while (1);
55201 +
55202 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55203 + learn_buffer_user_len = learn_buffer_len;
55204 + retval = learn_buffer_len;
55205 + learn_buffer_len = 0;
55206 +
55207 + spin_unlock(&gr_learn_lock);
55208 +
55209 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55210 + retval = -EFAULT;
55211 +
55212 + mutex_unlock(&gr_learn_user_mutex);
55213 +out:
55214 + set_current_state(TASK_RUNNING);
55215 + remove_wait_queue(&learn_wait, &wait);
55216 + return retval;
55217 +}
55218 +
55219 +static unsigned int
55220 +poll_learn(struct file * file, poll_table * wait)
55221 +{
55222 + poll_wait(file, &learn_wait, wait);
55223 +
55224 + if (learn_buffer_len)
55225 + return (POLLIN | POLLRDNORM);
55226 +
55227 + return 0;
55228 +}
55229 +
55230 +void
55231 +gr_clear_learn_entries(void)
55232 +{
55233 + char *tmp;
55234 +
55235 + mutex_lock(&gr_learn_user_mutex);
55236 + spin_lock(&gr_learn_lock);
55237 + tmp = learn_buffer;
55238 + learn_buffer = NULL;
55239 + spin_unlock(&gr_learn_lock);
55240 + if (tmp)
55241 + vfree(tmp);
55242 + if (learn_buffer_user != NULL) {
55243 + vfree(learn_buffer_user);
55244 + learn_buffer_user = NULL;
55245 + }
55246 + learn_buffer_len = 0;
55247 + mutex_unlock(&gr_learn_user_mutex);
55248 +
55249 + return;
55250 +}
55251 +
55252 +void
55253 +gr_add_learn_entry(const char *fmt, ...)
55254 +{
55255 + va_list args;
55256 + unsigned int len;
55257 +
55258 + if (!gr_learn_attached)
55259 + return;
55260 +
55261 + spin_lock(&gr_learn_lock);
55262 +
55263 + /* leave a gap at the end so we know when it's "full" but don't have to
55264 + compute the exact length of the string we're trying to append
55265 + */
55266 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55267 + spin_unlock(&gr_learn_lock);
55268 + wake_up_interruptible(&learn_wait);
55269 + return;
55270 + }
55271 + if (learn_buffer == NULL) {
55272 + spin_unlock(&gr_learn_lock);
55273 + return;
55274 + }
55275 +
55276 + va_start(args, fmt);
55277 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55278 + va_end(args);
55279 +
55280 + learn_buffer_len += len + 1;
55281 +
55282 + spin_unlock(&gr_learn_lock);
55283 + wake_up_interruptible(&learn_wait);
55284 +
55285 + return;
55286 +}
55287 +
55288 +static int
55289 +open_learn(struct inode *inode, struct file *file)
55290 +{
55291 + if (file->f_mode & FMODE_READ && gr_learn_attached)
55292 + return -EBUSY;
55293 + if (file->f_mode & FMODE_READ) {
55294 + int retval = 0;
55295 + mutex_lock(&gr_learn_user_mutex);
55296 + if (learn_buffer == NULL)
55297 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55298 + if (learn_buffer_user == NULL)
55299 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55300 + if (learn_buffer == NULL) {
55301 + retval = -ENOMEM;
55302 + goto out_error;
55303 + }
55304 + if (learn_buffer_user == NULL) {
55305 + retval = -ENOMEM;
55306 + goto out_error;
55307 + }
55308 + learn_buffer_len = 0;
55309 + learn_buffer_user_len = 0;
55310 + gr_learn_attached = 1;
55311 +out_error:
55312 + mutex_unlock(&gr_learn_user_mutex);
55313 + return retval;
55314 + }
55315 + return 0;
55316 +}
55317 +
55318 +static int
55319 +close_learn(struct inode *inode, struct file *file)
55320 +{
55321 + if (file->f_mode & FMODE_READ) {
55322 + char *tmp = NULL;
55323 + mutex_lock(&gr_learn_user_mutex);
55324 + spin_lock(&gr_learn_lock);
55325 + tmp = learn_buffer;
55326 + learn_buffer = NULL;
55327 + spin_unlock(&gr_learn_lock);
55328 + if (tmp)
55329 + vfree(tmp);
55330 + if (learn_buffer_user != NULL) {
55331 + vfree(learn_buffer_user);
55332 + learn_buffer_user = NULL;
55333 + }
55334 + learn_buffer_len = 0;
55335 + learn_buffer_user_len = 0;
55336 + gr_learn_attached = 0;
55337 + mutex_unlock(&gr_learn_user_mutex);
55338 + }
55339 +
55340 + return 0;
55341 +}
55342 +
55343 +const struct file_operations grsec_fops = {
55344 + .read = read_learn,
55345 + .write = write_grsec_handler,
55346 + .open = open_learn,
55347 + .release = close_learn,
55348 + .poll = poll_learn,
55349 +};
55350 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55351 new file mode 100644
55352 index 0000000..39645c9
55353 --- /dev/null
55354 +++ b/grsecurity/gracl_res.c
55355 @@ -0,0 +1,68 @@
55356 +#include <linux/kernel.h>
55357 +#include <linux/sched.h>
55358 +#include <linux/gracl.h>
55359 +#include <linux/grinternal.h>
55360 +
55361 +static const char *restab_log[] = {
55362 + [RLIMIT_CPU] = "RLIMIT_CPU",
55363 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55364 + [RLIMIT_DATA] = "RLIMIT_DATA",
55365 + [RLIMIT_STACK] = "RLIMIT_STACK",
55366 + [RLIMIT_CORE] = "RLIMIT_CORE",
55367 + [RLIMIT_RSS] = "RLIMIT_RSS",
55368 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
55369 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55370 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55371 + [RLIMIT_AS] = "RLIMIT_AS",
55372 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55373 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55374 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55375 + [RLIMIT_NICE] = "RLIMIT_NICE",
55376 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55377 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55378 + [GR_CRASH_RES] = "RLIMIT_CRASH"
55379 +};
55380 +
55381 +void
55382 +gr_log_resource(const struct task_struct *task,
55383 + const int res, const unsigned long wanted, const int gt)
55384 +{
55385 + const struct cred *cred;
55386 + unsigned long rlim;
55387 +
55388 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
55389 + return;
55390 +
55391 + // not yet supported resource
55392 + if (unlikely(!restab_log[res]))
55393 + return;
55394 +
55395 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55396 + rlim = task_rlimit_max(task, res);
55397 + else
55398 + rlim = task_rlimit(task, res);
55399 +
55400 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55401 + return;
55402 +
55403 + rcu_read_lock();
55404 + cred = __task_cred(task);
55405 +
55406 + if (res == RLIMIT_NPROC &&
55407 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55408 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55409 + goto out_rcu_unlock;
55410 + else if (res == RLIMIT_MEMLOCK &&
55411 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55412 + goto out_rcu_unlock;
55413 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55414 + goto out_rcu_unlock;
55415 + rcu_read_unlock();
55416 +
55417 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55418 +
55419 + return;
55420 +out_rcu_unlock:
55421 + rcu_read_unlock();
55422 + return;
55423 +}
55424 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55425 new file mode 100644
55426 index 0000000..5556be3
55427 --- /dev/null
55428 +++ b/grsecurity/gracl_segv.c
55429 @@ -0,0 +1,299 @@
55430 +#include <linux/kernel.h>
55431 +#include <linux/mm.h>
55432 +#include <asm/uaccess.h>
55433 +#include <asm/errno.h>
55434 +#include <asm/mman.h>
55435 +#include <net/sock.h>
55436 +#include <linux/file.h>
55437 +#include <linux/fs.h>
55438 +#include <linux/net.h>
55439 +#include <linux/in.h>
55440 +#include <linux/slab.h>
55441 +#include <linux/types.h>
55442 +#include <linux/sched.h>
55443 +#include <linux/timer.h>
55444 +#include <linux/gracl.h>
55445 +#include <linux/grsecurity.h>
55446 +#include <linux/grinternal.h>
55447 +
55448 +static struct crash_uid *uid_set;
55449 +static unsigned short uid_used;
55450 +static DEFINE_SPINLOCK(gr_uid_lock);
55451 +extern rwlock_t gr_inode_lock;
55452 +extern struct acl_subject_label *
55453 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
55454 + struct acl_role_label *role);
55455 +
55456 +#ifdef CONFIG_BTRFS_FS
55457 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55458 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55459 +#endif
55460 +
55461 +static inline dev_t __get_dev(const struct dentry *dentry)
55462 +{
55463 +#ifdef CONFIG_BTRFS_FS
55464 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55465 + return get_btrfs_dev_from_inode(dentry->d_inode);
55466 + else
55467 +#endif
55468 + return dentry->d_inode->i_sb->s_dev;
55469 +}
55470 +
55471 +int
55472 +gr_init_uidset(void)
55473 +{
55474 + uid_set =
55475 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
55476 + uid_used = 0;
55477 +
55478 + return uid_set ? 1 : 0;
55479 +}
55480 +
55481 +void
55482 +gr_free_uidset(void)
55483 +{
55484 + if (uid_set)
55485 + kfree(uid_set);
55486 +
55487 + return;
55488 +}
55489 +
55490 +int
55491 +gr_find_uid(const uid_t uid)
55492 +{
55493 + struct crash_uid *tmp = uid_set;
55494 + uid_t buid;
55495 + int low = 0, high = uid_used - 1, mid;
55496 +
55497 + while (high >= low) {
55498 + mid = (low + high) >> 1;
55499 + buid = tmp[mid].uid;
55500 + if (buid == uid)
55501 + return mid;
55502 + if (buid > uid)
55503 + high = mid - 1;
55504 + if (buid < uid)
55505 + low = mid + 1;
55506 + }
55507 +
55508 + return -1;
55509 +}
55510 +
55511 +static __inline__ void
55512 +gr_insertsort(void)
55513 +{
55514 + unsigned short i, j;
55515 + struct crash_uid index;
55516 +
55517 + for (i = 1; i < uid_used; i++) {
55518 + index = uid_set[i];
55519 + j = i;
55520 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
55521 + uid_set[j] = uid_set[j - 1];
55522 + j--;
55523 + }
55524 + uid_set[j] = index;
55525 + }
55526 +
55527 + return;
55528 +}
55529 +
55530 +static __inline__ void
55531 +gr_insert_uid(const uid_t uid, const unsigned long expires)
55532 +{
55533 + int loc;
55534 +
55535 + if (uid_used == GR_UIDTABLE_MAX)
55536 + return;
55537 +
55538 + loc = gr_find_uid(uid);
55539 +
55540 + if (loc >= 0) {
55541 + uid_set[loc].expires = expires;
55542 + return;
55543 + }
55544 +
55545 + uid_set[uid_used].uid = uid;
55546 + uid_set[uid_used].expires = expires;
55547 + uid_used++;
55548 +
55549 + gr_insertsort();
55550 +
55551 + return;
55552 +}
55553 +
55554 +void
55555 +gr_remove_uid(const unsigned short loc)
55556 +{
55557 + unsigned short i;
55558 +
55559 + for (i = loc + 1; i < uid_used; i++)
55560 + uid_set[i - 1] = uid_set[i];
55561 +
55562 + uid_used--;
55563 +
55564 + return;
55565 +}
55566 +
55567 +int
55568 +gr_check_crash_uid(const uid_t uid)
55569 +{
55570 + int loc;
55571 + int ret = 0;
55572 +
55573 + if (unlikely(!gr_acl_is_enabled()))
55574 + return 0;
55575 +
55576 + spin_lock(&gr_uid_lock);
55577 + loc = gr_find_uid(uid);
55578 +
55579 + if (loc < 0)
55580 + goto out_unlock;
55581 +
55582 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
55583 + gr_remove_uid(loc);
55584 + else
55585 + ret = 1;
55586 +
55587 +out_unlock:
55588 + spin_unlock(&gr_uid_lock);
55589 + return ret;
55590 +}
55591 +
55592 +static __inline__ int
55593 +proc_is_setxid(const struct cred *cred)
55594 +{
55595 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
55596 + cred->uid != cred->fsuid)
55597 + return 1;
55598 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
55599 + cred->gid != cred->fsgid)
55600 + return 1;
55601 +
55602 + return 0;
55603 +}
55604 +
55605 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
55606 +
55607 +void
55608 +gr_handle_crash(struct task_struct *task, const int sig)
55609 +{
55610 + struct acl_subject_label *curr;
55611 + struct task_struct *tsk, *tsk2;
55612 + const struct cred *cred;
55613 + const struct cred *cred2;
55614 +
55615 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
55616 + return;
55617 +
55618 + if (unlikely(!gr_acl_is_enabled()))
55619 + return;
55620 +
55621 + curr = task->acl;
55622 +
55623 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
55624 + return;
55625 +
55626 + if (time_before_eq(curr->expires, get_seconds())) {
55627 + curr->expires = 0;
55628 + curr->crashes = 0;
55629 + }
55630 +
55631 + curr->crashes++;
55632 +
55633 + if (!curr->expires)
55634 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
55635 +
55636 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55637 + time_after(curr->expires, get_seconds())) {
55638 + rcu_read_lock();
55639 + cred = __task_cred(task);
55640 + if (cred->uid && proc_is_setxid(cred)) {
55641 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55642 + spin_lock(&gr_uid_lock);
55643 + gr_insert_uid(cred->uid, curr->expires);
55644 + spin_unlock(&gr_uid_lock);
55645 + curr->expires = 0;
55646 + curr->crashes = 0;
55647 + read_lock(&tasklist_lock);
55648 + do_each_thread(tsk2, tsk) {
55649 + cred2 = __task_cred(tsk);
55650 + if (tsk != task && cred2->uid == cred->uid)
55651 + gr_fake_force_sig(SIGKILL, tsk);
55652 + } while_each_thread(tsk2, tsk);
55653 + read_unlock(&tasklist_lock);
55654 + } else {
55655 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55656 + read_lock(&tasklist_lock);
55657 + read_lock(&grsec_exec_file_lock);
55658 + do_each_thread(tsk2, tsk) {
55659 + if (likely(tsk != task)) {
55660 + // if this thread has the same subject as the one that triggered
55661 + // RES_CRASH and it's the same binary, kill it
55662 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
55663 + gr_fake_force_sig(SIGKILL, tsk);
55664 + }
55665 + } while_each_thread(tsk2, tsk);
55666 + read_unlock(&grsec_exec_file_lock);
55667 + read_unlock(&tasklist_lock);
55668 + }
55669 + rcu_read_unlock();
55670 + }
55671 +
55672 + return;
55673 +}
55674 +
55675 +int
55676 +gr_check_crash_exec(const struct file *filp)
55677 +{
55678 + struct acl_subject_label *curr;
55679 +
55680 + if (unlikely(!gr_acl_is_enabled()))
55681 + return 0;
55682 +
55683 + read_lock(&gr_inode_lock);
55684 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
55685 + __get_dev(filp->f_path.dentry),
55686 + current->role);
55687 + read_unlock(&gr_inode_lock);
55688 +
55689 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
55690 + (!curr->crashes && !curr->expires))
55691 + return 0;
55692 +
55693 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55694 + time_after(curr->expires, get_seconds()))
55695 + return 1;
55696 + else if (time_before_eq(curr->expires, get_seconds())) {
55697 + curr->crashes = 0;
55698 + curr->expires = 0;
55699 + }
55700 +
55701 + return 0;
55702 +}
55703 +
55704 +void
55705 +gr_handle_alertkill(struct task_struct *task)
55706 +{
55707 + struct acl_subject_label *curracl;
55708 + __u32 curr_ip;
55709 + struct task_struct *p, *p2;
55710 +
55711 + if (unlikely(!gr_acl_is_enabled()))
55712 + return;
55713 +
55714 + curracl = task->acl;
55715 + curr_ip = task->signal->curr_ip;
55716 +
55717 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
55718 + read_lock(&tasklist_lock);
55719 + do_each_thread(p2, p) {
55720 + if (p->signal->curr_ip == curr_ip)
55721 + gr_fake_force_sig(SIGKILL, p);
55722 + } while_each_thread(p2, p);
55723 + read_unlock(&tasklist_lock);
55724 + } else if (curracl->mode & GR_KILLPROC)
55725 + gr_fake_force_sig(SIGKILL, task);
55726 +
55727 + return;
55728 +}
55729 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
55730 new file mode 100644
55731 index 0000000..9d83a69
55732 --- /dev/null
55733 +++ b/grsecurity/gracl_shm.c
55734 @@ -0,0 +1,40 @@
55735 +#include <linux/kernel.h>
55736 +#include <linux/mm.h>
55737 +#include <linux/sched.h>
55738 +#include <linux/file.h>
55739 +#include <linux/ipc.h>
55740 +#include <linux/gracl.h>
55741 +#include <linux/grsecurity.h>
55742 +#include <linux/grinternal.h>
55743 +
55744 +int
55745 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55746 + const time_t shm_createtime, const uid_t cuid, const int shmid)
55747 +{
55748 + struct task_struct *task;
55749 +
55750 + if (!gr_acl_is_enabled())
55751 + return 1;
55752 +
55753 + rcu_read_lock();
55754 + read_lock(&tasklist_lock);
55755 +
55756 + task = find_task_by_vpid(shm_cprid);
55757 +
55758 + if (unlikely(!task))
55759 + task = find_task_by_vpid(shm_lapid);
55760 +
55761 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
55762 + (task->pid == shm_lapid)) &&
55763 + (task->acl->mode & GR_PROTSHM) &&
55764 + (task->acl != current->acl))) {
55765 + read_unlock(&tasklist_lock);
55766 + rcu_read_unlock();
55767 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
55768 + return 0;
55769 + }
55770 + read_unlock(&tasklist_lock);
55771 + rcu_read_unlock();
55772 +
55773 + return 1;
55774 +}
55775 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
55776 new file mode 100644
55777 index 0000000..bc0be01
55778 --- /dev/null
55779 +++ b/grsecurity/grsec_chdir.c
55780 @@ -0,0 +1,19 @@
55781 +#include <linux/kernel.h>
55782 +#include <linux/sched.h>
55783 +#include <linux/fs.h>
55784 +#include <linux/file.h>
55785 +#include <linux/grsecurity.h>
55786 +#include <linux/grinternal.h>
55787 +
55788 +void
55789 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
55790 +{
55791 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55792 + if ((grsec_enable_chdir && grsec_enable_group &&
55793 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
55794 + !grsec_enable_group)) {
55795 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
55796 + }
55797 +#endif
55798 + return;
55799 +}
55800 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
55801 new file mode 100644
55802 index 0000000..9807ee2
55803 --- /dev/null
55804 +++ b/grsecurity/grsec_chroot.c
55805 @@ -0,0 +1,368 @@
55806 +#include <linux/kernel.h>
55807 +#include <linux/module.h>
55808 +#include <linux/sched.h>
55809 +#include <linux/file.h>
55810 +#include <linux/fs.h>
55811 +#include <linux/mount.h>
55812 +#include <linux/types.h>
55813 +#include "../fs/mount.h"
55814 +#include <linux/grsecurity.h>
55815 +#include <linux/grinternal.h>
55816 +
55817 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
55818 +{
55819 +#ifdef CONFIG_GRKERNSEC
55820 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
55821 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
55822 + task->gr_is_chrooted = 1;
55823 + else
55824 + task->gr_is_chrooted = 0;
55825 +
55826 + task->gr_chroot_dentry = path->dentry;
55827 +#endif
55828 + return;
55829 +}
55830 +
55831 +void gr_clear_chroot_entries(struct task_struct *task)
55832 +{
55833 +#ifdef CONFIG_GRKERNSEC
55834 + task->gr_is_chrooted = 0;
55835 + task->gr_chroot_dentry = NULL;
55836 +#endif
55837 + return;
55838 +}
55839 +
55840 +int
55841 +gr_handle_chroot_unix(const pid_t pid)
55842 +{
55843 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55844 + struct task_struct *p;
55845 +
55846 + if (unlikely(!grsec_enable_chroot_unix))
55847 + return 1;
55848 +
55849 + if (likely(!proc_is_chrooted(current)))
55850 + return 1;
55851 +
55852 + rcu_read_lock();
55853 + read_lock(&tasklist_lock);
55854 + p = find_task_by_vpid_unrestricted(pid);
55855 + if (unlikely(p && !have_same_root(current, p))) {
55856 + read_unlock(&tasklist_lock);
55857 + rcu_read_unlock();
55858 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
55859 + return 0;
55860 + }
55861 + read_unlock(&tasklist_lock);
55862 + rcu_read_unlock();
55863 +#endif
55864 + return 1;
55865 +}
55866 +
55867 +int
55868 +gr_handle_chroot_nice(void)
55869 +{
55870 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55871 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
55872 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
55873 + return -EPERM;
55874 + }
55875 +#endif
55876 + return 0;
55877 +}
55878 +
55879 +int
55880 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
55881 +{
55882 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55883 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
55884 + && proc_is_chrooted(current)) {
55885 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
55886 + return -EACCES;
55887 + }
55888 +#endif
55889 + return 0;
55890 +}
55891 +
55892 +int
55893 +gr_handle_chroot_rawio(const struct inode *inode)
55894 +{
55895 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55896 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55897 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
55898 + return 1;
55899 +#endif
55900 + return 0;
55901 +}
55902 +
55903 +int
55904 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
55905 +{
55906 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55907 + struct task_struct *p;
55908 + int ret = 0;
55909 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
55910 + return ret;
55911 +
55912 + read_lock(&tasklist_lock);
55913 + do_each_pid_task(pid, type, p) {
55914 + if (!have_same_root(current, p)) {
55915 + ret = 1;
55916 + goto out;
55917 + }
55918 + } while_each_pid_task(pid, type, p);
55919 +out:
55920 + read_unlock(&tasklist_lock);
55921 + return ret;
55922 +#endif
55923 + return 0;
55924 +}
55925 +
55926 +int
55927 +gr_pid_is_chrooted(struct task_struct *p)
55928 +{
55929 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55930 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
55931 + return 0;
55932 +
55933 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
55934 + !have_same_root(current, p)) {
55935 + return 1;
55936 + }
55937 +#endif
55938 + return 0;
55939 +}
55940 +
55941 +EXPORT_SYMBOL(gr_pid_is_chrooted);
55942 +
55943 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
55944 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
55945 +{
55946 + struct path path, currentroot;
55947 + int ret = 0;
55948 +
55949 + path.dentry = (struct dentry *)u_dentry;
55950 + path.mnt = (struct vfsmount *)u_mnt;
55951 + get_fs_root(current->fs, &currentroot);
55952 + if (path_is_under(&path, &currentroot))
55953 + ret = 1;
55954 + path_put(&currentroot);
55955 +
55956 + return ret;
55957 +}
55958 +#endif
55959 +
55960 +int
55961 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
55962 +{
55963 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55964 + if (!grsec_enable_chroot_fchdir)
55965 + return 1;
55966 +
55967 + if (!proc_is_chrooted(current))
55968 + return 1;
55969 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
55970 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
55971 + return 0;
55972 + }
55973 +#endif
55974 + return 1;
55975 +}
55976 +
55977 +int
55978 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55979 + const time_t shm_createtime)
55980 +{
55981 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55982 + struct task_struct *p;
55983 + time_t starttime;
55984 +
55985 + if (unlikely(!grsec_enable_chroot_shmat))
55986 + return 1;
55987 +
55988 + if (likely(!proc_is_chrooted(current)))
55989 + return 1;
55990 +
55991 + rcu_read_lock();
55992 + read_lock(&tasklist_lock);
55993 +
55994 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
55995 + starttime = p->start_time.tv_sec;
55996 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
55997 + if (have_same_root(current, p)) {
55998 + goto allow;
55999 + } else {
56000 + read_unlock(&tasklist_lock);
56001 + rcu_read_unlock();
56002 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56003 + return 0;
56004 + }
56005 + }
56006 + /* creator exited, pid reuse, fall through to next check */
56007 + }
56008 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56009 + if (unlikely(!have_same_root(current, p))) {
56010 + read_unlock(&tasklist_lock);
56011 + rcu_read_unlock();
56012 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56013 + return 0;
56014 + }
56015 + }
56016 +
56017 +allow:
56018 + read_unlock(&tasklist_lock);
56019 + rcu_read_unlock();
56020 +#endif
56021 + return 1;
56022 +}
56023 +
56024 +void
56025 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56026 +{
56027 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56028 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56029 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56030 +#endif
56031 + return;
56032 +}
56033 +
56034 +int
56035 +gr_handle_chroot_mknod(const struct dentry *dentry,
56036 + const struct vfsmount *mnt, const int mode)
56037 +{
56038 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56039 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56040 + proc_is_chrooted(current)) {
56041 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56042 + return -EPERM;
56043 + }
56044 +#endif
56045 + return 0;
56046 +}
56047 +
56048 +int
56049 +gr_handle_chroot_mount(const struct dentry *dentry,
56050 + const struct vfsmount *mnt, const char *dev_name)
56051 +{
56052 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56053 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56054 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56055 + return -EPERM;
56056 + }
56057 +#endif
56058 + return 0;
56059 +}
56060 +
56061 +int
56062 +gr_handle_chroot_pivot(void)
56063 +{
56064 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56065 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56066 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56067 + return -EPERM;
56068 + }
56069 +#endif
56070 + return 0;
56071 +}
56072 +
56073 +int
56074 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56075 +{
56076 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56077 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56078 + !gr_is_outside_chroot(dentry, mnt)) {
56079 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56080 + return -EPERM;
56081 + }
56082 +#endif
56083 + return 0;
56084 +}
56085 +
56086 +extern const char *captab_log[];
56087 +extern int captab_log_entries;
56088 +
56089 +int
56090 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56091 +{
56092 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56093 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56094 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56095 + if (cap_raised(chroot_caps, cap)) {
56096 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56097 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
56098 + }
56099 + return 0;
56100 + }
56101 + }
56102 +#endif
56103 + return 1;
56104 +}
56105 +
56106 +int
56107 +gr_chroot_is_capable(const int cap)
56108 +{
56109 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56110 + return gr_task_chroot_is_capable(current, current_cred(), cap);
56111 +#endif
56112 + return 1;
56113 +}
56114 +
56115 +int
56116 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56117 +{
56118 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56119 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56120 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56121 + if (cap_raised(chroot_caps, cap)) {
56122 + return 0;
56123 + }
56124 + }
56125 +#endif
56126 + return 1;
56127 +}
56128 +
56129 +int
56130 +gr_chroot_is_capable_nolog(const int cap)
56131 +{
56132 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56133 + return gr_task_chroot_is_capable_nolog(current, cap);
56134 +#endif
56135 + return 1;
56136 +}
56137 +
56138 +int
56139 +gr_handle_chroot_sysctl(const int op)
56140 +{
56141 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56142 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56143 + proc_is_chrooted(current))
56144 + return -EACCES;
56145 +#endif
56146 + return 0;
56147 +}
56148 +
56149 +void
56150 +gr_handle_chroot_chdir(struct path *path)
56151 +{
56152 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56153 + if (grsec_enable_chroot_chdir)
56154 + set_fs_pwd(current->fs, path);
56155 +#endif
56156 + return;
56157 +}
56158 +
56159 +int
56160 +gr_handle_chroot_chmod(const struct dentry *dentry,
56161 + const struct vfsmount *mnt, const int mode)
56162 +{
56163 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56164 + /* allow chmod +s on directories, but not files */
56165 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56166 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56167 + proc_is_chrooted(current)) {
56168 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56169 + return -EPERM;
56170 + }
56171 +#endif
56172 + return 0;
56173 +}
56174 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56175 new file mode 100644
56176 index 0000000..213ad8b
56177 --- /dev/null
56178 +++ b/grsecurity/grsec_disabled.c
56179 @@ -0,0 +1,437 @@
56180 +#include <linux/kernel.h>
56181 +#include <linux/module.h>
56182 +#include <linux/sched.h>
56183 +#include <linux/file.h>
56184 +#include <linux/fs.h>
56185 +#include <linux/kdev_t.h>
56186 +#include <linux/net.h>
56187 +#include <linux/in.h>
56188 +#include <linux/ip.h>
56189 +#include <linux/skbuff.h>
56190 +#include <linux/sysctl.h>
56191 +
56192 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56193 +void
56194 +pax_set_initial_flags(struct linux_binprm *bprm)
56195 +{
56196 + return;
56197 +}
56198 +#endif
56199 +
56200 +#ifdef CONFIG_SYSCTL
56201 +__u32
56202 +gr_handle_sysctl(const struct ctl_table * table, const int op)
56203 +{
56204 + return 0;
56205 +}
56206 +#endif
56207 +
56208 +#ifdef CONFIG_TASKSTATS
56209 +int gr_is_taskstats_denied(int pid)
56210 +{
56211 + return 0;
56212 +}
56213 +#endif
56214 +
56215 +int
56216 +gr_acl_is_enabled(void)
56217 +{
56218 + return 0;
56219 +}
56220 +
56221 +void
56222 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56223 +{
56224 + return;
56225 +}
56226 +
56227 +int
56228 +gr_handle_rawio(const struct inode *inode)
56229 +{
56230 + return 0;
56231 +}
56232 +
56233 +void
56234 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56235 +{
56236 + return;
56237 +}
56238 +
56239 +int
56240 +gr_handle_ptrace(struct task_struct *task, const long request)
56241 +{
56242 + return 0;
56243 +}
56244 +
56245 +int
56246 +gr_handle_proc_ptrace(struct task_struct *task)
56247 +{
56248 + return 0;
56249 +}
56250 +
56251 +void
56252 +gr_learn_resource(const struct task_struct *task,
56253 + const int res, const unsigned long wanted, const int gt)
56254 +{
56255 + return;
56256 +}
56257 +
56258 +int
56259 +gr_set_acls(const int type)
56260 +{
56261 + return 0;
56262 +}
56263 +
56264 +int
56265 +gr_check_hidden_task(const struct task_struct *tsk)
56266 +{
56267 + return 0;
56268 +}
56269 +
56270 +int
56271 +gr_check_protected_task(const struct task_struct *task)
56272 +{
56273 + return 0;
56274 +}
56275 +
56276 +int
56277 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56278 +{
56279 + return 0;
56280 +}
56281 +
56282 +void
56283 +gr_copy_label(struct task_struct *tsk)
56284 +{
56285 + return;
56286 +}
56287 +
56288 +void
56289 +gr_set_pax_flags(struct task_struct *task)
56290 +{
56291 + return;
56292 +}
56293 +
56294 +int
56295 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56296 + const int unsafe_share)
56297 +{
56298 + return 0;
56299 +}
56300 +
56301 +void
56302 +gr_handle_delete(const ino_t ino, const dev_t dev)
56303 +{
56304 + return;
56305 +}
56306 +
56307 +void
56308 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56309 +{
56310 + return;
56311 +}
56312 +
56313 +void
56314 +gr_handle_crash(struct task_struct *task, const int sig)
56315 +{
56316 + return;
56317 +}
56318 +
56319 +int
56320 +gr_check_crash_exec(const struct file *filp)
56321 +{
56322 + return 0;
56323 +}
56324 +
56325 +int
56326 +gr_check_crash_uid(const uid_t uid)
56327 +{
56328 + return 0;
56329 +}
56330 +
56331 +void
56332 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56333 + struct dentry *old_dentry,
56334 + struct dentry *new_dentry,
56335 + struct vfsmount *mnt, const __u8 replace)
56336 +{
56337 + return;
56338 +}
56339 +
56340 +int
56341 +gr_search_socket(const int family, const int type, const int protocol)
56342 +{
56343 + return 1;
56344 +}
56345 +
56346 +int
56347 +gr_search_connectbind(const int mode, const struct socket *sock,
56348 + const struct sockaddr_in *addr)
56349 +{
56350 + return 0;
56351 +}
56352 +
56353 +void
56354 +gr_handle_alertkill(struct task_struct *task)
56355 +{
56356 + return;
56357 +}
56358 +
56359 +__u32
56360 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56361 +{
56362 + return 1;
56363 +}
56364 +
56365 +__u32
56366 +gr_acl_handle_hidden_file(const struct dentry * dentry,
56367 + const struct vfsmount * mnt)
56368 +{
56369 + return 1;
56370 +}
56371 +
56372 +__u32
56373 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56374 + int acc_mode)
56375 +{
56376 + return 1;
56377 +}
56378 +
56379 +__u32
56380 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56381 +{
56382 + return 1;
56383 +}
56384 +
56385 +__u32
56386 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56387 +{
56388 + return 1;
56389 +}
56390 +
56391 +int
56392 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56393 + unsigned int *vm_flags)
56394 +{
56395 + return 1;
56396 +}
56397 +
56398 +__u32
56399 +gr_acl_handle_truncate(const struct dentry * dentry,
56400 + const struct vfsmount * mnt)
56401 +{
56402 + return 1;
56403 +}
56404 +
56405 +__u32
56406 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56407 +{
56408 + return 1;
56409 +}
56410 +
56411 +__u32
56412 +gr_acl_handle_access(const struct dentry * dentry,
56413 + const struct vfsmount * mnt, const int fmode)
56414 +{
56415 + return 1;
56416 +}
56417 +
56418 +__u32
56419 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56420 + umode_t *mode)
56421 +{
56422 + return 1;
56423 +}
56424 +
56425 +__u32
56426 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56427 +{
56428 + return 1;
56429 +}
56430 +
56431 +__u32
56432 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
56433 +{
56434 + return 1;
56435 +}
56436 +
56437 +void
56438 +grsecurity_init(void)
56439 +{
56440 + return;
56441 +}
56442 +
56443 +umode_t gr_acl_umask(void)
56444 +{
56445 + return 0;
56446 +}
56447 +
56448 +__u32
56449 +gr_acl_handle_mknod(const struct dentry * new_dentry,
56450 + const struct dentry * parent_dentry,
56451 + const struct vfsmount * parent_mnt,
56452 + const int mode)
56453 +{
56454 + return 1;
56455 +}
56456 +
56457 +__u32
56458 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
56459 + const struct dentry * parent_dentry,
56460 + const struct vfsmount * parent_mnt)
56461 +{
56462 + return 1;
56463 +}
56464 +
56465 +__u32
56466 +gr_acl_handle_symlink(const struct dentry * new_dentry,
56467 + const struct dentry * parent_dentry,
56468 + const struct vfsmount * parent_mnt, const char *from)
56469 +{
56470 + return 1;
56471 +}
56472 +
56473 +__u32
56474 +gr_acl_handle_link(const struct dentry * new_dentry,
56475 + const struct dentry * parent_dentry,
56476 + const struct vfsmount * parent_mnt,
56477 + const struct dentry * old_dentry,
56478 + const struct vfsmount * old_mnt, const char *to)
56479 +{
56480 + return 1;
56481 +}
56482 +
56483 +int
56484 +gr_acl_handle_rename(const struct dentry *new_dentry,
56485 + const struct dentry *parent_dentry,
56486 + const struct vfsmount *parent_mnt,
56487 + const struct dentry *old_dentry,
56488 + const struct inode *old_parent_inode,
56489 + const struct vfsmount *old_mnt, const char *newname)
56490 +{
56491 + return 0;
56492 +}
56493 +
56494 +int
56495 +gr_acl_handle_filldir(const struct file *file, const char *name,
56496 + const int namelen, const ino_t ino)
56497 +{
56498 + return 1;
56499 +}
56500 +
56501 +int
56502 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56503 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56504 +{
56505 + return 1;
56506 +}
56507 +
56508 +int
56509 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
56510 +{
56511 + return 0;
56512 +}
56513 +
56514 +int
56515 +gr_search_accept(const struct socket *sock)
56516 +{
56517 + return 0;
56518 +}
56519 +
56520 +int
56521 +gr_search_listen(const struct socket *sock)
56522 +{
56523 + return 0;
56524 +}
56525 +
56526 +int
56527 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
56528 +{
56529 + return 0;
56530 +}
56531 +
56532 +__u32
56533 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
56534 +{
56535 + return 1;
56536 +}
56537 +
56538 +__u32
56539 +gr_acl_handle_creat(const struct dentry * dentry,
56540 + const struct dentry * p_dentry,
56541 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56542 + const int imode)
56543 +{
56544 + return 1;
56545 +}
56546 +
56547 +void
56548 +gr_acl_handle_exit(void)
56549 +{
56550 + return;
56551 +}
56552 +
56553 +int
56554 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56555 +{
56556 + return 1;
56557 +}
56558 +
56559 +void
56560 +gr_set_role_label(const uid_t uid, const gid_t gid)
56561 +{
56562 + return;
56563 +}
56564 +
56565 +int
56566 +gr_acl_handle_procpidmem(const struct task_struct *task)
56567 +{
56568 + return 0;
56569 +}
56570 +
56571 +int
56572 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
56573 +{
56574 + return 0;
56575 +}
56576 +
56577 +int
56578 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
56579 +{
56580 + return 0;
56581 +}
56582 +
56583 +void
56584 +gr_set_kernel_label(struct task_struct *task)
56585 +{
56586 + return;
56587 +}
56588 +
56589 +int
56590 +gr_check_user_change(int real, int effective, int fs)
56591 +{
56592 + return 0;
56593 +}
56594 +
56595 +int
56596 +gr_check_group_change(int real, int effective, int fs)
56597 +{
56598 + return 0;
56599 +}
56600 +
56601 +int gr_acl_enable_at_secure(void)
56602 +{
56603 + return 0;
56604 +}
56605 +
56606 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56607 +{
56608 + return dentry->d_inode->i_sb->s_dev;
56609 +}
56610 +
56611 +EXPORT_SYMBOL(gr_learn_resource);
56612 +EXPORT_SYMBOL(gr_set_kernel_label);
56613 +#ifdef CONFIG_SECURITY
56614 +EXPORT_SYMBOL(gr_check_user_change);
56615 +EXPORT_SYMBOL(gr_check_group_change);
56616 +#endif
56617 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
56618 new file mode 100644
56619 index 0000000..abfa971
56620 --- /dev/null
56621 +++ b/grsecurity/grsec_exec.c
56622 @@ -0,0 +1,174 @@
56623 +#include <linux/kernel.h>
56624 +#include <linux/sched.h>
56625 +#include <linux/file.h>
56626 +#include <linux/binfmts.h>
56627 +#include <linux/fs.h>
56628 +#include <linux/types.h>
56629 +#include <linux/grdefs.h>
56630 +#include <linux/grsecurity.h>
56631 +#include <linux/grinternal.h>
56632 +#include <linux/capability.h>
56633 +#include <linux/module.h>
56634 +
56635 +#include <asm/uaccess.h>
56636 +
56637 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56638 +static char gr_exec_arg_buf[132];
56639 +static DEFINE_MUTEX(gr_exec_arg_mutex);
56640 +#endif
56641 +
56642 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
56643 +
56644 +void
56645 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
56646 +{
56647 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56648 + char *grarg = gr_exec_arg_buf;
56649 + unsigned int i, x, execlen = 0;
56650 + char c;
56651 +
56652 + if (!((grsec_enable_execlog && grsec_enable_group &&
56653 + in_group_p(grsec_audit_gid))
56654 + || (grsec_enable_execlog && !grsec_enable_group)))
56655 + return;
56656 +
56657 + mutex_lock(&gr_exec_arg_mutex);
56658 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
56659 +
56660 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
56661 + const char __user *p;
56662 + unsigned int len;
56663 +
56664 + p = get_user_arg_ptr(argv, i);
56665 + if (IS_ERR(p))
56666 + goto log;
56667 +
56668 + len = strnlen_user(p, 128 - execlen);
56669 + if (len > 128 - execlen)
56670 + len = 128 - execlen;
56671 + else if (len > 0)
56672 + len--;
56673 + if (copy_from_user(grarg + execlen, p, len))
56674 + goto log;
56675 +
56676 + /* rewrite unprintable characters */
56677 + for (x = 0; x < len; x++) {
56678 + c = *(grarg + execlen + x);
56679 + if (c < 32 || c > 126)
56680 + *(grarg + execlen + x) = ' ';
56681 + }
56682 +
56683 + execlen += len;
56684 + *(grarg + execlen) = ' ';
56685 + *(grarg + execlen + 1) = '\0';
56686 + execlen++;
56687 + }
56688 +
56689 + log:
56690 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
56691 + bprm->file->f_path.mnt, grarg);
56692 + mutex_unlock(&gr_exec_arg_mutex);
56693 +#endif
56694 + return;
56695 +}
56696 +
56697 +#ifdef CONFIG_GRKERNSEC
56698 +extern int gr_acl_is_capable(const int cap);
56699 +extern int gr_acl_is_capable_nolog(const int cap);
56700 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56701 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
56702 +extern int gr_chroot_is_capable(const int cap);
56703 +extern int gr_chroot_is_capable_nolog(const int cap);
56704 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
56705 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
56706 +#endif
56707 +
56708 +const char *captab_log[] = {
56709 + "CAP_CHOWN",
56710 + "CAP_DAC_OVERRIDE",
56711 + "CAP_DAC_READ_SEARCH",
56712 + "CAP_FOWNER",
56713 + "CAP_FSETID",
56714 + "CAP_KILL",
56715 + "CAP_SETGID",
56716 + "CAP_SETUID",
56717 + "CAP_SETPCAP",
56718 + "CAP_LINUX_IMMUTABLE",
56719 + "CAP_NET_BIND_SERVICE",
56720 + "CAP_NET_BROADCAST",
56721 + "CAP_NET_ADMIN",
56722 + "CAP_NET_RAW",
56723 + "CAP_IPC_LOCK",
56724 + "CAP_IPC_OWNER",
56725 + "CAP_SYS_MODULE",
56726 + "CAP_SYS_RAWIO",
56727 + "CAP_SYS_CHROOT",
56728 + "CAP_SYS_PTRACE",
56729 + "CAP_SYS_PACCT",
56730 + "CAP_SYS_ADMIN",
56731 + "CAP_SYS_BOOT",
56732 + "CAP_SYS_NICE",
56733 + "CAP_SYS_RESOURCE",
56734 + "CAP_SYS_TIME",
56735 + "CAP_SYS_TTY_CONFIG",
56736 + "CAP_MKNOD",
56737 + "CAP_LEASE",
56738 + "CAP_AUDIT_WRITE",
56739 + "CAP_AUDIT_CONTROL",
56740 + "CAP_SETFCAP",
56741 + "CAP_MAC_OVERRIDE",
56742 + "CAP_MAC_ADMIN",
56743 + "CAP_SYSLOG",
56744 + "CAP_WAKE_ALARM"
56745 +};
56746 +
56747 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
56748 +
56749 +int gr_is_capable(const int cap)
56750 +{
56751 +#ifdef CONFIG_GRKERNSEC
56752 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
56753 + return 1;
56754 + return 0;
56755 +#else
56756 + return 1;
56757 +#endif
56758 +}
56759 +
56760 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56761 +{
56762 +#ifdef CONFIG_GRKERNSEC
56763 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
56764 + return 1;
56765 + return 0;
56766 +#else
56767 + return 1;
56768 +#endif
56769 +}
56770 +
56771 +int gr_is_capable_nolog(const int cap)
56772 +{
56773 +#ifdef CONFIG_GRKERNSEC
56774 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
56775 + return 1;
56776 + return 0;
56777 +#else
56778 + return 1;
56779 +#endif
56780 +}
56781 +
56782 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
56783 +{
56784 +#ifdef CONFIG_GRKERNSEC
56785 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
56786 + return 1;
56787 + return 0;
56788 +#else
56789 + return 1;
56790 +#endif
56791 +}
56792 +
56793 +EXPORT_SYMBOL(gr_is_capable);
56794 +EXPORT_SYMBOL(gr_is_capable_nolog);
56795 +EXPORT_SYMBOL(gr_task_is_capable);
56796 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
56797 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
56798 new file mode 100644
56799 index 0000000..d3ee748
56800 --- /dev/null
56801 +++ b/grsecurity/grsec_fifo.c
56802 @@ -0,0 +1,24 @@
56803 +#include <linux/kernel.h>
56804 +#include <linux/sched.h>
56805 +#include <linux/fs.h>
56806 +#include <linux/file.h>
56807 +#include <linux/grinternal.h>
56808 +
56809 +int
56810 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
56811 + const struct dentry *dir, const int flag, const int acc_mode)
56812 +{
56813 +#ifdef CONFIG_GRKERNSEC_FIFO
56814 + const struct cred *cred = current_cred();
56815 +
56816 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
56817 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
56818 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
56819 + (cred->fsuid != dentry->d_inode->i_uid)) {
56820 + if (!inode_permission(dentry->d_inode, acc_mode))
56821 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
56822 + return -EACCES;
56823 + }
56824 +#endif
56825 + return 0;
56826 +}
56827 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
56828 new file mode 100644
56829 index 0000000..8ca18bf
56830 --- /dev/null
56831 +++ b/grsecurity/grsec_fork.c
56832 @@ -0,0 +1,23 @@
56833 +#include <linux/kernel.h>
56834 +#include <linux/sched.h>
56835 +#include <linux/grsecurity.h>
56836 +#include <linux/grinternal.h>
56837 +#include <linux/errno.h>
56838 +
56839 +void
56840 +gr_log_forkfail(const int retval)
56841 +{
56842 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56843 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
56844 + switch (retval) {
56845 + case -EAGAIN:
56846 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
56847 + break;
56848 + case -ENOMEM:
56849 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
56850 + break;
56851 + }
56852 + }
56853 +#endif
56854 + return;
56855 +}
56856 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
56857 new file mode 100644
56858 index 0000000..01ddde4
56859 --- /dev/null
56860 +++ b/grsecurity/grsec_init.c
56861 @@ -0,0 +1,277 @@
56862 +#include <linux/kernel.h>
56863 +#include <linux/sched.h>
56864 +#include <linux/mm.h>
56865 +#include <linux/gracl.h>
56866 +#include <linux/slab.h>
56867 +#include <linux/vmalloc.h>
56868 +#include <linux/percpu.h>
56869 +#include <linux/module.h>
56870 +
56871 +int grsec_enable_ptrace_readexec;
56872 +int grsec_enable_setxid;
56873 +int grsec_enable_brute;
56874 +int grsec_enable_link;
56875 +int grsec_enable_dmesg;
56876 +int grsec_enable_harden_ptrace;
56877 +int grsec_enable_fifo;
56878 +int grsec_enable_execlog;
56879 +int grsec_enable_signal;
56880 +int grsec_enable_forkfail;
56881 +int grsec_enable_audit_ptrace;
56882 +int grsec_enable_time;
56883 +int grsec_enable_audit_textrel;
56884 +int grsec_enable_group;
56885 +int grsec_audit_gid;
56886 +int grsec_enable_chdir;
56887 +int grsec_enable_mount;
56888 +int grsec_enable_rofs;
56889 +int grsec_enable_chroot_findtask;
56890 +int grsec_enable_chroot_mount;
56891 +int grsec_enable_chroot_shmat;
56892 +int grsec_enable_chroot_fchdir;
56893 +int grsec_enable_chroot_double;
56894 +int grsec_enable_chroot_pivot;
56895 +int grsec_enable_chroot_chdir;
56896 +int grsec_enable_chroot_chmod;
56897 +int grsec_enable_chroot_mknod;
56898 +int grsec_enable_chroot_nice;
56899 +int grsec_enable_chroot_execlog;
56900 +int grsec_enable_chroot_caps;
56901 +int grsec_enable_chroot_sysctl;
56902 +int grsec_enable_chroot_unix;
56903 +int grsec_enable_tpe;
56904 +int grsec_tpe_gid;
56905 +int grsec_enable_blackhole;
56906 +#ifdef CONFIG_IPV6_MODULE
56907 +EXPORT_SYMBOL(grsec_enable_blackhole);
56908 +#endif
56909 +int grsec_lastack_retries;
56910 +int grsec_enable_tpe_all;
56911 +int grsec_enable_tpe_invert;
56912 +int grsec_enable_socket_all;
56913 +int grsec_socket_all_gid;
56914 +int grsec_enable_socket_client;
56915 +int grsec_socket_client_gid;
56916 +int grsec_enable_socket_server;
56917 +int grsec_socket_server_gid;
56918 +int grsec_resource_logging;
56919 +int grsec_disable_privio;
56920 +int grsec_enable_log_rwxmaps;
56921 +int grsec_lock;
56922 +
56923 +DEFINE_SPINLOCK(grsec_alert_lock);
56924 +unsigned long grsec_alert_wtime = 0;
56925 +unsigned long grsec_alert_fyet = 0;
56926 +
56927 +DEFINE_SPINLOCK(grsec_audit_lock);
56928 +
56929 +DEFINE_RWLOCK(grsec_exec_file_lock);
56930 +
56931 +char *gr_shared_page[4];
56932 +
56933 +char *gr_alert_log_fmt;
56934 +char *gr_audit_log_fmt;
56935 +char *gr_alert_log_buf;
56936 +char *gr_audit_log_buf;
56937 +
56938 +extern struct gr_arg *gr_usermode;
56939 +extern unsigned char *gr_system_salt;
56940 +extern unsigned char *gr_system_sum;
56941 +
56942 +void __init
56943 +grsecurity_init(void)
56944 +{
56945 + int j;
56946 + /* create the per-cpu shared pages */
56947 +
56948 +#ifdef CONFIG_X86
56949 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
56950 +#endif
56951 +
56952 + for (j = 0; j < 4; j++) {
56953 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
56954 + if (gr_shared_page[j] == NULL) {
56955 + panic("Unable to allocate grsecurity shared page");
56956 + return;
56957 + }
56958 + }
56959 +
56960 + /* allocate log buffers */
56961 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
56962 + if (!gr_alert_log_fmt) {
56963 + panic("Unable to allocate grsecurity alert log format buffer");
56964 + return;
56965 + }
56966 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
56967 + if (!gr_audit_log_fmt) {
56968 + panic("Unable to allocate grsecurity audit log format buffer");
56969 + return;
56970 + }
56971 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56972 + if (!gr_alert_log_buf) {
56973 + panic("Unable to allocate grsecurity alert log buffer");
56974 + return;
56975 + }
56976 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56977 + if (!gr_audit_log_buf) {
56978 + panic("Unable to allocate grsecurity audit log buffer");
56979 + return;
56980 + }
56981 +
56982 + /* allocate memory for authentication structure */
56983 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
56984 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
56985 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
56986 +
56987 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
56988 + panic("Unable to allocate grsecurity authentication structure");
56989 + return;
56990 + }
56991 +
56992 +
56993 +#ifdef CONFIG_GRKERNSEC_IO
56994 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
56995 + grsec_disable_privio = 1;
56996 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56997 + grsec_disable_privio = 1;
56998 +#else
56999 + grsec_disable_privio = 0;
57000 +#endif
57001 +#endif
57002 +
57003 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57004 + /* for backward compatibility, tpe_invert always defaults to on if
57005 + enabled in the kernel
57006 + */
57007 + grsec_enable_tpe_invert = 1;
57008 +#endif
57009 +
57010 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57011 +#ifndef CONFIG_GRKERNSEC_SYSCTL
57012 + grsec_lock = 1;
57013 +#endif
57014 +
57015 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57016 + grsec_enable_audit_textrel = 1;
57017 +#endif
57018 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57019 + grsec_enable_log_rwxmaps = 1;
57020 +#endif
57021 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57022 + grsec_enable_group = 1;
57023 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57024 +#endif
57025 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57026 + grsec_enable_ptrace_readexec = 1;
57027 +#endif
57028 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57029 + grsec_enable_chdir = 1;
57030 +#endif
57031 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57032 + grsec_enable_harden_ptrace = 1;
57033 +#endif
57034 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57035 + grsec_enable_mount = 1;
57036 +#endif
57037 +#ifdef CONFIG_GRKERNSEC_LINK
57038 + grsec_enable_link = 1;
57039 +#endif
57040 +#ifdef CONFIG_GRKERNSEC_BRUTE
57041 + grsec_enable_brute = 1;
57042 +#endif
57043 +#ifdef CONFIG_GRKERNSEC_DMESG
57044 + grsec_enable_dmesg = 1;
57045 +#endif
57046 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57047 + grsec_enable_blackhole = 1;
57048 + grsec_lastack_retries = 4;
57049 +#endif
57050 +#ifdef CONFIG_GRKERNSEC_FIFO
57051 + grsec_enable_fifo = 1;
57052 +#endif
57053 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57054 + grsec_enable_execlog = 1;
57055 +#endif
57056 +#ifdef CONFIG_GRKERNSEC_SETXID
57057 + grsec_enable_setxid = 1;
57058 +#endif
57059 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57060 + grsec_enable_signal = 1;
57061 +#endif
57062 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57063 + grsec_enable_forkfail = 1;
57064 +#endif
57065 +#ifdef CONFIG_GRKERNSEC_TIME
57066 + grsec_enable_time = 1;
57067 +#endif
57068 +#ifdef CONFIG_GRKERNSEC_RESLOG
57069 + grsec_resource_logging = 1;
57070 +#endif
57071 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57072 + grsec_enable_chroot_findtask = 1;
57073 +#endif
57074 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57075 + grsec_enable_chroot_unix = 1;
57076 +#endif
57077 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57078 + grsec_enable_chroot_mount = 1;
57079 +#endif
57080 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57081 + grsec_enable_chroot_fchdir = 1;
57082 +#endif
57083 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57084 + grsec_enable_chroot_shmat = 1;
57085 +#endif
57086 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57087 + grsec_enable_audit_ptrace = 1;
57088 +#endif
57089 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57090 + grsec_enable_chroot_double = 1;
57091 +#endif
57092 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57093 + grsec_enable_chroot_pivot = 1;
57094 +#endif
57095 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57096 + grsec_enable_chroot_chdir = 1;
57097 +#endif
57098 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57099 + grsec_enable_chroot_chmod = 1;
57100 +#endif
57101 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57102 + grsec_enable_chroot_mknod = 1;
57103 +#endif
57104 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57105 + grsec_enable_chroot_nice = 1;
57106 +#endif
57107 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57108 + grsec_enable_chroot_execlog = 1;
57109 +#endif
57110 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57111 + grsec_enable_chroot_caps = 1;
57112 +#endif
57113 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57114 + grsec_enable_chroot_sysctl = 1;
57115 +#endif
57116 +#ifdef CONFIG_GRKERNSEC_TPE
57117 + grsec_enable_tpe = 1;
57118 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57119 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57120 + grsec_enable_tpe_all = 1;
57121 +#endif
57122 +#endif
57123 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57124 + grsec_enable_socket_all = 1;
57125 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57126 +#endif
57127 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57128 + grsec_enable_socket_client = 1;
57129 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57130 +#endif
57131 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57132 + grsec_enable_socket_server = 1;
57133 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57134 +#endif
57135 +#endif
57136 +
57137 + return;
57138 +}
57139 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57140 new file mode 100644
57141 index 0000000..3efe141
57142 --- /dev/null
57143 +++ b/grsecurity/grsec_link.c
57144 @@ -0,0 +1,43 @@
57145 +#include <linux/kernel.h>
57146 +#include <linux/sched.h>
57147 +#include <linux/fs.h>
57148 +#include <linux/file.h>
57149 +#include <linux/grinternal.h>
57150 +
57151 +int
57152 +gr_handle_follow_link(const struct inode *parent,
57153 + const struct inode *inode,
57154 + const struct dentry *dentry, const struct vfsmount *mnt)
57155 +{
57156 +#ifdef CONFIG_GRKERNSEC_LINK
57157 + const struct cred *cred = current_cred();
57158 +
57159 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57160 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57161 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57162 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57163 + return -EACCES;
57164 + }
57165 +#endif
57166 + return 0;
57167 +}
57168 +
57169 +int
57170 +gr_handle_hardlink(const struct dentry *dentry,
57171 + const struct vfsmount *mnt,
57172 + struct inode *inode, const int mode, const char *to)
57173 +{
57174 +#ifdef CONFIG_GRKERNSEC_LINK
57175 + const struct cred *cred = current_cred();
57176 +
57177 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57178 + (!S_ISREG(mode) || (mode & S_ISUID) ||
57179 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57180 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57181 + !capable(CAP_FOWNER) && cred->uid) {
57182 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57183 + return -EPERM;
57184 + }
57185 +#endif
57186 + return 0;
57187 +}
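A minimal userspace sketch of the sticky-directory symlink rule enforced by gr_handle_follow_link() above: the follow is refused only when the containing directory is sticky and world-writable and the link is owned by neither the directory owner nor the follower. The struct fake_inode type and the deny_follow() name are invented for the sketch; the kernel code works on struct inode and the fsuid from struct cred instead.

#include <stdio.h>
#include <sys/stat.h>   /* S_ISLNK, S_IFDIR, S_IFLNK, S_IWOTH */

#ifndef S_ISVTX
#define S_ISVTX 01000   /* sticky bit, in case the header hides it */
#endif

struct fake_inode { unsigned int mode; unsigned int uid; };

/* Returns 1 when the follow should be denied, mirroring the check above. */
static int deny_follow(const struct fake_inode *dir,
                       const struct fake_inode *link,
                       unsigned int fsuid)
{
        return S_ISLNK(link->mode) &&
               (dir->mode & S_ISVTX) &&   /* sticky directory, e.g. /tmp  */
               (dir->mode & S_IWOTH) &&   /* ...that is world-writable    */
               dir->uid != link->uid &&   /* link not owned by dir owner  */
               fsuid != link->uid;        /* ...nor by the follower       */
}

int main(void)
{
        struct fake_inode tmp  = { .mode = S_IFDIR | 01777, .uid = 0 };
        struct fake_inode link = { .mode = S_IFLNK | 0777,  .uid = 1000 };

        printf("uid 1001 following: %s\n", deny_follow(&tmp, &link, 1001) ? "denied" : "allowed");
        printf("uid 1000 following: %s\n", deny_follow(&tmp, &link, 1000) ? "denied" : "allowed");
        return 0;
}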
57188 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57189 new file mode 100644
57190 index 0000000..a45d2e9
57191 --- /dev/null
57192 +++ b/grsecurity/grsec_log.c
57193 @@ -0,0 +1,322 @@
57194 +#include <linux/kernel.h>
57195 +#include <linux/sched.h>
57196 +#include <linux/file.h>
57197 +#include <linux/tty.h>
57198 +#include <linux/fs.h>
57199 +#include <linux/grinternal.h>
57200 +
57201 +#ifdef CONFIG_TREE_PREEMPT_RCU
57202 +#define DISABLE_PREEMPT() preempt_disable()
57203 +#define ENABLE_PREEMPT() preempt_enable()
57204 +#else
57205 +#define DISABLE_PREEMPT()
57206 +#define ENABLE_PREEMPT()
57207 +#endif
57208 +
57209 +#define BEGIN_LOCKS(x) \
57210 + DISABLE_PREEMPT(); \
57211 + rcu_read_lock(); \
57212 + read_lock(&tasklist_lock); \
57213 + read_lock(&grsec_exec_file_lock); \
57214 + if (x != GR_DO_AUDIT) \
57215 + spin_lock(&grsec_alert_lock); \
57216 + else \
57217 + spin_lock(&grsec_audit_lock)
57218 +
57219 +#define END_LOCKS(x) \
57220 + if (x != GR_DO_AUDIT) \
57221 + spin_unlock(&grsec_alert_lock); \
57222 + else \
57223 + spin_unlock(&grsec_audit_lock); \
57224 + read_unlock(&grsec_exec_file_lock); \
57225 + read_unlock(&tasklist_lock); \
57226 + rcu_read_unlock(); \
57227 + ENABLE_PREEMPT(); \
57228 + if (x == GR_DONT_AUDIT) \
57229 + gr_handle_alertkill(current)
57230 +
57231 +enum {
57232 + FLOODING,
57233 + NO_FLOODING
57234 +};
57235 +
57236 +extern char *gr_alert_log_fmt;
57237 +extern char *gr_audit_log_fmt;
57238 +extern char *gr_alert_log_buf;
57239 +extern char *gr_audit_log_buf;
57240 +
57241 +static int gr_log_start(int audit)
57242 +{
57243 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57244 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57245 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57246 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57247 + unsigned long curr_secs = get_seconds();
57248 +
57249 + if (audit == GR_DO_AUDIT)
57250 + goto set_fmt;
57251 +
57252 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57253 + grsec_alert_wtime = curr_secs;
57254 + grsec_alert_fyet = 0;
57255 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57256 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57257 + grsec_alert_fyet++;
57258 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57259 + grsec_alert_wtime = curr_secs;
57260 + grsec_alert_fyet++;
57261 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57262 + return FLOODING;
57263 + }
57264 + else return FLOODING;
57265 +
57266 +set_fmt:
57267 +#endif
57268 + memset(buf, 0, PAGE_SIZE);
57269 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
57270 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57271 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57272 + } else if (current->signal->curr_ip) {
57273 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57274 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57275 + } else if (gr_acl_is_enabled()) {
57276 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57277 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57278 + } else {
57279 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
57280 + strcpy(buf, fmt);
57281 + }
57282 +
57283 + return NO_FLOODING;
57284 +}
57285 +
57286 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57287 + __attribute__ ((format (printf, 2, 0)));
57288 +
57289 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57290 +{
57291 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57292 + unsigned int len = strlen(buf);
57293 +
57294 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57295 +
57296 + return;
57297 +}
57298 +
57299 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57300 + __attribute__ ((format (printf, 2, 3)));
57301 +
57302 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57303 +{
57304 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57305 + unsigned int len = strlen(buf);
57306 + va_list ap;
57307 +
57308 + va_start(ap, msg);
57309 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57310 + va_end(ap);
57311 +
57312 + return;
57313 +}
57314 +
57315 +static void gr_log_end(int audit, int append_default)
57316 +{
57317 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57318 +
57319 + if (append_default) {
57320 + unsigned int len = strlen(buf);
57321 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57322 + }
57323 +
57324 + printk("%s\n", buf);
57325 +
57326 + return;
57327 +}
57328 +
57329 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57330 +{
57331 + int logtype;
57332 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57333 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57334 + void *voidptr = NULL;
57335 + int num1 = 0, num2 = 0;
57336 + unsigned long ulong1 = 0, ulong2 = 0;
57337 + struct dentry *dentry = NULL;
57338 + struct vfsmount *mnt = NULL;
57339 + struct file *file = NULL;
57340 + struct task_struct *task = NULL;
57341 + const struct cred *cred, *pcred;
57342 + va_list ap;
57343 +
57344 + BEGIN_LOCKS(audit);
57345 + logtype = gr_log_start(audit);
57346 + if (logtype == FLOODING) {
57347 + END_LOCKS(audit);
57348 + return;
57349 + }
57350 + va_start(ap, argtypes);
57351 + switch (argtypes) {
57352 + case GR_TTYSNIFF:
57353 + task = va_arg(ap, struct task_struct *);
57354 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57355 + break;
57356 + case GR_SYSCTL_HIDDEN:
57357 + str1 = va_arg(ap, char *);
57358 + gr_log_middle_varargs(audit, msg, result, str1);
57359 + break;
57360 + case GR_RBAC:
57361 + dentry = va_arg(ap, struct dentry *);
57362 + mnt = va_arg(ap, struct vfsmount *);
57363 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57364 + break;
57365 + case GR_RBAC_STR:
57366 + dentry = va_arg(ap, struct dentry *);
57367 + mnt = va_arg(ap, struct vfsmount *);
57368 + str1 = va_arg(ap, char *);
57369 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57370 + break;
57371 + case GR_STR_RBAC:
57372 + str1 = va_arg(ap, char *);
57373 + dentry = va_arg(ap, struct dentry *);
57374 + mnt = va_arg(ap, struct vfsmount *);
57375 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57376 + break;
57377 + case GR_RBAC_MODE2:
57378 + dentry = va_arg(ap, struct dentry *);
57379 + mnt = va_arg(ap, struct vfsmount *);
57380 + str1 = va_arg(ap, char *);
57381 + str2 = va_arg(ap, char *);
57382 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57383 + break;
57384 + case GR_RBAC_MODE3:
57385 + dentry = va_arg(ap, struct dentry *);
57386 + mnt = va_arg(ap, struct vfsmount *);
57387 + str1 = va_arg(ap, char *);
57388 + str2 = va_arg(ap, char *);
57389 + str3 = va_arg(ap, char *);
57390 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57391 + break;
57392 + case GR_FILENAME:
57393 + dentry = va_arg(ap, struct dentry *);
57394 + mnt = va_arg(ap, struct vfsmount *);
57395 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57396 + break;
57397 + case GR_STR_FILENAME:
57398 + str1 = va_arg(ap, char *);
57399 + dentry = va_arg(ap, struct dentry *);
57400 + mnt = va_arg(ap, struct vfsmount *);
57401 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57402 + break;
57403 + case GR_FILENAME_STR:
57404 + dentry = va_arg(ap, struct dentry *);
57405 + mnt = va_arg(ap, struct vfsmount *);
57406 + str1 = va_arg(ap, char *);
57407 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57408 + break;
57409 + case GR_FILENAME_TWO_INT:
57410 + dentry = va_arg(ap, struct dentry *);
57411 + mnt = va_arg(ap, struct vfsmount *);
57412 + num1 = va_arg(ap, int);
57413 + num2 = va_arg(ap, int);
57414 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57415 + break;
57416 + case GR_FILENAME_TWO_INT_STR:
57417 + dentry = va_arg(ap, struct dentry *);
57418 + mnt = va_arg(ap, struct vfsmount *);
57419 + num1 = va_arg(ap, int);
57420 + num2 = va_arg(ap, int);
57421 + str1 = va_arg(ap, char *);
57422 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57423 + break;
57424 + case GR_TEXTREL:
57425 + file = va_arg(ap, struct file *);
57426 + ulong1 = va_arg(ap, unsigned long);
57427 + ulong2 = va_arg(ap, unsigned long);
57428 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
57429 + break;
57430 + case GR_PTRACE:
57431 + task = va_arg(ap, struct task_struct *);
57432 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
57433 + break;
57434 + case GR_RESOURCE:
57435 + task = va_arg(ap, struct task_struct *);
57436 + cred = __task_cred(task);
57437 + pcred = __task_cred(task->real_parent);
57438 + ulong1 = va_arg(ap, unsigned long);
57439 + str1 = va_arg(ap, char *);
57440 + ulong2 = va_arg(ap, unsigned long);
57441 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57442 + break;
57443 + case GR_CAP:
57444 + task = va_arg(ap, struct task_struct *);
57445 + cred = __task_cred(task);
57446 + pcred = __task_cred(task->real_parent);
57447 + str1 = va_arg(ap, char *);
57448 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57449 + break;
57450 + case GR_SIG:
57451 + str1 = va_arg(ap, char *);
57452 + voidptr = va_arg(ap, void *);
57453 + gr_log_middle_varargs(audit, msg, str1, voidptr);
57454 + break;
57455 + case GR_SIG2:
57456 + task = va_arg(ap, struct task_struct *);
57457 + cred = __task_cred(task);
57458 + pcred = __task_cred(task->real_parent);
57459 + num1 = va_arg(ap, int);
57460 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57461 + break;
57462 + case GR_CRASH1:
57463 + task = va_arg(ap, struct task_struct *);
57464 + cred = __task_cred(task);
57465 + pcred = __task_cred(task->real_parent);
57466 + ulong1 = va_arg(ap, unsigned long);
57467 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
57468 + break;
57469 + case GR_CRASH2:
57470 + task = va_arg(ap, struct task_struct *);
57471 + cred = __task_cred(task);
57472 + pcred = __task_cred(task->real_parent);
57473 + ulong1 = va_arg(ap, unsigned long);
57474 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
57475 + break;
57476 + case GR_RWXMAP:
57477 + file = va_arg(ap, struct file *);
57478 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
57479 + break;
57480 + case GR_PSACCT:
57481 + {
57482 + unsigned int wday, cday;
57483 + __u8 whr, chr;
57484 + __u8 wmin, cmin;
57485 + __u8 wsec, csec;
57486 + char cur_tty[64] = { 0 };
57487 + char parent_tty[64] = { 0 };
57488 +
57489 + task = va_arg(ap, struct task_struct *);
57490 + wday = va_arg(ap, unsigned int);
57491 + cday = va_arg(ap, unsigned int);
57492 + whr = va_arg(ap, int);
57493 + chr = va_arg(ap, int);
57494 + wmin = va_arg(ap, int);
57495 + cmin = va_arg(ap, int);
57496 + wsec = va_arg(ap, int);
57497 + csec = va_arg(ap, int);
57498 + ulong1 = va_arg(ap, unsigned long);
57499 + cred = __task_cred(task);
57500 + pcred = __task_cred(task->real_parent);
57501 +
57502 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57503 + }
57504 + break;
57505 + default:
57506 + gr_log_middle(audit, msg, ap);
57507 + }
57508 + va_end(ap);
57509 + // these don't need DEFAULTSECARGS printed on the end
57510 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
57511 + gr_log_end(audit, 0);
57512 + else
57513 + gr_log_end(audit, 1);
57514 + END_LOCKS(audit);
57515 +}
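The flood gate at the top of gr_log_start() above can be read in isolation as a simple windowed rate limiter: a fresh window resets the counter, up to FLOODBURST alerts pass within FLOODTIME seconds, the burst boundary announces the suppression once, and everything after that is dropped until the window lapses. A standalone sketch of that logic follows; FLOODTIME, FLOODBURST and alert_allowed() are illustrative stand-ins for the CONFIG_GRKERNSEC_FLOODTIME/_FLOODBURST values and the in-kernel bookkeeping, with time(NULL) replacing get_seconds().

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* seconds per window (illustrative value) */
#define FLOODBURST 6    /* alerts allowed per window               */

static unsigned long wtime;     /* start of the current window  */
static unsigned int  fyet;      /* alerts counted in the window */

/* Returns 1 when the alert may be logged, 0 when it is suppressed. */
static int alert_allowed(void)
{
        unsigned long now = (unsigned long)time(NULL);

        if (!wtime || now > wtime + FLOODTIME) {
                wtime = now;            /* window expired: start a new one */
                fyet = 0;
                return 1;
        }
        if (fyet < FLOODBURST) {
                fyet++;
                return 1;
        }
        if (fyet == FLOODBURST) {
                wtime = now;            /* remember when suppression began */
                fyet++;
                printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
        }
        return 0;
}

int main(void)
{
        for (int i = 0; i < 10; i++)
                printf("alert %d -> %s\n", i, alert_allowed() ? "logged" : "dropped");
        return 0;
}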
57516 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
57517 new file mode 100644
57518 index 0000000..f536303
57519 --- /dev/null
57520 +++ b/grsecurity/grsec_mem.c
57521 @@ -0,0 +1,40 @@
57522 +#include <linux/kernel.h>
57523 +#include <linux/sched.h>
57524 +#include <linux/mm.h>
57525 +#include <linux/mman.h>
57526 +#include <linux/grinternal.h>
57527 +
57528 +void
57529 +gr_handle_ioperm(void)
57530 +{
57531 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
57532 + return;
57533 +}
57534 +
57535 +void
57536 +gr_handle_iopl(void)
57537 +{
57538 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
57539 + return;
57540 +}
57541 +
57542 +void
57543 +gr_handle_mem_readwrite(u64 from, u64 to)
57544 +{
57545 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
57546 + return;
57547 +}
57548 +
57549 +void
57550 +gr_handle_vm86(void)
57551 +{
57552 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
57553 + return;
57554 +}
57555 +
57556 +void
57557 +gr_log_badprocpid(const char *entry)
57558 +{
57559 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
57560 + return;
57561 +}
57562 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
57563 new file mode 100644
57564 index 0000000..2131422
57565 --- /dev/null
57566 +++ b/grsecurity/grsec_mount.c
57567 @@ -0,0 +1,62 @@
57568 +#include <linux/kernel.h>
57569 +#include <linux/sched.h>
57570 +#include <linux/mount.h>
57571 +#include <linux/grsecurity.h>
57572 +#include <linux/grinternal.h>
57573 +
57574 +void
57575 +gr_log_remount(const char *devname, const int retval)
57576 +{
57577 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57578 + if (grsec_enable_mount && (retval >= 0))
57579 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
57580 +#endif
57581 + return;
57582 +}
57583 +
57584 +void
57585 +gr_log_unmount(const char *devname, const int retval)
57586 +{
57587 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57588 + if (grsec_enable_mount && (retval >= 0))
57589 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
57590 +#endif
57591 + return;
57592 +}
57593 +
57594 +void
57595 +gr_log_mount(const char *from, const char *to, const int retval)
57596 +{
57597 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57598 + if (grsec_enable_mount && (retval >= 0))
57599 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
57600 +#endif
57601 + return;
57602 +}
57603 +
57604 +int
57605 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
57606 +{
57607 +#ifdef CONFIG_GRKERNSEC_ROFS
57608 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
57609 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
57610 + return -EPERM;
57611 + } else
57612 + return 0;
57613 +#endif
57614 + return 0;
57615 +}
57616 +
57617 +int
57618 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
57619 +{
57620 +#ifdef CONFIG_GRKERNSEC_ROFS
57621 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
57622 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
57623 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
57624 + return -EPERM;
57625 + } else
57626 + return 0;
57627 +#endif
57628 + return 0;
57629 +}
57630 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
57631 new file mode 100644
57632 index 0000000..a3b12a0
57633 --- /dev/null
57634 +++ b/grsecurity/grsec_pax.c
57635 @@ -0,0 +1,36 @@
57636 +#include <linux/kernel.h>
57637 +#include <linux/sched.h>
57638 +#include <linux/mm.h>
57639 +#include <linux/file.h>
57640 +#include <linux/grinternal.h>
57641 +#include <linux/grsecurity.h>
57642 +
57643 +void
57644 +gr_log_textrel(struct vm_area_struct * vma)
57645 +{
57646 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57647 + if (grsec_enable_audit_textrel)
57648 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
57649 +#endif
57650 + return;
57651 +}
57652 +
57653 +void
57654 +gr_log_rwxmmap(struct file *file)
57655 +{
57656 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57657 + if (grsec_enable_log_rwxmaps)
57658 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
57659 +#endif
57660 + return;
57661 +}
57662 +
57663 +void
57664 +gr_log_rwxmprotect(struct file *file)
57665 +{
57666 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57667 + if (grsec_enable_log_rwxmaps)
57668 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
57669 +#endif
57670 + return;
57671 +}
57672 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
57673 new file mode 100644
57674 index 0000000..f7f29aa
57675 --- /dev/null
57676 +++ b/grsecurity/grsec_ptrace.c
57677 @@ -0,0 +1,30 @@
57678 +#include <linux/kernel.h>
57679 +#include <linux/sched.h>
57680 +#include <linux/grinternal.h>
57681 +#include <linux/security.h>
57682 +
57683 +void
57684 +gr_audit_ptrace(struct task_struct *task)
57685 +{
57686 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57687 + if (grsec_enable_audit_ptrace)
57688 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
57689 +#endif
57690 + return;
57691 +}
57692 +
57693 +int
57694 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
57695 +{
57696 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57697 + const struct dentry *dentry = file->f_path.dentry;
57698 + const struct vfsmount *mnt = file->f_path.mnt;
57699 +
57700 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
57701 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
57702 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
57703 + return -EACCES;
57704 + }
57705 +#endif
57706 + return 0;
57707 +}
57708 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
57709 new file mode 100644
57710 index 0000000..7a5b2de
57711 --- /dev/null
57712 +++ b/grsecurity/grsec_sig.c
57713 @@ -0,0 +1,207 @@
57714 +#include <linux/kernel.h>
57715 +#include <linux/sched.h>
57716 +#include <linux/delay.h>
57717 +#include <linux/grsecurity.h>
57718 +#include <linux/grinternal.h>
57719 +#include <linux/hardirq.h>
57720 +
57721 +char *signames[] = {
57722 + [SIGSEGV] = "Segmentation fault",
57723 + [SIGILL] = "Illegal instruction",
57724 + [SIGABRT] = "Abort",
57725 + [SIGBUS] = "Invalid alignment/Bus error"
57726 +};
57727 +
57728 +void
57729 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
57730 +{
57731 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57732 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
57733 + (sig == SIGABRT) || (sig == SIGBUS))) {
57734 + if (t->pid == current->pid) {
57735 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
57736 + } else {
57737 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
57738 + }
57739 + }
57740 +#endif
57741 + return;
57742 +}
57743 +
57744 +int
57745 +gr_handle_signal(const struct task_struct *p, const int sig)
57746 +{
57747 +#ifdef CONFIG_GRKERNSEC
57748 + /* ignore the 0 signal for protected task checks */
57749 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
57750 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
57751 + return -EPERM;
57752 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
57753 + return -EPERM;
57754 + }
57755 +#endif
57756 + return 0;
57757 +}
57758 +
57759 +#ifdef CONFIG_GRKERNSEC
57760 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
57761 +
57762 +int gr_fake_force_sig(int sig, struct task_struct *t)
57763 +{
57764 + unsigned long int flags;
57765 + int ret, blocked, ignored;
57766 + struct k_sigaction *action;
57767 +
57768 + spin_lock_irqsave(&t->sighand->siglock, flags);
57769 + action = &t->sighand->action[sig-1];
57770 + ignored = action->sa.sa_handler == SIG_IGN;
57771 + blocked = sigismember(&t->blocked, sig);
57772 + if (blocked || ignored) {
57773 + action->sa.sa_handler = SIG_DFL;
57774 + if (blocked) {
57775 + sigdelset(&t->blocked, sig);
57776 + recalc_sigpending_and_wake(t);
57777 + }
57778 + }
57779 + if (action->sa.sa_handler == SIG_DFL)
57780 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
57781 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
57782 +
57783 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
57784 +
57785 + return ret;
57786 +}
57787 +#endif
57788 +
57789 +#ifdef CONFIG_GRKERNSEC_BRUTE
57790 +#define GR_USER_BAN_TIME (15 * 60)
57791 +
57792 +static int __get_dumpable(unsigned long mm_flags)
57793 +{
57794 + int ret;
57795 +
57796 + ret = mm_flags & MMF_DUMPABLE_MASK;
57797 + return (ret >= 2) ? 2 : ret;
57798 +}
57799 +#endif
57800 +
57801 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
57802 +{
57803 +#ifdef CONFIG_GRKERNSEC_BRUTE
57804 + uid_t uid = 0;
57805 +
57806 + if (!grsec_enable_brute)
57807 + return;
57808 +
57809 + rcu_read_lock();
57810 + read_lock(&tasklist_lock);
57811 + read_lock(&grsec_exec_file_lock);
57812 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
57813 + p->real_parent->brute = 1;
57814 + else {
57815 + const struct cred *cred = __task_cred(p), *cred2;
57816 + struct task_struct *tsk, *tsk2;
57817 +
57818 + if (!__get_dumpable(mm_flags) && cred->uid) {
57819 + struct user_struct *user;
57820 +
57821 + uid = cred->uid;
57822 +
57823 + /* this is put upon execution past expiration */
57824 + user = find_user(uid);
57825 + if (user == NULL)
57826 + goto unlock;
57827 + user->banned = 1;
57828 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
57829 + if (user->ban_expires == ~0UL)
57830 + user->ban_expires--;
57831 +
57832 + do_each_thread(tsk2, tsk) {
57833 + cred2 = __task_cred(tsk);
57834 + if (tsk != p && cred2->uid == uid)
57835 + gr_fake_force_sig(SIGKILL, tsk);
57836 + } while_each_thread(tsk2, tsk);
57837 + }
57838 + }
57839 +unlock:
57840 + read_unlock(&grsec_exec_file_lock);
57841 + read_unlock(&tasklist_lock);
57842 + rcu_read_unlock();
57843 +
57844 + if (uid)
57845 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
57846 +
57847 +#endif
57848 + return;
57849 +}
57850 +
57851 +void gr_handle_brute_check(void)
57852 +{
57853 +#ifdef CONFIG_GRKERNSEC_BRUTE
57854 + if (current->brute)
57855 + msleep(30 * 1000);
57856 +#endif
57857 + return;
57858 +}
57859 +
57860 +void gr_handle_kernel_exploit(void)
57861 +{
57862 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
57863 + const struct cred *cred;
57864 + struct task_struct *tsk, *tsk2;
57865 + struct user_struct *user;
57866 + uid_t uid;
57867 +
57868 + if (in_irq() || in_serving_softirq() || in_nmi())
57869 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
57870 +
57871 + uid = current_uid();
57872 +
57873 + if (uid == 0)
57874 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
57875 + else {
57876 + /* kill all the processes of this user, hold a reference
57877 + to their creds struct, and prevent them from creating
57878 + another process until system reset
57879 + */
57880 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
57881 + /* we intentionally leak this ref */
57882 + user = get_uid(current->cred->user);
57883 + if (user) {
57884 + user->banned = 1;
57885 + user->ban_expires = ~0UL;
57886 + }
57887 +
57888 + read_lock(&tasklist_lock);
57889 + do_each_thread(tsk2, tsk) {
57890 + cred = __task_cred(tsk);
57891 + if (cred->uid == uid)
57892 + gr_fake_force_sig(SIGKILL, tsk);
57893 + } while_each_thread(tsk2, tsk);
57894 + read_unlock(&tasklist_lock);
57895 + }
57896 +#endif
57897 +}
57898 +
57899 +int __gr_process_user_ban(struct user_struct *user)
57900 +{
57901 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57902 + if (unlikely(user->banned)) {
57903 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
57904 + user->banned = 0;
57905 + user->ban_expires = 0;
57906 + free_uid(user);
57907 + } else
57908 + return -EPERM;
57909 + }
57910 +#endif
57911 + return 0;
57912 +}
57913 +
57914 +int gr_process_user_ban(void)
57915 +{
57916 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57917 + return __gr_process_user_ban(current->cred->user);
57918 +#endif
57919 + return 0;
57920 +}
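The ban bookkeeping shared by gr_handle_brute_attach() and __gr_process_user_ban() above reduces to two fields on the user: a banned flag and an absolute expiry, where ~0UL marks the permanent ban applied by the kernel-exploit lockout and any other value is a timed ban (GR_USER_BAN_TIME after the dumpable-check failure). A userspace sketch of the expiry check follows; struct fake_user and process_allowed() are invented names, and the reference dropping done by free_uid() in the real code is not modelled.

#include <stdio.h>
#include <time.h>
#include <errno.h>

#define BAN_TIME (15 * 60)      /* mirrors GR_USER_BAN_TIME */

struct fake_user {
        int banned;
        unsigned long ban_expires;      /* absolute seconds, ~0UL = never */
};

static int process_allowed(struct fake_user *user)
{
        unsigned long now = (unsigned long)time(NULL);

        if (user->banned) {
                if (user->ban_expires != ~0UL && now >= user->ban_expires) {
                        user->banned = 0;       /* timed ban has lapsed */
                        user->ban_expires = 0;
                } else {
                        return -EPERM;
                }
        }
        return 0;
}

int main(void)
{
        struct fake_user u = { .banned = 1,
                               .ban_expires = (unsigned long)time(NULL) + BAN_TIME };

        printf("fresh ban:   %s\n", process_allowed(&u) ? "blocked" : "allowed");
        u.ban_expires = (unsigned long)time(NULL) - 1;   /* pretend it expired */
        printf("expired ban: %s\n", process_allowed(&u) ? "blocked" : "allowed");
        return 0;
}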
57921 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
57922 new file mode 100644
57923 index 0000000..4030d57
57924 --- /dev/null
57925 +++ b/grsecurity/grsec_sock.c
57926 @@ -0,0 +1,244 @@
57927 +#include <linux/kernel.h>
57928 +#include <linux/module.h>
57929 +#include <linux/sched.h>
57930 +#include <linux/file.h>
57931 +#include <linux/net.h>
57932 +#include <linux/in.h>
57933 +#include <linux/ip.h>
57934 +#include <net/sock.h>
57935 +#include <net/inet_sock.h>
57936 +#include <linux/grsecurity.h>
57937 +#include <linux/grinternal.h>
57938 +#include <linux/gracl.h>
57939 +
57940 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
57941 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
57942 +
57943 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
57944 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
57945 +
57946 +#ifdef CONFIG_UNIX_MODULE
57947 +EXPORT_SYMBOL(gr_acl_handle_unix);
57948 +EXPORT_SYMBOL(gr_acl_handle_mknod);
57949 +EXPORT_SYMBOL(gr_handle_chroot_unix);
57950 +EXPORT_SYMBOL(gr_handle_create);
57951 +#endif
57952 +
57953 +#ifdef CONFIG_GRKERNSEC
57954 +#define gr_conn_table_size 32749
57955 +struct conn_table_entry {
57956 + struct conn_table_entry *next;
57957 + struct signal_struct *sig;
57958 +};
57959 +
57960 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
57961 +DEFINE_SPINLOCK(gr_conn_table_lock);
57962 +
57963 +extern const char * gr_socktype_to_name(unsigned char type);
57964 +extern const char * gr_proto_to_name(unsigned char proto);
57965 +extern const char * gr_sockfamily_to_name(unsigned char family);
57966 +
57967 +static __inline__ int
57968 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
57969 +{
57970 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
57971 +}
57972 +
57973 +static __inline__ int
57974 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
57975 + __u16 sport, __u16 dport)
57976 +{
57977 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
57978 + sig->gr_sport == sport && sig->gr_dport == dport))
57979 + return 1;
57980 + else
57981 + return 0;
57982 +}
57983 +
57984 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
57985 +{
57986 + struct conn_table_entry **match;
57987 + unsigned int index;
57988 +
57989 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57990 + sig->gr_sport, sig->gr_dport,
57991 + gr_conn_table_size);
57992 +
57993 + newent->sig = sig;
57994 +
57995 + match = &gr_conn_table[index];
57996 + newent->next = *match;
57997 + *match = newent;
57998 +
57999 + return;
58000 +}
58001 +
58002 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58003 +{
58004 + struct conn_table_entry *match, *last = NULL;
58005 + unsigned int index;
58006 +
58007 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58008 + sig->gr_sport, sig->gr_dport,
58009 + gr_conn_table_size);
58010 +
58011 + match = gr_conn_table[index];
58012 + while (match && !conn_match(match->sig,
58013 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58014 + sig->gr_dport)) {
58015 + last = match;
58016 + match = match->next;
58017 + }
58018 +
58019 + if (match) {
58020 + if (last)
58021 + last->next = match->next;
58022 + else
58023 + gr_conn_table[index] = NULL;
58024 + kfree(match);
58025 + }
58026 +
58027 + return;
58028 +}
58029 +
58030 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58031 + __u16 sport, __u16 dport)
58032 +{
58033 + struct conn_table_entry *match;
58034 + unsigned int index;
58035 +
58036 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58037 +
58038 + match = gr_conn_table[index];
58039 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58040 + match = match->next;
58041 +
58042 + if (match)
58043 + return match->sig;
58044 + else
58045 + return NULL;
58046 +}
58047 +
58048 +#endif
58049 +
58050 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58051 +{
58052 +#ifdef CONFIG_GRKERNSEC
58053 + struct signal_struct *sig = task->signal;
58054 + struct conn_table_entry *newent;
58055 +
58056 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58057 + if (newent == NULL)
58058 + return;
58059 + /* no bh lock needed since we are called with bh disabled */
58060 + spin_lock(&gr_conn_table_lock);
58061 + gr_del_task_from_ip_table_nolock(sig);
58062 + sig->gr_saddr = inet->inet_rcv_saddr;
58063 + sig->gr_daddr = inet->inet_daddr;
58064 + sig->gr_sport = inet->inet_sport;
58065 + sig->gr_dport = inet->inet_dport;
58066 + gr_add_to_task_ip_table_nolock(sig, newent);
58067 + spin_unlock(&gr_conn_table_lock);
58068 +#endif
58069 + return;
58070 +}
58071 +
58072 +void gr_del_task_from_ip_table(struct task_struct *task)
58073 +{
58074 +#ifdef CONFIG_GRKERNSEC
58075 + spin_lock_bh(&gr_conn_table_lock);
58076 + gr_del_task_from_ip_table_nolock(task->signal);
58077 + spin_unlock_bh(&gr_conn_table_lock);
58078 +#endif
58079 + return;
58080 +}
58081 +
58082 +void
58083 +gr_attach_curr_ip(const struct sock *sk)
58084 +{
58085 +#ifdef CONFIG_GRKERNSEC
58086 + struct signal_struct *p, *set;
58087 + const struct inet_sock *inet = inet_sk(sk);
58088 +
58089 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58090 + return;
58091 +
58092 + set = current->signal;
58093 +
58094 + spin_lock_bh(&gr_conn_table_lock);
58095 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58096 + inet->inet_dport, inet->inet_sport);
58097 + if (unlikely(p != NULL)) {
58098 + set->curr_ip = p->curr_ip;
58099 + set->used_accept = 1;
58100 + gr_del_task_from_ip_table_nolock(p);
58101 + spin_unlock_bh(&gr_conn_table_lock);
58102 + return;
58103 + }
58104 + spin_unlock_bh(&gr_conn_table_lock);
58105 +
58106 + set->curr_ip = inet->inet_daddr;
58107 + set->used_accept = 1;
58108 +#endif
58109 + return;
58110 +}
58111 +
58112 +int
58113 +gr_handle_sock_all(const int family, const int type, const int protocol)
58114 +{
58115 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58116 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58117 + (family != AF_UNIX)) {
58118 + if (family == AF_INET)
58119 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58120 + else
58121 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58122 + return -EACCES;
58123 + }
58124 +#endif
58125 + return 0;
58126 +}
58127 +
58128 +int
58129 +gr_handle_sock_server(const struct sockaddr *sck)
58130 +{
58131 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58132 + if (grsec_enable_socket_server &&
58133 + in_group_p(grsec_socket_server_gid) &&
58134 + sck && (sck->sa_family != AF_UNIX) &&
58135 + (sck->sa_family != AF_LOCAL)) {
58136 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58137 + return -EACCES;
58138 + }
58139 +#endif
58140 + return 0;
58141 +}
58142 +
58143 +int
58144 +gr_handle_sock_server_other(const struct sock *sck)
58145 +{
58146 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58147 + if (grsec_enable_socket_server &&
58148 + in_group_p(grsec_socket_server_gid) &&
58149 + sck && (sck->sk_family != AF_UNIX) &&
58150 + (sck->sk_family != AF_LOCAL)) {
58151 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58152 + return -EACCES;
58153 + }
58154 +#endif
58155 + return 0;
58156 +}
58157 +
58158 +int
58159 +gr_handle_sock_client(const struct sockaddr *sck)
58160 +{
58161 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58162 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58163 + sck && (sck->sa_family != AF_UNIX) &&
58164 + (sck->sa_family != AF_LOCAL)) {
58165 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58166 + return -EACCES;
58167 + }
58168 +#endif
58169 + return 0;
58170 +}
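The connection table above keys each entry on the (saddr, daddr, sport, dport) 4-tuple so that gr_attach_curr_ip() can hand the originating IP of an accepted connection to the serving task's signal_struct. Its hash is small enough to restate verbatim as a standalone function; the sketch below does so, with CONN_TABLE_SIZE standing in for gr_conn_table_size (32749, a prime that keeps the modulo well spread) and arbitrary demo values in place of the network-byte-order fields of struct inet_sock.

#include <stdio.h>
#include <stdint.h>

#define CONN_TABLE_SIZE 32749

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport)
{
        /* same mixing as the patch: daddr + saddr + (sport << 8) + (dport << 16) */
        return (daddr + saddr + ((uint32_t)sport << 8) + ((uint32_t)dport << 16))
               % CONN_TABLE_SIZE;
}

int main(void)
{
        /* 192.168.1.10:44321 -> 10.0.0.1:80; byte order is irrelevant to the demo */
        uint32_t saddr = 0xc0a8010a, daddr = 0x0a000001;
        uint16_t sport = 44321, dport = 80;

        printf("bucket = %u of %u\n",
               conn_hash(saddr, daddr, sport, dport), CONN_TABLE_SIZE);
        return 0;
}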
58171 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58172 new file mode 100644
58173 index 0000000..8316f6f
58174 --- /dev/null
58175 +++ b/grsecurity/grsec_sysctl.c
58176 @@ -0,0 +1,453 @@
58177 +#include <linux/kernel.h>
58178 +#include <linux/sched.h>
58179 +#include <linux/sysctl.h>
58180 +#include <linux/grsecurity.h>
58181 +#include <linux/grinternal.h>
58182 +
58183 +int
58184 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58185 +{
58186 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58187 + if (dirname == NULL || name == NULL)
58188 + return 0;
58189 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58190 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58191 + return -EACCES;
58192 + }
58193 +#endif
58194 + return 0;
58195 +}
58196 +
58197 +#ifdef CONFIG_GRKERNSEC_ROFS
58198 +static int __maybe_unused one = 1;
58199 +#endif
58200 +
58201 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58202 +struct ctl_table grsecurity_table[] = {
58203 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58204 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58205 +#ifdef CONFIG_GRKERNSEC_IO
58206 + {
58207 + .procname = "disable_priv_io",
58208 + .data = &grsec_disable_privio,
58209 + .maxlen = sizeof(int),
58210 + .mode = 0600,
58211 + .proc_handler = &proc_dointvec,
58212 + },
58213 +#endif
58214 +#endif
58215 +#ifdef CONFIG_GRKERNSEC_LINK
58216 + {
58217 + .procname = "linking_restrictions",
58218 + .data = &grsec_enable_link,
58219 + .maxlen = sizeof(int),
58220 + .mode = 0600,
58221 + .proc_handler = &proc_dointvec,
58222 + },
58223 +#endif
58224 +#ifdef CONFIG_GRKERNSEC_BRUTE
58225 + {
58226 + .procname = "deter_bruteforce",
58227 + .data = &grsec_enable_brute,
58228 + .maxlen = sizeof(int),
58229 + .mode = 0600,
58230 + .proc_handler = &proc_dointvec,
58231 + },
58232 +#endif
58233 +#ifdef CONFIG_GRKERNSEC_FIFO
58234 + {
58235 + .procname = "fifo_restrictions",
58236 + .data = &grsec_enable_fifo,
58237 + .maxlen = sizeof(int),
58238 + .mode = 0600,
58239 + .proc_handler = &proc_dointvec,
58240 + },
58241 +#endif
58242 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58243 + {
58244 + .procname = "ptrace_readexec",
58245 + .data = &grsec_enable_ptrace_readexec,
58246 + .maxlen = sizeof(int),
58247 + .mode = 0600,
58248 + .proc_handler = &proc_dointvec,
58249 + },
58250 +#endif
58251 +#ifdef CONFIG_GRKERNSEC_SETXID
58252 + {
58253 + .procname = "consistent_setxid",
58254 + .data = &grsec_enable_setxid,
58255 + .maxlen = sizeof(int),
58256 + .mode = 0600,
58257 + .proc_handler = &proc_dointvec,
58258 + },
58259 +#endif
58260 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58261 + {
58262 + .procname = "ip_blackhole",
58263 + .data = &grsec_enable_blackhole,
58264 + .maxlen = sizeof(int),
58265 + .mode = 0600,
58266 + .proc_handler = &proc_dointvec,
58267 + },
58268 + {
58269 + .procname = "lastack_retries",
58270 + .data = &grsec_lastack_retries,
58271 + .maxlen = sizeof(int),
58272 + .mode = 0600,
58273 + .proc_handler = &proc_dointvec,
58274 + },
58275 +#endif
58276 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58277 + {
58278 + .procname = "exec_logging",
58279 + .data = &grsec_enable_execlog,
58280 + .maxlen = sizeof(int),
58281 + .mode = 0600,
58282 + .proc_handler = &proc_dointvec,
58283 + },
58284 +#endif
58285 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58286 + {
58287 + .procname = "rwxmap_logging",
58288 + .data = &grsec_enable_log_rwxmaps,
58289 + .maxlen = sizeof(int),
58290 + .mode = 0600,
58291 + .proc_handler = &proc_dointvec,
58292 + },
58293 +#endif
58294 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58295 + {
58296 + .procname = "signal_logging",
58297 + .data = &grsec_enable_signal,
58298 + .maxlen = sizeof(int),
58299 + .mode = 0600,
58300 + .proc_handler = &proc_dointvec,
58301 + },
58302 +#endif
58303 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58304 + {
58305 + .procname = "forkfail_logging",
58306 + .data = &grsec_enable_forkfail,
58307 + .maxlen = sizeof(int),
58308 + .mode = 0600,
58309 + .proc_handler = &proc_dointvec,
58310 + },
58311 +#endif
58312 +#ifdef CONFIG_GRKERNSEC_TIME
58313 + {
58314 + .procname = "timechange_logging",
58315 + .data = &grsec_enable_time,
58316 + .maxlen = sizeof(int),
58317 + .mode = 0600,
58318 + .proc_handler = &proc_dointvec,
58319 + },
58320 +#endif
58321 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58322 + {
58323 + .procname = "chroot_deny_shmat",
58324 + .data = &grsec_enable_chroot_shmat,
58325 + .maxlen = sizeof(int),
58326 + .mode = 0600,
58327 + .proc_handler = &proc_dointvec,
58328 + },
58329 +#endif
58330 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58331 + {
58332 + .procname = "chroot_deny_unix",
58333 + .data = &grsec_enable_chroot_unix,
58334 + .maxlen = sizeof(int),
58335 + .mode = 0600,
58336 + .proc_handler = &proc_dointvec,
58337 + },
58338 +#endif
58339 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58340 + {
58341 + .procname = "chroot_deny_mount",
58342 + .data = &grsec_enable_chroot_mount,
58343 + .maxlen = sizeof(int),
58344 + .mode = 0600,
58345 + .proc_handler = &proc_dointvec,
58346 + },
58347 +#endif
58348 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58349 + {
58350 + .procname = "chroot_deny_fchdir",
58351 + .data = &grsec_enable_chroot_fchdir,
58352 + .maxlen = sizeof(int),
58353 + .mode = 0600,
58354 + .proc_handler = &proc_dointvec,
58355 + },
58356 +#endif
58357 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58358 + {
58359 + .procname = "chroot_deny_chroot",
58360 + .data = &grsec_enable_chroot_double,
58361 + .maxlen = sizeof(int),
58362 + .mode = 0600,
58363 + .proc_handler = &proc_dointvec,
58364 + },
58365 +#endif
58366 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58367 + {
58368 + .procname = "chroot_deny_pivot",
58369 + .data = &grsec_enable_chroot_pivot,
58370 + .maxlen = sizeof(int),
58371 + .mode = 0600,
58372 + .proc_handler = &proc_dointvec,
58373 + },
58374 +#endif
58375 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58376 + {
58377 + .procname = "chroot_enforce_chdir",
58378 + .data = &grsec_enable_chroot_chdir,
58379 + .maxlen = sizeof(int),
58380 + .mode = 0600,
58381 + .proc_handler = &proc_dointvec,
58382 + },
58383 +#endif
58384 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58385 + {
58386 + .procname = "chroot_deny_chmod",
58387 + .data = &grsec_enable_chroot_chmod,
58388 + .maxlen = sizeof(int),
58389 + .mode = 0600,
58390 + .proc_handler = &proc_dointvec,
58391 + },
58392 +#endif
58393 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58394 + {
58395 + .procname = "chroot_deny_mknod",
58396 + .data = &grsec_enable_chroot_mknod,
58397 + .maxlen = sizeof(int),
58398 + .mode = 0600,
58399 + .proc_handler = &proc_dointvec,
58400 + },
58401 +#endif
58402 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58403 + {
58404 + .procname = "chroot_restrict_nice",
58405 + .data = &grsec_enable_chroot_nice,
58406 + .maxlen = sizeof(int),
58407 + .mode = 0600,
58408 + .proc_handler = &proc_dointvec,
58409 + },
58410 +#endif
58411 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58412 + {
58413 + .procname = "chroot_execlog",
58414 + .data = &grsec_enable_chroot_execlog,
58415 + .maxlen = sizeof(int),
58416 + .mode = 0600,
58417 + .proc_handler = &proc_dointvec,
58418 + },
58419 +#endif
58420 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58421 + {
58422 + .procname = "chroot_caps",
58423 + .data = &grsec_enable_chroot_caps,
58424 + .maxlen = sizeof(int),
58425 + .mode = 0600,
58426 + .proc_handler = &proc_dointvec,
58427 + },
58428 +#endif
58429 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58430 + {
58431 + .procname = "chroot_deny_sysctl",
58432 + .data = &grsec_enable_chroot_sysctl,
58433 + .maxlen = sizeof(int),
58434 + .mode = 0600,
58435 + .proc_handler = &proc_dointvec,
58436 + },
58437 +#endif
58438 +#ifdef CONFIG_GRKERNSEC_TPE
58439 + {
58440 + .procname = "tpe",
58441 + .data = &grsec_enable_tpe,
58442 + .maxlen = sizeof(int),
58443 + .mode = 0600,
58444 + .proc_handler = &proc_dointvec,
58445 + },
58446 + {
58447 + .procname = "tpe_gid",
58448 + .data = &grsec_tpe_gid,
58449 + .maxlen = sizeof(int),
58450 + .mode = 0600,
58451 + .proc_handler = &proc_dointvec,
58452 + },
58453 +#endif
58454 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58455 + {
58456 + .procname = "tpe_invert",
58457 + .data = &grsec_enable_tpe_invert,
58458 + .maxlen = sizeof(int),
58459 + .mode = 0600,
58460 + .proc_handler = &proc_dointvec,
58461 + },
58462 +#endif
58463 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58464 + {
58465 + .procname = "tpe_restrict_all",
58466 + .data = &grsec_enable_tpe_all,
58467 + .maxlen = sizeof(int),
58468 + .mode = 0600,
58469 + .proc_handler = &proc_dointvec,
58470 + },
58471 +#endif
58472 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58473 + {
58474 + .procname = "socket_all",
58475 + .data = &grsec_enable_socket_all,
58476 + .maxlen = sizeof(int),
58477 + .mode = 0600,
58478 + .proc_handler = &proc_dointvec,
58479 + },
58480 + {
58481 + .procname = "socket_all_gid",
58482 + .data = &grsec_socket_all_gid,
58483 + .maxlen = sizeof(int),
58484 + .mode = 0600,
58485 + .proc_handler = &proc_dointvec,
58486 + },
58487 +#endif
58488 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58489 + {
58490 + .procname = "socket_client",
58491 + .data = &grsec_enable_socket_client,
58492 + .maxlen = sizeof(int),
58493 + .mode = 0600,
58494 + .proc_handler = &proc_dointvec,
58495 + },
58496 + {
58497 + .procname = "socket_client_gid",
58498 + .data = &grsec_socket_client_gid,
58499 + .maxlen = sizeof(int),
58500 + .mode = 0600,
58501 + .proc_handler = &proc_dointvec,
58502 + },
58503 +#endif
58504 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58505 + {
58506 + .procname = "socket_server",
58507 + .data = &grsec_enable_socket_server,
58508 + .maxlen = sizeof(int),
58509 + .mode = 0600,
58510 + .proc_handler = &proc_dointvec,
58511 + },
58512 + {
58513 + .procname = "socket_server_gid",
58514 + .data = &grsec_socket_server_gid,
58515 + .maxlen = sizeof(int),
58516 + .mode = 0600,
58517 + .proc_handler = &proc_dointvec,
58518 + },
58519 +#endif
58520 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58521 + {
58522 + .procname = "audit_group",
58523 + .data = &grsec_enable_group,
58524 + .maxlen = sizeof(int),
58525 + .mode = 0600,
58526 + .proc_handler = &proc_dointvec,
58527 + },
58528 + {
58529 + .procname = "audit_gid",
58530 + .data = &grsec_audit_gid,
58531 + .maxlen = sizeof(int),
58532 + .mode = 0600,
58533 + .proc_handler = &proc_dointvec,
58534 + },
58535 +#endif
58536 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58537 + {
58538 + .procname = "audit_chdir",
58539 + .data = &grsec_enable_chdir,
58540 + .maxlen = sizeof(int),
58541 + .mode = 0600,
58542 + .proc_handler = &proc_dointvec,
58543 + },
58544 +#endif
58545 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58546 + {
58547 + .procname = "audit_mount",
58548 + .data = &grsec_enable_mount,
58549 + .maxlen = sizeof(int),
58550 + .mode = 0600,
58551 + .proc_handler = &proc_dointvec,
58552 + },
58553 +#endif
58554 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58555 + {
58556 + .procname = "audit_textrel",
58557 + .data = &grsec_enable_audit_textrel,
58558 + .maxlen = sizeof(int),
58559 + .mode = 0600,
58560 + .proc_handler = &proc_dointvec,
58561 + },
58562 +#endif
58563 +#ifdef CONFIG_GRKERNSEC_DMESG
58564 + {
58565 + .procname = "dmesg",
58566 + .data = &grsec_enable_dmesg,
58567 + .maxlen = sizeof(int),
58568 + .mode = 0600,
58569 + .proc_handler = &proc_dointvec,
58570 + },
58571 +#endif
58572 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58573 + {
58574 + .procname = "chroot_findtask",
58575 + .data = &grsec_enable_chroot_findtask,
58576 + .maxlen = sizeof(int),
58577 + .mode = 0600,
58578 + .proc_handler = &proc_dointvec,
58579 + },
58580 +#endif
58581 +#ifdef CONFIG_GRKERNSEC_RESLOG
58582 + {
58583 + .procname = "resource_logging",
58584 + .data = &grsec_resource_logging,
58585 + .maxlen = sizeof(int),
58586 + .mode = 0600,
58587 + .proc_handler = &proc_dointvec,
58588 + },
58589 +#endif
58590 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58591 + {
58592 + .procname = "audit_ptrace",
58593 + .data = &grsec_enable_audit_ptrace,
58594 + .maxlen = sizeof(int),
58595 + .mode = 0600,
58596 + .proc_handler = &proc_dointvec,
58597 + },
58598 +#endif
58599 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58600 + {
58601 + .procname = "harden_ptrace",
58602 + .data = &grsec_enable_harden_ptrace,
58603 + .maxlen = sizeof(int),
58604 + .mode = 0600,
58605 + .proc_handler = &proc_dointvec,
58606 + },
58607 +#endif
58608 + {
58609 + .procname = "grsec_lock",
58610 + .data = &grsec_lock,
58611 + .maxlen = sizeof(int),
58612 + .mode = 0600,
58613 + .proc_handler = &proc_dointvec,
58614 + },
58615 +#endif
58616 +#ifdef CONFIG_GRKERNSEC_ROFS
58617 + {
58618 + .procname = "romount_protect",
58619 + .data = &grsec_enable_rofs,
58620 + .maxlen = sizeof(int),
58621 + .mode = 0600,
58622 + .proc_handler = &proc_dointvec_minmax,
58623 + .extra1 = &one,
58624 + .extra2 = &one,
58625 + },
58626 +#endif
58627 + { }
58628 +};
58629 +#endif
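Every entry in the table above is an integer knob with mode 0600, and gr_handle_sysctl_mod() earlier in this file refuses writes anywhere under the grsecurity directory once grsec_lock is set. A small sketch of driving the knobs from userspace follows, assuming the table is registered under /proc/sys/kernel/grsecurity/ (the registration itself is outside this hunk); write_knob() and the chosen values are illustrative only, and the program must run as root.

#include <stdio.h>

static int write_knob(const char *name, int value)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
        f = fopen(path, "w");           /* entries are mode 0600: root only */
        if (!f) {
                perror(path);
                return -1;
        }
        fprintf(f, "%d\n", value);
        return fclose(f);
}

int main(void)
{
        write_knob("tpe", 1);
        write_knob("tpe_gid", 1005);
        write_knob("grsec_lock", 1);    /* freeze the settings until reboot */
        return 0;
}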
58630 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
58631 new file mode 100644
58632 index 0000000..0dc13c3
58633 --- /dev/null
58634 +++ b/grsecurity/grsec_time.c
58635 @@ -0,0 +1,16 @@
58636 +#include <linux/kernel.h>
58637 +#include <linux/sched.h>
58638 +#include <linux/grinternal.h>
58639 +#include <linux/module.h>
58640 +
58641 +void
58642 +gr_log_timechange(void)
58643 +{
58644 +#ifdef CONFIG_GRKERNSEC_TIME
58645 + if (grsec_enable_time)
58646 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
58647 +#endif
58648 + return;
58649 +}
58650 +
58651 +EXPORT_SYMBOL(gr_log_timechange);
58652 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
58653 new file mode 100644
58654 index 0000000..07e0dc0
58655 --- /dev/null
58656 +++ b/grsecurity/grsec_tpe.c
58657 @@ -0,0 +1,73 @@
58658 +#include <linux/kernel.h>
58659 +#include <linux/sched.h>
58660 +#include <linux/file.h>
58661 +#include <linux/fs.h>
58662 +#include <linux/grinternal.h>
58663 +
58664 +extern int gr_acl_tpe_check(void);
58665 +
58666 +int
58667 +gr_tpe_allow(const struct file *file)
58668 +{
58669 +#ifdef CONFIG_GRKERNSEC
58670 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
58671 + const struct cred *cred = current_cred();
58672 + char *msg = NULL;
58673 + char *msg2 = NULL;
58674 +
58675 + // never restrict root
58676 + if (!cred->uid)
58677 + return 1;
58678 +
58679 + if (grsec_enable_tpe) {
58680 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58681 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
58682 + msg = "not being in trusted group";
58683 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
58684 + msg = "being in untrusted group";
58685 +#else
58686 + if (in_group_p(grsec_tpe_gid))
58687 + msg = "being in untrusted group";
58688 +#endif
58689 + }
58690 + if (!msg && gr_acl_tpe_check())
58691 + msg = "being in untrusted role";
58692 +
58693 + // not in any affected group/role
58694 + if (!msg)
58695 + goto next_check;
58696 +
58697 + if (inode->i_uid)
58698 + msg2 = "file in non-root-owned directory";
58699 + else if (inode->i_mode & S_IWOTH)
58700 + msg2 = "file in world-writable directory";
58701 + else if (inode->i_mode & S_IWGRP)
58702 + msg2 = "file in group-writable directory";
58703 +
58704 + if (msg && msg2) {
58705 + char fullmsg[70] = {0};
58706 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
58707 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
58708 + return 0;
58709 + }
58710 + msg = NULL;
58711 +next_check:
58712 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58713 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
58714 + return 1;
58715 +
58716 + if (inode->i_uid && (inode->i_uid != cred->uid))
58717 + msg = "directory not owned by user";
58718 + else if (inode->i_mode & S_IWOTH)
58719 + msg = "file in world-writable directory";
58720 + else if (inode->i_mode & S_IWGRP)
58721 + msg = "file in group-writable directory";
58722 +
58723 + if (msg) {
58724 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
58725 + return 0;
58726 + }
58727 +#endif
58728 +#endif
58729 + return 1;
58730 +}
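Stripped of the group and RBAC plumbing, gr_tpe_allow() above is a pair of directory-trust checks: untrusted callers may not execute out of a non-root-owned, world-writable, or group-writable directory, and with TPE_ALL enabled every non-root caller is additionally held to a directory they own that is not group- or world-writable. The sketch below restates those checks; dir_uid/dir_mode stand in for the parent directory's inode fields, and the "untrusted" flag stands in for the grsec_tpe_gid and gr_acl_tpe_check() tests, which are not modelled.

#include <stdio.h>
#include <sys/stat.h>

/* Returns a refusal reason, or NULL when execution would be allowed. */
static const char *tpe_refusal(unsigned int dir_uid, unsigned int dir_mode,
                               unsigned int uid, int untrusted, int tpe_all)
{
        if (uid == 0)
                return NULL;                    /* root is never restricted */

        if (untrusted) {
                if (dir_uid)
                        return "file in non-root-owned directory";
                if (dir_mode & S_IWOTH)
                        return "file in world-writable directory";
                if (dir_mode & S_IWGRP)
                        return "file in group-writable directory";
        }

        if (tpe_all) {                          /* CONFIG_GRKERNSEC_TPE_ALL */
                if (dir_uid && dir_uid != uid)
                        return "directory not owned by user";
                if (dir_mode & S_IWOTH)
                        return "file in world-writable directory";
                if (dir_mode & S_IWGRP)
                        return "file in group-writable directory";
        }
        return NULL;
}

int main(void)
{
        const char *why = tpe_refusal(1000, 0775, 1001, 1, 1);
        printf("%s\n", why ? why : "allowed");
        return 0;
}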
58731 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
58732 new file mode 100644
58733 index 0000000..9f7b1ac
58734 --- /dev/null
58735 +++ b/grsecurity/grsum.c
58736 @@ -0,0 +1,61 @@
58737 +#include <linux/err.h>
58738 +#include <linux/kernel.h>
58739 +#include <linux/sched.h>
58740 +#include <linux/mm.h>
58741 +#include <linux/scatterlist.h>
58742 +#include <linux/crypto.h>
58743 +#include <linux/gracl.h>
58744 +
58745 +
58746 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
58747 +#error "crypto and sha256 must be built into the kernel"
58748 +#endif
58749 +
58750 +int
58751 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
58752 +{
58753 + char *p;
58754 + struct crypto_hash *tfm;
58755 + struct hash_desc desc;
58756 + struct scatterlist sg;
58757 + unsigned char temp_sum[GR_SHA_LEN];
58758 + volatile int retval = 0;
58759 + volatile int dummy = 0;
58760 + unsigned int i;
58761 +
58762 + sg_init_table(&sg, 1);
58763 +
58764 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
58765 + if (IS_ERR(tfm)) {
58766 + /* should never happen, since sha256 should be built in */
58767 + return 1;
58768 + }
58769 +
58770 + desc.tfm = tfm;
58771 + desc.flags = 0;
58772 +
58773 + crypto_hash_init(&desc);
58774 +
58775 + p = salt;
58776 + sg_set_buf(&sg, p, GR_SALT_LEN);
58777 + crypto_hash_update(&desc, &sg, sg.length);
58778 +
58779 + p = entry->pw;
58780 + sg_set_buf(&sg, p, strlen(p));
58781 +
58782 + crypto_hash_update(&desc, &sg, sg.length);
58783 +
58784 + crypto_hash_final(&desc, temp_sum);
58785 +
58786 + memset(entry->pw, 0, GR_PW_LEN);
58787 +
58788 + for (i = 0; i < GR_SHA_LEN; i++)
58789 + if (sum[i] != temp_sum[i])
58790 + retval = 1;
58791 + else
58792 + dummy = 1; // waste a cycle
58793 +
58794 + crypto_free_hash(tfm);
58795 +
58796 + return retval;
58797 +}
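The comparison at the end of chkpw() above walks every byte of the two digests regardless of where the first mismatch falls, using volatile variables and a "waste a cycle" else branch so the loop does similar work on both paths. That loop can be lifted out on its own, as in the sketch below; sums_differ() and SHA_LEN are illustrative stand-ins for the in-kernel code and GR_SHA_LEN (32 bytes for SHA-256), and the hashing itself is not reproduced.

#include <stdio.h>
#include <string.h>

#define SHA_LEN 32

/* Returns 1 if the digests differ, inspecting all bytes either way. */
static int sums_differ(const unsigned char *a, const unsigned char *b)
{
        volatile int retval = 0;
        volatile int dummy = 0;
        unsigned int i;

        for (i = 0; i < SHA_LEN; i++)
                if (a[i] != b[i])
                        retval = 1;
                else
                        dummy = 1;      /* keep both branches doing work */

        (void)dummy;
        return retval;
}

int main(void)
{
        unsigned char x[SHA_LEN], y[SHA_LEN];

        memset(x, 0xab, sizeof(x));
        memcpy(y, x, sizeof(y));
        printf("equal:  %d\n", sums_differ(x, y));
        y[SHA_LEN - 1] ^= 1;
        printf("differ: %d\n", sums_differ(x, y));
        return 0;
}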
58798 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
58799 index f1c8ca6..b5c1cc7 100644
58800 --- a/include/acpi/acpi_bus.h
58801 +++ b/include/acpi/acpi_bus.h
58802 @@ -107,7 +107,7 @@ struct acpi_device_ops {
58803 acpi_op_bind bind;
58804 acpi_op_unbind unbind;
58805 acpi_op_notify notify;
58806 -};
58807 +} __no_const;
58808
58809 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
58810
58811 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
58812 index b7babf0..71e4e74 100644
58813 --- a/include/asm-generic/atomic-long.h
58814 +++ b/include/asm-generic/atomic-long.h
58815 @@ -22,6 +22,12 @@
58816
58817 typedef atomic64_t atomic_long_t;
58818
58819 +#ifdef CONFIG_PAX_REFCOUNT
58820 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
58821 +#else
58822 +typedef atomic64_t atomic_long_unchecked_t;
58823 +#endif
58824 +
58825 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58826
58827 static inline long atomic_long_read(atomic_long_t *l)
58828 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58829 return (long)atomic64_read(v);
58830 }
58831
58832 +#ifdef CONFIG_PAX_REFCOUNT
58833 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58834 +{
58835 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58836 +
58837 + return (long)atomic64_read_unchecked(v);
58838 +}
58839 +#endif
58840 +
58841 static inline void atomic_long_set(atomic_long_t *l, long i)
58842 {
58843 atomic64_t *v = (atomic64_t *)l;
58844 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58845 atomic64_set(v, i);
58846 }
58847
58848 +#ifdef CONFIG_PAX_REFCOUNT
58849 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58850 +{
58851 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58852 +
58853 + atomic64_set_unchecked(v, i);
58854 +}
58855 +#endif
58856 +
58857 static inline void atomic_long_inc(atomic_long_t *l)
58858 {
58859 atomic64_t *v = (atomic64_t *)l;
58860 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58861 atomic64_inc(v);
58862 }
58863
58864 +#ifdef CONFIG_PAX_REFCOUNT
58865 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58866 +{
58867 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58868 +
58869 + atomic64_inc_unchecked(v);
58870 +}
58871 +#endif
58872 +
58873 static inline void atomic_long_dec(atomic_long_t *l)
58874 {
58875 atomic64_t *v = (atomic64_t *)l;
58876 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58877 atomic64_dec(v);
58878 }
58879
58880 +#ifdef CONFIG_PAX_REFCOUNT
58881 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58882 +{
58883 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58884 +
58885 + atomic64_dec_unchecked(v);
58886 +}
58887 +#endif
58888 +
58889 static inline void atomic_long_add(long i, atomic_long_t *l)
58890 {
58891 atomic64_t *v = (atomic64_t *)l;
58892 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58893 atomic64_add(i, v);
58894 }
58895
58896 +#ifdef CONFIG_PAX_REFCOUNT
58897 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58898 +{
58899 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58900 +
58901 + atomic64_add_unchecked(i, v);
58902 +}
58903 +#endif
58904 +
58905 static inline void atomic_long_sub(long i, atomic_long_t *l)
58906 {
58907 atomic64_t *v = (atomic64_t *)l;
58908 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58909 atomic64_sub(i, v);
58910 }
58911
58912 +#ifdef CONFIG_PAX_REFCOUNT
58913 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58914 +{
58915 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58916 +
58917 + atomic64_sub_unchecked(i, v);
58918 +}
58919 +#endif
58920 +
58921 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58922 {
58923 atomic64_t *v = (atomic64_t *)l;
58924 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58925 return (long)atomic64_inc_return(v);
58926 }
58927
58928 +#ifdef CONFIG_PAX_REFCOUNT
58929 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58930 +{
58931 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58932 +
58933 + return (long)atomic64_inc_return_unchecked(v);
58934 +}
58935 +#endif
58936 +
58937 static inline long atomic_long_dec_return(atomic_long_t *l)
58938 {
58939 atomic64_t *v = (atomic64_t *)l;
58940 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58941
58942 typedef atomic_t atomic_long_t;
58943
58944 +#ifdef CONFIG_PAX_REFCOUNT
58945 +typedef atomic_unchecked_t atomic_long_unchecked_t;
58946 +#else
58947 +typedef atomic_t atomic_long_unchecked_t;
58948 +#endif
58949 +
58950 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
58951 static inline long atomic_long_read(atomic_long_t *l)
58952 {
58953 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58954 return (long)atomic_read(v);
58955 }
58956
58957 +#ifdef CONFIG_PAX_REFCOUNT
58958 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58959 +{
58960 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58961 +
58962 + return (long)atomic_read_unchecked(v);
58963 +}
58964 +#endif
58965 +
58966 static inline void atomic_long_set(atomic_long_t *l, long i)
58967 {
58968 atomic_t *v = (atomic_t *)l;
58969 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58970 atomic_set(v, i);
58971 }
58972
58973 +#ifdef CONFIG_PAX_REFCOUNT
58974 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58975 +{
58976 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58977 +
58978 + atomic_set_unchecked(v, i);
58979 +}
58980 +#endif
58981 +
58982 static inline void atomic_long_inc(atomic_long_t *l)
58983 {
58984 atomic_t *v = (atomic_t *)l;
58985 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58986 atomic_inc(v);
58987 }
58988
58989 +#ifdef CONFIG_PAX_REFCOUNT
58990 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58991 +{
58992 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58993 +
58994 + atomic_inc_unchecked(v);
58995 +}
58996 +#endif
58997 +
58998 static inline void atomic_long_dec(atomic_long_t *l)
58999 {
59000 atomic_t *v = (atomic_t *)l;
59001 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59002 atomic_dec(v);
59003 }
59004
59005 +#ifdef CONFIG_PAX_REFCOUNT
59006 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59007 +{
59008 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59009 +
59010 + atomic_dec_unchecked(v);
59011 +}
59012 +#endif
59013 +
59014 static inline void atomic_long_add(long i, atomic_long_t *l)
59015 {
59016 atomic_t *v = (atomic_t *)l;
59017 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59018 atomic_add(i, v);
59019 }
59020
59021 +#ifdef CONFIG_PAX_REFCOUNT
59022 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59023 +{
59024 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59025 +
59026 + atomic_add_unchecked(i, v);
59027 +}
59028 +#endif
59029 +
59030 static inline void atomic_long_sub(long i, atomic_long_t *l)
59031 {
59032 atomic_t *v = (atomic_t *)l;
59033 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59034 atomic_sub(i, v);
59035 }
59036
59037 +#ifdef CONFIG_PAX_REFCOUNT
59038 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59039 +{
59040 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59041 +
59042 + atomic_sub_unchecked(i, v);
59043 +}
59044 +#endif
59045 +
59046 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59047 {
59048 atomic_t *v = (atomic_t *)l;
59049 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59050 return (long)atomic_inc_return(v);
59051 }
59052
59053 +#ifdef CONFIG_PAX_REFCOUNT
59054 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59055 +{
59056 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59057 +
59058 + return (long)atomic_inc_return_unchecked(v);
59059 +}
59060 +#endif
59061 +
59062 static inline long atomic_long_dec_return(atomic_long_t *l)
59063 {
59064 atomic_t *v = (atomic_t *)l;
59065 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59066
59067 #endif /* BITS_PER_LONG == 64 */
59068
59069 +#ifdef CONFIG_PAX_REFCOUNT
59070 +static inline void pax_refcount_needs_these_functions(void)
59071 +{
59072 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
59073 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59074 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59075 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59076 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59077 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59078 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59079 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59080 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59081 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59082 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59083 +
59084 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59085 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59086 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59087 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59088 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59089 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59090 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59091 +}
59092 +#else
59093 +#define atomic_read_unchecked(v) atomic_read(v)
59094 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59095 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59096 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59097 +#define atomic_inc_unchecked(v) atomic_inc(v)
59098 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59099 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59100 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59101 +#define atomic_dec_unchecked(v) atomic_dec(v)
59102 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59103 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59104 +
59105 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
59106 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59107 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59108 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59109 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59110 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59111 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59112 +#endif
59113 +
59114 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
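
[Editor's note] The hunk above mirrors every atomic_long helper with an *_unchecked variant: under CONFIG_PAX_REFCOUNT the plain forms gain overflow detection for reference counts, while the unchecked forms are meant for counters where wraparound is harmless; without the option they simply alias the normal helpers via the #define block. A minimal kernel-style sketch of that split, assuming a hypothetical driver statistics structure (foo_stats and the helpers are illustrative names, not part of this patch, and this is not standalone code):

struct foo_stats {
	atomic_long_t refs;			/* lifetime refcount: keep overflow checking */
	atomic_long_unchecked_t rx_packets;	/* pure statistic: wraparound is harmless */
};

static inline void foo_account_rx(struct foo_stats *st)
{
	atomic_long_inc_unchecked(&st->rx_packets);	/* exempt from the refcount check */
}

static inline void foo_get(struct foo_stats *st)
{
	atomic_long_inc(&st->refs);	/* checked when CONFIG_PAX_REFCOUNT is enabled */
}
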
59115 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59116 index b18ce4f..2ee2843 100644
59117 --- a/include/asm-generic/atomic64.h
59118 +++ b/include/asm-generic/atomic64.h
59119 @@ -16,6 +16,8 @@ typedef struct {
59120 long long counter;
59121 } atomic64_t;
59122
59123 +typedef atomic64_t atomic64_unchecked_t;
59124 +
59125 #define ATOMIC64_INIT(i) { (i) }
59126
59127 extern long long atomic64_read(const atomic64_t *v);
59128 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59129 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59130 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59131
59132 +#define atomic64_read_unchecked(v) atomic64_read(v)
59133 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59134 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59135 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59136 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59137 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
59138 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59139 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
59140 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59141 +
59142 #endif /* _ASM_GENERIC_ATOMIC64_H */
59143 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59144 index 1bfcfe5..e04c5c9 100644
59145 --- a/include/asm-generic/cache.h
59146 +++ b/include/asm-generic/cache.h
59147 @@ -6,7 +6,7 @@
59148 * cache lines need to provide their own cache.h.
59149 */
59150
59151 -#define L1_CACHE_SHIFT 5
59152 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59153 +#define L1_CACHE_SHIFT 5UL
59154 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59155
59156 #endif /* __ASM_GENERIC_CACHE_H */
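
[Editor's note] The cache.h change only gives L1_CACHE_SHIFT/L1_CACHE_BYTES an unsigned long type, so alignment arithmetic derived from them stays in unsigned long instead of int. A tiny standalone illustration of the type difference; the constants are duplicated locally for the demo rather than taken from the kernel header:

#include <stdio.h>

#define OLD_L1_CACHE_BYTES (1 << 5)	/* int-typed constant */
#define NEW_L1_CACHE_BYTES (1UL << 5UL)	/* unsigned-long-typed constant */

int main(void)
{
	/* On an LP64 target the old constant is a 4-byte int and the new one
	 * an 8-byte unsigned long, so expressions built on it no longer go
	 * through int arithmetic and the usual integer promotions. */
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(OLD_L1_CACHE_BYTES), sizeof(NEW_L1_CACHE_BYTES));
	return 0;
}
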
59157 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59158 index 0d68a1e..b74a761 100644
59159 --- a/include/asm-generic/emergency-restart.h
59160 +++ b/include/asm-generic/emergency-restart.h
59161 @@ -1,7 +1,7 @@
59162 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59163 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59164
59165 -static inline void machine_emergency_restart(void)
59166 +static inline __noreturn void machine_emergency_restart(void)
59167 {
59168 machine_restart(NULL);
59169 }
59170 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59171 index 0232ccb..13d9165 100644
59172 --- a/include/asm-generic/kmap_types.h
59173 +++ b/include/asm-generic/kmap_types.h
59174 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59175 KMAP_D(17) KM_NMI,
59176 KMAP_D(18) KM_NMI_PTE,
59177 KMAP_D(19) KM_KDB,
59178 +KMAP_D(20) KM_CLEARPAGE,
59179 /*
59180 * Remember to update debug_kmap_atomic() when adding new kmap types!
59181 */
59182 -KMAP_D(20) KM_TYPE_NR
59183 +KMAP_D(21) KM_TYPE_NR
59184 };
59185
59186 #undef KMAP_D
59187 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59188 index 9ceb03b..2efbcbd 100644
59189 --- a/include/asm-generic/local.h
59190 +++ b/include/asm-generic/local.h
59191 @@ -39,6 +39,7 @@ typedef struct
59192 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59193 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59194 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59195 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59196
59197 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59198 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59199 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59200 index 725612b..9cc513a 100644
59201 --- a/include/asm-generic/pgtable-nopmd.h
59202 +++ b/include/asm-generic/pgtable-nopmd.h
59203 @@ -1,14 +1,19 @@
59204 #ifndef _PGTABLE_NOPMD_H
59205 #define _PGTABLE_NOPMD_H
59206
59207 -#ifndef __ASSEMBLY__
59208 -
59209 #include <asm-generic/pgtable-nopud.h>
59210
59211 -struct mm_struct;
59212 -
59213 #define __PAGETABLE_PMD_FOLDED
59214
59215 +#define PMD_SHIFT PUD_SHIFT
59216 +#define PTRS_PER_PMD 1
59217 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59218 +#define PMD_MASK (~(PMD_SIZE-1))
59219 +
59220 +#ifndef __ASSEMBLY__
59221 +
59222 +struct mm_struct;
59223 +
59224 /*
59225 * Having the pmd type consist of a pud gets the size right, and allows
59226 * us to conceptually access the pud entry that this pmd is folded into
59227 @@ -16,11 +21,6 @@ struct mm_struct;
59228 */
59229 typedef struct { pud_t pud; } pmd_t;
59230
59231 -#define PMD_SHIFT PUD_SHIFT
59232 -#define PTRS_PER_PMD 1
59233 -#define PMD_SIZE (1UL << PMD_SHIFT)
59234 -#define PMD_MASK (~(PMD_SIZE-1))
59235 -
59236 /*
59237 * The "pud_xxx()" functions here are trivial for a folded two-level
59238 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59239 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59240 index 810431d..0ec4804f 100644
59241 --- a/include/asm-generic/pgtable-nopud.h
59242 +++ b/include/asm-generic/pgtable-nopud.h
59243 @@ -1,10 +1,15 @@
59244 #ifndef _PGTABLE_NOPUD_H
59245 #define _PGTABLE_NOPUD_H
59246
59247 -#ifndef __ASSEMBLY__
59248 -
59249 #define __PAGETABLE_PUD_FOLDED
59250
59251 +#define PUD_SHIFT PGDIR_SHIFT
59252 +#define PTRS_PER_PUD 1
59253 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59254 +#define PUD_MASK (~(PUD_SIZE-1))
59255 +
59256 +#ifndef __ASSEMBLY__
59257 +
59258 /*
59259 * Having the pud type consist of a pgd gets the size right, and allows
59260 * us to conceptually access the pgd entry that this pud is folded into
59261 @@ -12,11 +17,6 @@
59262 */
59263 typedef struct { pgd_t pgd; } pud_t;
59264
59265 -#define PUD_SHIFT PGDIR_SHIFT
59266 -#define PTRS_PER_PUD 1
59267 -#define PUD_SIZE (1UL << PUD_SHIFT)
59268 -#define PUD_MASK (~(PUD_SIZE-1))
59269 -
59270 /*
59271 * The "pgd_xxx()" functions here are trivial for a folded two-level
59272 * setup: the pud is never bad, and a pud always exists (as it's folded
59273 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
59274 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
59275
59276 #define pgd_populate(mm, pgd, pud) do { } while (0)
59277 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
59278 /*
59279 * (puds are folded into pgds so this doesn't get actually called,
59280 * but the define is needed for a generic inline function.)
59281 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59282 index 125c54e..e95c18e 100644
59283 --- a/include/asm-generic/pgtable.h
59284 +++ b/include/asm-generic/pgtable.h
59285 @@ -446,6 +446,18 @@ static inline int pmd_write(pmd_t pmd)
59286 #endif /* __HAVE_ARCH_PMD_WRITE */
59287 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
59288
59289 +#ifndef __HAVE_ARCH_READ_PMD_ATOMIC
59290 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
59291 +{
59292 + /*
59293 + * Depend on compiler for an atomic pmd read. NOTE: this is
59294 + * only going to work, if the pmdval_t isn't larger than
59295 + * an unsigned long.
59296 + */
59297 + return *pmdp;
59298 +}
59299 +#endif /* __HAVE_ARCH_READ_PMD_ATOMIC */
59300 +
59301 /*
59302 * This function is meant to be used by sites walking pagetables with
59303 * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
59304 @@ -459,11 +471,17 @@ static inline int pmd_write(pmd_t pmd)
59305 * undefined so behaving like if the pmd was none is safe (because it
59306 * can return none anyway). The compiler level barrier() is critically
59307 * important to compute the two checks atomically on the same pmdval.
59308 + *
59309 + * For 32bit kernels with a 64bit large pmd_t this automatically takes
59310 + * care of reading the pmd atomically to avoid SMP race conditions
59311 + * against pmd_populate() when the mmap_sem is hold for reading by the
59312 + * caller (a special atomic read not done by "gcc" as in the generic
59313 + * version above, is also needed when THP is disabled because the page
59314 + * fault can populate the pmd from under us).
59315 */
59316 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
59317 {
59318 - /* depend on compiler for an atomic pmd read */
59319 - pmd_t pmdval = *pmd;
59320 + pmd_t pmdval = read_pmd_atomic(pmd);
59321 /*
59322 * The barrier will stabilize the pmdval in a register or on
59323 * the stack so that it will stop changing under the code.
59324 @@ -503,6 +521,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
59325 #endif
59326 }
59327
59328 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59329 +static inline unsigned long pax_open_kernel(void) { return 0; }
59330 +#endif
59331 +
59332 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59333 +static inline unsigned long pax_close_kernel(void) { return 0; }
59334 +#endif
59335 +
59336 #endif /* CONFIG_MMU */
59337
59338 #endif /* !__ASSEMBLY__ */
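
[Editor's note] The pgtable.h hunk pulls the pmd snapshot out into read_pmd_atomic() so a 32-bit configuration with a 64-bit pmd can override it via __HAVE_ARCH_READ_PMD_ATOMIC, and it adds no-op fallbacks for pax_open_kernel()/pax_close_kernel(). A small userspace model of the snapshot-then-barrier pattern used by pmd_none_or_trans_huge_or_clear_bad(); the types and the zero check are simplified stand-ins, not the kernel's:

#include <stdint.h>

typedef struct { uint64_t val; } pmd_model_t;

#define barrier() __asm__ __volatile__("" ::: "memory")

static inline pmd_model_t read_pmd_model(pmd_model_t *pmdp)
{
	/* A single naturally aligned load on a 64-bit build; a 32-bit build
	 * with a 64-bit pmd would need to override this with a genuinely
	 * atomic read, which is the point of the new hook. */
	return *pmdp;
}

static int pmd_none_model(pmd_model_t *pmdp)
{
	pmd_model_t pmdval = read_pmd_model(pmdp);

	barrier();	/* make the later checks run on the stable local copy */
	return pmdval.val == 0;
}

int main(void)
{
	pmd_model_t pmd = { 0 };
	return pmd_none_model(&pmd) ? 0 : 1;
}
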
59339 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59340 index 8aeadf6..f1dc019 100644
59341 --- a/include/asm-generic/vmlinux.lds.h
59342 +++ b/include/asm-generic/vmlinux.lds.h
59343 @@ -218,6 +218,7 @@
59344 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59345 VMLINUX_SYMBOL(__start_rodata) = .; \
59346 *(.rodata) *(.rodata.*) \
59347 + *(.data..read_only) \
59348 *(__vermagic) /* Kernel version magic */ \
59349 . = ALIGN(8); \
59350 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59351 @@ -716,17 +717,18 @@
59352 * section in the linker script will go there too. @phdr should have
59353 * a leading colon.
59354 *
59355 - * Note that this macros defines __per_cpu_load as an absolute symbol.
59356 + * Note that this macro defines per_cpu_load as an absolute symbol.
59357 * If there is no need to put the percpu section at a predetermined
59358 * address, use PERCPU_SECTION.
59359 */
59360 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59361 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59362 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59363 + per_cpu_load = .; \
59364 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59365 - LOAD_OFFSET) { \
59366 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59367 PERCPU_INPUT(cacheline) \
59368 } phdr \
59369 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59370 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59371
59372 /**
59373 * PERCPU_SECTION - define output section for percpu area, simple version
59374 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59375 index dd73104..fde86bd 100644
59376 --- a/include/drm/drmP.h
59377 +++ b/include/drm/drmP.h
59378 @@ -72,6 +72,7 @@
59379 #include <linux/workqueue.h>
59380 #include <linux/poll.h>
59381 #include <asm/pgalloc.h>
59382 +#include <asm/local.h>
59383 #include "drm.h"
59384
59385 #include <linux/idr.h>
59386 @@ -1074,7 +1075,7 @@ struct drm_device {
59387
59388 /** \name Usage Counters */
59389 /*@{ */
59390 - int open_count; /**< Outstanding files open */
59391 + local_t open_count; /**< Outstanding files open */
59392 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59393 atomic_t vma_count; /**< Outstanding vma areas open */
59394 int buf_use; /**< Buffers in use -- cannot alloc */
59395 @@ -1085,7 +1086,7 @@ struct drm_device {
59396 /*@{ */
59397 unsigned long counters;
59398 enum drm_stat_type types[15];
59399 - atomic_t counts[15];
59400 + atomic_unchecked_t counts[15];
59401 /*@} */
59402
59403 struct list_head filelist;
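
[Editor's note] drmP.h converts open_count from a plain int to local_t, which is why <asm/local.h> is now included and why the asm-generic/local.h hunk earlier adds a local_dec_return() fallback. A hedged kernel-style sketch of how such a counter is typically manipulated; foo_open/foo_release/foo_lastclose are illustrative names, not DRM functions:

static int foo_open(struct drm_device *dev)
{
	local_inc(&dev->open_count);		/* was: dev->open_count++ */
	return 0;
}

static void foo_release(struct drm_device *dev)
{
	if (!local_dec_return(&dev->open_count))	/* was: !--dev->open_count */
		foo_lastclose(dev);			/* hypothetical last-close hook */
}
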
59404 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59405 index 37515d1..34fa8b0 100644
59406 --- a/include/drm/drm_crtc_helper.h
59407 +++ b/include/drm/drm_crtc_helper.h
59408 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59409
59410 /* disable crtc when not in use - more explicit than dpms off */
59411 void (*disable)(struct drm_crtc *crtc);
59412 -};
59413 +} __no_const;
59414
59415 struct drm_encoder_helper_funcs {
59416 void (*dpms)(struct drm_encoder *encoder, int mode);
59417 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59418 struct drm_connector *connector);
59419 /* disable encoder when not in use - more explicit than dpms off */
59420 void (*disable)(struct drm_encoder *encoder);
59421 -};
59422 +} __no_const;
59423
59424 struct drm_connector_helper_funcs {
59425 int (*get_modes)(struct drm_connector *connector);
59426 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59427 index d6d1da4..fdd1ac5 100644
59428 --- a/include/drm/ttm/ttm_memory.h
59429 +++ b/include/drm/ttm/ttm_memory.h
59430 @@ -48,7 +48,7 @@
59431
59432 struct ttm_mem_shrink {
59433 int (*do_shrink) (struct ttm_mem_shrink *);
59434 -};
59435 +} __no_const;
59436
59437 /**
59438 * struct ttm_mem_global - Global memory accounting structure.
59439 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59440 index e86dfca..40cc55f 100644
59441 --- a/include/linux/a.out.h
59442 +++ b/include/linux/a.out.h
59443 @@ -39,6 +39,14 @@ enum machine_type {
59444 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59445 };
59446
59447 +/* Constants for the N_FLAGS field */
59448 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59449 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59450 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59451 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59452 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59453 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59454 +
59455 #if !defined (N_MAGIC)
59456 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59457 #endif
59458 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59459 index 06fd4bb..1caec0d 100644
59460 --- a/include/linux/atmdev.h
59461 +++ b/include/linux/atmdev.h
59462 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59463 #endif
59464
59465 struct k_atm_aal_stats {
59466 -#define __HANDLE_ITEM(i) atomic_t i
59467 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
59468 __AAL_STAT_ITEMS
59469 #undef __HANDLE_ITEM
59470 };
59471 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59472 index 366422b..1fa7f84 100644
59473 --- a/include/linux/binfmts.h
59474 +++ b/include/linux/binfmts.h
59475 @@ -89,6 +89,7 @@ struct linux_binfmt {
59476 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59477 int (*load_shlib)(struct file *);
59478 int (*core_dump)(struct coredump_params *cprm);
59479 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59480 unsigned long min_coredump; /* minimal dump size */
59481 };
59482
59483 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59484 index 4d4ac24..2c3ccce 100644
59485 --- a/include/linux/blkdev.h
59486 +++ b/include/linux/blkdev.h
59487 @@ -1376,7 +1376,7 @@ struct block_device_operations {
59488 /* this callback is with swap_lock and sometimes page table lock held */
59489 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59490 struct module *owner;
59491 -};
59492 +} __do_const;
59493
59494 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59495 unsigned long);
59496 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59497 index 4d1a074..88f929a 100644
59498 --- a/include/linux/blktrace_api.h
59499 +++ b/include/linux/blktrace_api.h
59500 @@ -162,7 +162,7 @@ struct blk_trace {
59501 struct dentry *dir;
59502 struct dentry *dropped_file;
59503 struct dentry *msg_file;
59504 - atomic_t dropped;
59505 + atomic_unchecked_t dropped;
59506 };
59507
59508 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59509 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59510 index 83195fb..0b0f77d 100644
59511 --- a/include/linux/byteorder/little_endian.h
59512 +++ b/include/linux/byteorder/little_endian.h
59513 @@ -42,51 +42,51 @@
59514
59515 static inline __le64 __cpu_to_le64p(const __u64 *p)
59516 {
59517 - return (__force __le64)*p;
59518 + return (__force const __le64)*p;
59519 }
59520 static inline __u64 __le64_to_cpup(const __le64 *p)
59521 {
59522 - return (__force __u64)*p;
59523 + return (__force const __u64)*p;
59524 }
59525 static inline __le32 __cpu_to_le32p(const __u32 *p)
59526 {
59527 - return (__force __le32)*p;
59528 + return (__force const __le32)*p;
59529 }
59530 static inline __u32 __le32_to_cpup(const __le32 *p)
59531 {
59532 - return (__force __u32)*p;
59533 + return (__force const __u32)*p;
59534 }
59535 static inline __le16 __cpu_to_le16p(const __u16 *p)
59536 {
59537 - return (__force __le16)*p;
59538 + return (__force const __le16)*p;
59539 }
59540 static inline __u16 __le16_to_cpup(const __le16 *p)
59541 {
59542 - return (__force __u16)*p;
59543 + return (__force const __u16)*p;
59544 }
59545 static inline __be64 __cpu_to_be64p(const __u64 *p)
59546 {
59547 - return (__force __be64)__swab64p(p);
59548 + return (__force const __be64)__swab64p(p);
59549 }
59550 static inline __u64 __be64_to_cpup(const __be64 *p)
59551 {
59552 - return __swab64p((__u64 *)p);
59553 + return __swab64p((const __u64 *)p);
59554 }
59555 static inline __be32 __cpu_to_be32p(const __u32 *p)
59556 {
59557 - return (__force __be32)__swab32p(p);
59558 + return (__force const __be32)__swab32p(p);
59559 }
59560 static inline __u32 __be32_to_cpup(const __be32 *p)
59561 {
59562 - return __swab32p((__u32 *)p);
59563 + return __swab32p((const __u32 *)p);
59564 }
59565 static inline __be16 __cpu_to_be16p(const __u16 *p)
59566 {
59567 - return (__force __be16)__swab16p(p);
59568 + return (__force const __be16)__swab16p(p);
59569 }
59570 static inline __u16 __be16_to_cpup(const __be16 *p)
59571 {
59572 - return __swab16p((__u16 *)p);
59573 + return __swab16p((const __u16 *)p);
59574 }
59575 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
59576 #define __le64_to_cpus(x) do { (void)(x); } while (0)
59577 diff --git a/include/linux/cache.h b/include/linux/cache.h
59578 index 4c57065..4307975 100644
59579 --- a/include/linux/cache.h
59580 +++ b/include/linux/cache.h
59581 @@ -16,6 +16,10 @@
59582 #define __read_mostly
59583 #endif
59584
59585 +#ifndef __read_only
59586 +#define __read_only __read_mostly
59587 +#endif
59588 +
59589 #ifndef ____cacheline_aligned
59590 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
59591 #endif
59592 diff --git a/include/linux/capability.h b/include/linux/capability.h
59593 index 12d52de..b5f7fa7 100644
59594 --- a/include/linux/capability.h
59595 +++ b/include/linux/capability.h
59596 @@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
59597 extern bool capable(int cap);
59598 extern bool ns_capable(struct user_namespace *ns, int cap);
59599 extern bool nsown_capable(int cap);
59600 +extern bool capable_nolog(int cap);
59601 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
59602
59603 /* audit system wants to get cap info from files as well */
59604 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
59605 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
59606 index 42e55de..1cd0e66 100644
59607 --- a/include/linux/cleancache.h
59608 +++ b/include/linux/cleancache.h
59609 @@ -31,7 +31,7 @@ struct cleancache_ops {
59610 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
59611 void (*invalidate_inode)(int, struct cleancache_filekey);
59612 void (*invalidate_fs)(int);
59613 -};
59614 +} __no_const;
59615
59616 extern struct cleancache_ops
59617 cleancache_register_ops(struct cleancache_ops *ops);
59618 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
59619 index 2f40791..a62d196 100644
59620 --- a/include/linux/compiler-gcc4.h
59621 +++ b/include/linux/compiler-gcc4.h
59622 @@ -32,6 +32,16 @@
59623 #define __linktime_error(message) __attribute__((__error__(message)))
59624
59625 #if __GNUC_MINOR__ >= 5
59626 +
59627 +#ifdef CONSTIFY_PLUGIN
59628 +#define __no_const __attribute__((no_const))
59629 +#define __do_const __attribute__((do_const))
59630 +#endif
59631 +
59632 +#ifdef SIZE_OVERFLOW_PLUGIN
59633 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
59634 +#endif
59635 +
59636 /*
59637 * Mark a position in code as unreachable. This can be used to
59638 * suppress control flow warnings after asm blocks that transfer
59639 @@ -47,6 +57,11 @@
59640 #define __noclone __attribute__((__noclone__))
59641
59642 #endif
59643 +
59644 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
59645 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
59646 +#define __bos0(ptr) __bos((ptr), 0)
59647 +#define __bos1(ptr) __bos((ptr), 1)
59648 #endif
59649
59650 #if __GNUC_MINOR__ > 0
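
[Editor's note] compiler-gcc4.h wires __alloc_size() to GCC's alloc_size attribute and __bos()/__bos0()/__bos1() to __builtin_object_size(), so size-checking code can learn how big an allocation is. A standalone userspace sketch under those assumptions; my_alloc is a made-up allocator, and with optimization enabled GCC can report 16 here (at -O0 the builtin returns (size_t)-1 for "unknown"):

#include <stdio.h>
#include <stdlib.h>

#define __alloc_size(...)	__attribute__((alloc_size(__VA_ARGS__)))
#define __bos0(ptr)		__builtin_object_size((ptr), 0)

static void *my_alloc(size_t n) __alloc_size(1);
static void *my_alloc(size_t n)
{
	return malloc(n);
}

int main(void)
{
	char *p = my_alloc(16);

	/* alloc_size(1) tells the compiler that argument 1 is the object
	 * size, so __builtin_object_size() and fortify-style checks can
	 * reason about buffer overflows into p. */
	printf("known object size: %zu\n", (size_t)__bos0(p));
	free(p);
	return 0;
}
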
59651 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
59652 index 923d093..726c17f 100644
59653 --- a/include/linux/compiler.h
59654 +++ b/include/linux/compiler.h
59655 @@ -5,31 +5,62 @@
59656
59657 #ifdef __CHECKER__
59658 # define __user __attribute__((noderef, address_space(1)))
59659 +# define __force_user __force __user
59660 # define __kernel __attribute__((address_space(0)))
59661 +# define __force_kernel __force __kernel
59662 # define __safe __attribute__((safe))
59663 # define __force __attribute__((force))
59664 # define __nocast __attribute__((nocast))
59665 # define __iomem __attribute__((noderef, address_space(2)))
59666 +# define __force_iomem __force __iomem
59667 # define __acquires(x) __attribute__((context(x,0,1)))
59668 # define __releases(x) __attribute__((context(x,1,0)))
59669 # define __acquire(x) __context__(x,1)
59670 # define __release(x) __context__(x,-1)
59671 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
59672 # define __percpu __attribute__((noderef, address_space(3)))
59673 +# define __force_percpu __force __percpu
59674 #ifdef CONFIG_SPARSE_RCU_POINTER
59675 # define __rcu __attribute__((noderef, address_space(4)))
59676 +# define __force_rcu __force __rcu
59677 #else
59678 # define __rcu
59679 +# define __force_rcu
59680 #endif
59681 extern void __chk_user_ptr(const volatile void __user *);
59682 extern void __chk_io_ptr(const volatile void __iomem *);
59683 +#elif defined(CHECKER_PLUGIN)
59684 +//# define __user
59685 +//# define __force_user
59686 +//# define __kernel
59687 +//# define __force_kernel
59688 +# define __safe
59689 +# define __force
59690 +# define __nocast
59691 +# define __iomem
59692 +# define __force_iomem
59693 +# define __chk_user_ptr(x) (void)0
59694 +# define __chk_io_ptr(x) (void)0
59695 +# define __builtin_warning(x, y...) (1)
59696 +# define __acquires(x)
59697 +# define __releases(x)
59698 +# define __acquire(x) (void)0
59699 +# define __release(x) (void)0
59700 +# define __cond_lock(x,c) (c)
59701 +# define __percpu
59702 +# define __force_percpu
59703 +# define __rcu
59704 +# define __force_rcu
59705 #else
59706 # define __user
59707 +# define __force_user
59708 # define __kernel
59709 +# define __force_kernel
59710 # define __safe
59711 # define __force
59712 # define __nocast
59713 # define __iomem
59714 +# define __force_iomem
59715 # define __chk_user_ptr(x) (void)0
59716 # define __chk_io_ptr(x) (void)0
59717 # define __builtin_warning(x, y...) (1)
59718 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
59719 # define __release(x) (void)0
59720 # define __cond_lock(x,c) (c)
59721 # define __percpu
59722 +# define __force_percpu
59723 # define __rcu
59724 +# define __force_rcu
59725 #endif
59726
59727 #ifdef __KERNEL__
59728 @@ -264,6 +297,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59729 # define __attribute_const__ /* unimplemented */
59730 #endif
59731
59732 +#ifndef __no_const
59733 +# define __no_const
59734 +#endif
59735 +
59736 +#ifndef __do_const
59737 +# define __do_const
59738 +#endif
59739 +
59740 +#ifndef __size_overflow
59741 +# define __size_overflow(...)
59742 +#endif
59743 +
59744 /*
59745 * Tell gcc if a function is cold. The compiler will assume any path
59746 * directly leading to the call is unlikely.
59747 @@ -273,6 +318,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59748 #define __cold
59749 #endif
59750
59751 +#ifndef __alloc_size
59752 +#define __alloc_size(...)
59753 +#endif
59754 +
59755 +#ifndef __bos
59756 +#define __bos(ptr, arg)
59757 +#endif
59758 +
59759 +#ifndef __bos0
59760 +#define __bos0(ptr)
59761 +#endif
59762 +
59763 +#ifndef __bos1
59764 +#define __bos1(ptr)
59765 +#endif
59766 +
59767 /* Simple shorthand for a section definition */
59768 #ifndef __section
59769 # define __section(S) __attribute__ ((__section__(#S)))
59770 @@ -308,6 +369,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59771 * use is to mediate communication between process-level code and irq/NMI
59772 * handlers, all running on the same CPU.
59773 */
59774 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
59775 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
59776 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
59777
59778 #endif /* __LINUX_COMPILER_H */
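
[Editor's note] compiler.h adds the __force_* sparse annotations, no-op fallbacks for the plugin attributes, and splits ACCESS_ONCE() into a const-qualified read form plus ACCESS_ONCE_RW() for deliberate writes, so data constified by the plugin can still be read through it. A standalone illustration of the read/write split; shared_flag is just a demo variable:

#include <stdio.h>

#define ACCESS_ONCE(x)		(*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile __typeof__(x) *)&(x))

static int shared_flag;

int main(void)
{
	int seen = ACCESS_ONCE(shared_flag);	/* reads use the const form */

	ACCESS_ONCE_RW(shared_flag) = 1;	/* writes must use the _RW form;
						 * assigning through ACCESS_ONCE()
						 * is now a compile-time error */
	printf("%d %d\n", seen, shared_flag);
	return 0;
}
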
59779 diff --git a/include/linux/cred.h b/include/linux/cred.h
59780 index adadf71..6af5560 100644
59781 --- a/include/linux/cred.h
59782 +++ b/include/linux/cred.h
59783 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
59784 static inline void validate_process_creds(void)
59785 {
59786 }
59787 +static inline void validate_task_creds(struct task_struct *task)
59788 +{
59789 +}
59790 #endif
59791
59792 /**
59793 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
59794 index b92eadf..b4ecdc1 100644
59795 --- a/include/linux/crypto.h
59796 +++ b/include/linux/crypto.h
59797 @@ -373,7 +373,7 @@ struct cipher_tfm {
59798 const u8 *key, unsigned int keylen);
59799 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59800 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59801 -};
59802 +} __no_const;
59803
59804 struct hash_tfm {
59805 int (*init)(struct hash_desc *desc);
59806 @@ -394,13 +394,13 @@ struct compress_tfm {
59807 int (*cot_decompress)(struct crypto_tfm *tfm,
59808 const u8 *src, unsigned int slen,
59809 u8 *dst, unsigned int *dlen);
59810 -};
59811 +} __no_const;
59812
59813 struct rng_tfm {
59814 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
59815 unsigned int dlen);
59816 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
59817 -};
59818 +} __no_const;
59819
59820 #define crt_ablkcipher crt_u.ablkcipher
59821 #define crt_aead crt_u.aead
59822 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
59823 index 7925bf0..d5143d2 100644
59824 --- a/include/linux/decompress/mm.h
59825 +++ b/include/linux/decompress/mm.h
59826 @@ -77,7 +77,7 @@ static void free(void *where)
59827 * warnings when not needed (indeed large_malloc / large_free are not
59828 * needed by inflate */
59829
59830 -#define malloc(a) kmalloc(a, GFP_KERNEL)
59831 +#define malloc(a) kmalloc((a), GFP_KERNEL)
59832 #define free(a) kfree(a)
59833
59834 #define large_malloc(a) vmalloc(a)
59835 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
59836 index dfc099e..e583e66 100644
59837 --- a/include/linux/dma-mapping.h
59838 +++ b/include/linux/dma-mapping.h
59839 @@ -51,7 +51,7 @@ struct dma_map_ops {
59840 u64 (*get_required_mask)(struct device *dev);
59841 #endif
59842 int is_phys;
59843 -};
59844 +} __do_const;
59845
59846 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
59847
59848 diff --git a/include/linux/efi.h b/include/linux/efi.h
59849 index ec45ccd..9923c32 100644
59850 --- a/include/linux/efi.h
59851 +++ b/include/linux/efi.h
59852 @@ -635,7 +635,7 @@ struct efivar_operations {
59853 efi_get_variable_t *get_variable;
59854 efi_get_next_variable_t *get_next_variable;
59855 efi_set_variable_t *set_variable;
59856 -};
59857 +} __no_const;
59858
59859 struct efivars {
59860 /*
59861 diff --git a/include/linux/elf.h b/include/linux/elf.h
59862 index 999b4f5..57753b4 100644
59863 --- a/include/linux/elf.h
59864 +++ b/include/linux/elf.h
59865 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
59866 #define PT_GNU_EH_FRAME 0x6474e550
59867
59868 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
59869 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
59870 +
59871 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
59872 +
59873 +/* Constants for the e_flags field */
59874 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59875 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
59876 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
59877 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
59878 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59879 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59880
59881 /*
59882 * Extended Numbering
59883 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
59884 #define DT_DEBUG 21
59885 #define DT_TEXTREL 22
59886 #define DT_JMPREL 23
59887 +#define DT_FLAGS 30
59888 + #define DF_TEXTREL 0x00000004
59889 #define DT_ENCODING 32
59890 #define OLD_DT_LOOS 0x60000000
59891 #define DT_LOOS 0x6000000d
59892 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
59893 #define PF_W 0x2
59894 #define PF_X 0x1
59895
59896 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
59897 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
59898 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
59899 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
59900 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
59901 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
59902 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
59903 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
59904 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
59905 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
59906 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
59907 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
59908 +
59909 typedef struct elf32_phdr{
59910 Elf32_Word p_type;
59911 Elf32_Off p_offset;
59912 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
59913 #define EI_OSABI 7
59914 #define EI_PAD 8
59915
59916 +#define EI_PAX 14
59917 +
59918 #define ELFMAG0 0x7f /* EI_MAG */
59919 #define ELFMAG1 'E'
59920 #define ELFMAG2 'L'
59921 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
59922 #define elf_note elf32_note
59923 #define elf_addr_t Elf32_Off
59924 #define Elf_Half Elf32_Half
59925 +#define elf_dyn Elf32_Dyn
59926
59927 #else
59928
59929 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
59930 #define elf_note elf64_note
59931 #define elf_addr_t Elf64_Off
59932 #define Elf_Half Elf64_Half
59933 +#define elf_dyn Elf64_Dyn
59934
59935 #endif
59936
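
[Editor's note] elf.h gains the PT_PAX_FLAGS program header type and the PF_*/PF_NO* bits that the PaX ELF-marking tools set. A hedged userspace sketch that reports those markings for a 64-bit ELF file; the constants are copied from the hunk above because a stock glibc <elf.h> typically does not define them, and the program assumes a well-formed ELF64 image (no validation, for brevity):

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#define PT_PAX_FLAGS	0x65041580U	/* PT_LOOS + 0x5041580 */
#define PF_PAGEEXEC	(1U << 4)
#define PF_MPROTECT	(1U << 8)
#define PF_RANDMMAP	(1U << 14)

int main(int argc, char **argv)
{
	struct stat st;
	const Elf64_Ehdr *eh;
	const Elf64_Phdr *ph;
	int fd, i;

	if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0 || fstat(fd, &st))
		return 1;
	eh = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (eh == MAP_FAILED)
		return 1;
	ph = (const Elf64_Phdr *)((const char *)eh + eh->e_phoff);
	for (i = 0; i < eh->e_phnum; i++)
		if (ph[i].p_type == PT_PAX_FLAGS)
			printf("PAGEEXEC:%d MPROTECT:%d RANDMMAP:%d\n",
			       !!(ph[i].p_flags & PF_PAGEEXEC),
			       !!(ph[i].p_flags & PF_MPROTECT),
			       !!(ph[i].p_flags & PF_RANDMMAP));
	return 0;
}
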
59937 diff --git a/include/linux/filter.h b/include/linux/filter.h
59938 index 8eeb205..d59bfa2 100644
59939 --- a/include/linux/filter.h
59940 +++ b/include/linux/filter.h
59941 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
59942
59943 struct sk_buff;
59944 struct sock;
59945 +struct bpf_jit_work;
59946
59947 struct sk_filter
59948 {
59949 @@ -141,6 +142,9 @@ struct sk_filter
59950 unsigned int len; /* Number of filter blocks */
59951 unsigned int (*bpf_func)(const struct sk_buff *skb,
59952 const struct sock_filter *filter);
59953 +#ifdef CONFIG_BPF_JIT
59954 + struct bpf_jit_work *work;
59955 +#endif
59956 struct rcu_head rcu;
59957 struct sock_filter insns[0];
59958 };
59959 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
59960 index cdc9b71..ce69fb5 100644
59961 --- a/include/linux/firewire.h
59962 +++ b/include/linux/firewire.h
59963 @@ -413,7 +413,7 @@ struct fw_iso_context {
59964 union {
59965 fw_iso_callback_t sc;
59966 fw_iso_mc_callback_t mc;
59967 - } callback;
59968 + } __no_const callback;
59969 void *callback_data;
59970 };
59971
59972 diff --git a/include/linux/fs.h b/include/linux/fs.h
59973 index 25c40b9..1bfd4f4 100644
59974 --- a/include/linux/fs.h
59975 +++ b/include/linux/fs.h
59976 @@ -1634,7 +1634,8 @@ struct file_operations {
59977 int (*setlease)(struct file *, long, struct file_lock **);
59978 long (*fallocate)(struct file *file, int mode, loff_t offset,
59979 loff_t len);
59980 -};
59981 +} __do_const;
59982 +typedef struct file_operations __no_const file_operations_no_const;
59983
59984 struct inode_operations {
59985 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
59986 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
59987 index 003dc0f..3c4ea97 100644
59988 --- a/include/linux/fs_struct.h
59989 +++ b/include/linux/fs_struct.h
59990 @@ -6,7 +6,7 @@
59991 #include <linux/seqlock.h>
59992
59993 struct fs_struct {
59994 - int users;
59995 + atomic_t users;
59996 spinlock_t lock;
59997 seqcount_t seq;
59998 int umask;
59999 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60000 index ce31408..b1ad003 100644
60001 --- a/include/linux/fscache-cache.h
60002 +++ b/include/linux/fscache-cache.h
60003 @@ -102,7 +102,7 @@ struct fscache_operation {
60004 fscache_operation_release_t release;
60005 };
60006
60007 -extern atomic_t fscache_op_debug_id;
60008 +extern atomic_unchecked_t fscache_op_debug_id;
60009 extern void fscache_op_work_func(struct work_struct *work);
60010
60011 extern void fscache_enqueue_operation(struct fscache_operation *);
60012 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60013 {
60014 INIT_WORK(&op->work, fscache_op_work_func);
60015 atomic_set(&op->usage, 1);
60016 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60017 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60018 op->processor = processor;
60019 op->release = release;
60020 INIT_LIST_HEAD(&op->pend_link);
60021 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60022 index a6dfe69..569586df 100644
60023 --- a/include/linux/fsnotify.h
60024 +++ b/include/linux/fsnotify.h
60025 @@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60026 */
60027 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60028 {
60029 - return kstrdup(name, GFP_KERNEL);
60030 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60031 }
60032
60033 /*
60034 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60035 index 91d0e0a3..035666b 100644
60036 --- a/include/linux/fsnotify_backend.h
60037 +++ b/include/linux/fsnotify_backend.h
60038 @@ -105,6 +105,7 @@ struct fsnotify_ops {
60039 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60040 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60041 };
60042 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60043
60044 /*
60045 * A group is a "thing" that wants to receive notification about filesystem
60046 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60047 index 176a939..1462211 100644
60048 --- a/include/linux/ftrace_event.h
60049 +++ b/include/linux/ftrace_event.h
60050 @@ -97,7 +97,7 @@ struct trace_event_functions {
60051 trace_print_func raw;
60052 trace_print_func hex;
60053 trace_print_func binary;
60054 -};
60055 +} __no_const;
60056
60057 struct trace_event {
60058 struct hlist_node node;
60059 @@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60060 extern int trace_add_event_call(struct ftrace_event_call *call);
60061 extern void trace_remove_event_call(struct ftrace_event_call *call);
60062
60063 -#define is_signed_type(type) (((type)(-1)) < 0)
60064 +#define is_signed_type(type) (((type)(-1)) < (type)1)
60065
60066 int trace_set_clr_event(const char *system, const char *event, int set);
60067
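
[Editor's note] The ftrace_event.h tweak redefines is_signed_type() to compare against (type)1 instead of 0; the results are unchanged for ordinary integer types, and the rewritten comparison avoids "comparison is always false" style warnings for unsigned and boolean types. A standalone check:

#include <stdbool.h>
#include <stdio.h>

#define is_signed_type(type)	(((type)(-1)) < (type)1)

int main(void)
{
	printf("int:          %d\n", is_signed_type(int));		/* 1 */
	printf("unsigned int: %d\n", is_signed_type(unsigned int));	/* 0 */
	printf("bool:         %d\n", is_signed_type(bool));		/* 0 */
	printf("char:         %d\n", is_signed_type(char));	/* implementation-defined */
	return 0;
}
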
60068 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60069 index 017a7fb..33a8507 100644
60070 --- a/include/linux/genhd.h
60071 +++ b/include/linux/genhd.h
60072 @@ -185,7 +185,7 @@ struct gendisk {
60073 struct kobject *slave_dir;
60074
60075 struct timer_rand_state *random;
60076 - atomic_t sync_io; /* RAID */
60077 + atomic_unchecked_t sync_io; /* RAID */
60078 struct disk_events *ev;
60079 #ifdef CONFIG_BLK_DEV_INTEGRITY
60080 struct blk_integrity *integrity;
60081 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60082 new file mode 100644
60083 index 0000000..c938b1f
60084 --- /dev/null
60085 +++ b/include/linux/gracl.h
60086 @@ -0,0 +1,319 @@
60087 +#ifndef GR_ACL_H
60088 +#define GR_ACL_H
60089 +
60090 +#include <linux/grdefs.h>
60091 +#include <linux/resource.h>
60092 +#include <linux/capability.h>
60093 +#include <linux/dcache.h>
60094 +#include <asm/resource.h>
60095 +
60096 +/* Major status information */
60097 +
60098 +#define GR_VERSION "grsecurity 2.9.1"
60099 +#define GRSECURITY_VERSION 0x2901
60100 +
60101 +enum {
60102 + GR_SHUTDOWN = 0,
60103 + GR_ENABLE = 1,
60104 + GR_SPROLE = 2,
60105 + GR_RELOAD = 3,
60106 + GR_SEGVMOD = 4,
60107 + GR_STATUS = 5,
60108 + GR_UNSPROLE = 6,
60109 + GR_PASSSET = 7,
60110 + GR_SPROLEPAM = 8,
60111 +};
60112 +
60113 +/* Password setup definitions
60114 + * kernel/grhash.c */
60115 +enum {
60116 + GR_PW_LEN = 128,
60117 + GR_SALT_LEN = 16,
60118 + GR_SHA_LEN = 32,
60119 +};
60120 +
60121 +enum {
60122 + GR_SPROLE_LEN = 64,
60123 +};
60124 +
60125 +enum {
60126 + GR_NO_GLOB = 0,
60127 + GR_REG_GLOB,
60128 + GR_CREATE_GLOB
60129 +};
60130 +
60131 +#define GR_NLIMITS 32
60132 +
60133 +/* Begin Data Structures */
60134 +
60135 +struct sprole_pw {
60136 + unsigned char *rolename;
60137 + unsigned char salt[GR_SALT_LEN];
60138 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60139 +};
60140 +
60141 +struct name_entry {
60142 + __u32 key;
60143 + ino_t inode;
60144 + dev_t device;
60145 + char *name;
60146 + __u16 len;
60147 + __u8 deleted;
60148 + struct name_entry *prev;
60149 + struct name_entry *next;
60150 +};
60151 +
60152 +struct inodev_entry {
60153 + struct name_entry *nentry;
60154 + struct inodev_entry *prev;
60155 + struct inodev_entry *next;
60156 +};
60157 +
60158 +struct acl_role_db {
60159 + struct acl_role_label **r_hash;
60160 + __u32 r_size;
60161 +};
60162 +
60163 +struct inodev_db {
60164 + struct inodev_entry **i_hash;
60165 + __u32 i_size;
60166 +};
60167 +
60168 +struct name_db {
60169 + struct name_entry **n_hash;
60170 + __u32 n_size;
60171 +};
60172 +
60173 +struct crash_uid {
60174 + uid_t uid;
60175 + unsigned long expires;
60176 +};
60177 +
60178 +struct gr_hash_struct {
60179 + void **table;
60180 + void **nametable;
60181 + void *first;
60182 + __u32 table_size;
60183 + __u32 used_size;
60184 + int type;
60185 +};
60186 +
60187 +/* Userspace Grsecurity ACL data structures */
60188 +
60189 +struct acl_subject_label {
60190 + char *filename;
60191 + ino_t inode;
60192 + dev_t device;
60193 + __u32 mode;
60194 + kernel_cap_t cap_mask;
60195 + kernel_cap_t cap_lower;
60196 + kernel_cap_t cap_invert_audit;
60197 +
60198 + struct rlimit res[GR_NLIMITS];
60199 + __u32 resmask;
60200 +
60201 + __u8 user_trans_type;
60202 + __u8 group_trans_type;
60203 + uid_t *user_transitions;
60204 + gid_t *group_transitions;
60205 + __u16 user_trans_num;
60206 + __u16 group_trans_num;
60207 +
60208 + __u32 sock_families[2];
60209 + __u32 ip_proto[8];
60210 + __u32 ip_type;
60211 + struct acl_ip_label **ips;
60212 + __u32 ip_num;
60213 + __u32 inaddr_any_override;
60214 +
60215 + __u32 crashes;
60216 + unsigned long expires;
60217 +
60218 + struct acl_subject_label *parent_subject;
60219 + struct gr_hash_struct *hash;
60220 + struct acl_subject_label *prev;
60221 + struct acl_subject_label *next;
60222 +
60223 + struct acl_object_label **obj_hash;
60224 + __u32 obj_hash_size;
60225 + __u16 pax_flags;
60226 +};
60227 +
60228 +struct role_allowed_ip {
60229 + __u32 addr;
60230 + __u32 netmask;
60231 +
60232 + struct role_allowed_ip *prev;
60233 + struct role_allowed_ip *next;
60234 +};
60235 +
60236 +struct role_transition {
60237 + char *rolename;
60238 +
60239 + struct role_transition *prev;
60240 + struct role_transition *next;
60241 +};
60242 +
60243 +struct acl_role_label {
60244 + char *rolename;
60245 + uid_t uidgid;
60246 + __u16 roletype;
60247 +
60248 + __u16 auth_attempts;
60249 + unsigned long expires;
60250 +
60251 + struct acl_subject_label *root_label;
60252 + struct gr_hash_struct *hash;
60253 +
60254 + struct acl_role_label *prev;
60255 + struct acl_role_label *next;
60256 +
60257 + struct role_transition *transitions;
60258 + struct role_allowed_ip *allowed_ips;
60259 + uid_t *domain_children;
60260 + __u16 domain_child_num;
60261 +
60262 + umode_t umask;
60263 +
60264 + struct acl_subject_label **subj_hash;
60265 + __u32 subj_hash_size;
60266 +};
60267 +
60268 +struct user_acl_role_db {
60269 + struct acl_role_label **r_table;
60270 + __u32 num_pointers; /* Number of allocations to track */
60271 + __u32 num_roles; /* Number of roles */
60272 + __u32 num_domain_children; /* Number of domain children */
60273 + __u32 num_subjects; /* Number of subjects */
60274 + __u32 num_objects; /* Number of objects */
60275 +};
60276 +
60277 +struct acl_object_label {
60278 + char *filename;
60279 + ino_t inode;
60280 + dev_t device;
60281 + __u32 mode;
60282 +
60283 + struct acl_subject_label *nested;
60284 + struct acl_object_label *globbed;
60285 +
60286 + /* next two structures not used */
60287 +
60288 + struct acl_object_label *prev;
60289 + struct acl_object_label *next;
60290 +};
60291 +
60292 +struct acl_ip_label {
60293 + char *iface;
60294 + __u32 addr;
60295 + __u32 netmask;
60296 + __u16 low, high;
60297 + __u8 mode;
60298 + __u32 type;
60299 + __u32 proto[8];
60300 +
60301 + /* next two structures not used */
60302 +
60303 + struct acl_ip_label *prev;
60304 + struct acl_ip_label *next;
60305 +};
60306 +
60307 +struct gr_arg {
60308 + struct user_acl_role_db role_db;
60309 + unsigned char pw[GR_PW_LEN];
60310 + unsigned char salt[GR_SALT_LEN];
60311 + unsigned char sum[GR_SHA_LEN];
60312 + unsigned char sp_role[GR_SPROLE_LEN];
60313 + struct sprole_pw *sprole_pws;
60314 + dev_t segv_device;
60315 + ino_t segv_inode;
60316 + uid_t segv_uid;
60317 + __u16 num_sprole_pws;
60318 + __u16 mode;
60319 +};
60320 +
60321 +struct gr_arg_wrapper {
60322 + struct gr_arg *arg;
60323 + __u32 version;
60324 + __u32 size;
60325 +};
60326 +
60327 +struct subject_map {
60328 + struct acl_subject_label *user;
60329 + struct acl_subject_label *kernel;
60330 + struct subject_map *prev;
60331 + struct subject_map *next;
60332 +};
60333 +
60334 +struct acl_subj_map_db {
60335 + struct subject_map **s_hash;
60336 + __u32 s_size;
60337 +};
60338 +
60339 +/* End Data Structures Section */
60340 +
60341 +/* Hash functions arrived at through empirical testing by Brad Spengler.
60342 +   Makes good use of the low bits of the inode. Generally 0-1 iterations
60343 +   of the loop for a successful match, 0-3 for an unsuccessful match.
60344 +   Shift/add algorithm with modulus of table size and an XOR. */
60345 +
60346 +static __inline__ unsigned int
60347 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60348 +{
60349 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
60350 +}
60351 +
60352 +static __inline__ unsigned int
60353 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60354 +{
60355 + return ((const unsigned long)userp % sz);
60356 +}
60357 +
60358 +static __inline__ unsigned int
60359 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60360 +{
60361 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60362 +}
60363 +
60364 +static __inline__ unsigned int
60365 +nhash(const char *name, const __u16 len, const unsigned int sz)
60366 +{
60367 + return full_name_hash((const unsigned char *)name, len) % sz;
60368 +}
60369 +
60370 +#define FOR_EACH_ROLE_START(role) \
60371 + role = role_list; \
60372 + while (role) {
60373 +
60374 +#define FOR_EACH_ROLE_END(role) \
60375 + role = role->prev; \
60376 + }
60377 +
60378 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60379 + subj = NULL; \
60380 + iter = 0; \
60381 + while (iter < role->subj_hash_size) { \
60382 + if (subj == NULL) \
60383 + subj = role->subj_hash[iter]; \
60384 + if (subj == NULL) { \
60385 + iter++; \
60386 + continue; \
60387 + }
60388 +
60389 +#define FOR_EACH_SUBJECT_END(subj,iter) \
60390 + subj = subj->next; \
60391 + if (subj == NULL) \
60392 + iter++; \
60393 + }
60394 +
60395 +
60396 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60397 + subj = role->hash->first; \
60398 + while (subj != NULL) {
60399 +
60400 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60401 + subj = subj->next; \
60402 + }
60403 +
60404 +#endif
60405 +
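
[Editor's note] The inline helpers at the end of gracl.h (rhash/shash/fhash/nhash) index the various **_hash tables declared above. A standalone userspace model of fhash() and of a chained-bucket lookup in that style; struct obj is a simplified stand-in for acl_object_label, and the ino_t/dev_t key types are flattened to unsigned long:

#include <stdio.h>
#include <stdlib.h>

static unsigned int fhash(unsigned long ino, unsigned long dev, unsigned int sz)
{
	return ((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz;
}

struct obj {
	unsigned long ino, dev;
	struct obj *next;	/* collision chain within one bucket */
};

static struct obj *lookup(struct obj **hash, unsigned int sz,
			  unsigned long ino, unsigned long dev)
{
	struct obj *o;

	for (o = hash[fhash(ino, dev, sz)]; o != NULL; o = o->next)
		if (o->ino == ino && o->dev == dev)
			return o;
	return NULL;
}

int main(void)
{
	struct obj **hash = calloc(256, sizeof(*hash));
	struct obj item = { .ino = 1234, .dev = 8, .next = NULL };

	hash[fhash(item.ino, item.dev, 256)] = &item;
	printf("found: %d\n", lookup(hash, 256, 1234, 8) != NULL);
	free(hash);
	return 0;
}
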
60406 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60407 new file mode 100644
60408 index 0000000..323ecf2
60409 --- /dev/null
60410 +++ b/include/linux/gralloc.h
60411 @@ -0,0 +1,9 @@
60412 +#ifndef __GRALLOC_H
60413 +#define __GRALLOC_H
60414 +
60415 +void acl_free_all(void);
60416 +int acl_alloc_stack_init(unsigned long size);
60417 +void *acl_alloc(unsigned long len);
60418 +void *acl_alloc_num(unsigned long num, unsigned long len);
60419 +
60420 +#endif
60421 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60422 new file mode 100644
60423 index 0000000..b30e9bc
60424 --- /dev/null
60425 +++ b/include/linux/grdefs.h
60426 @@ -0,0 +1,140 @@
60427 +#ifndef GRDEFS_H
60428 +#define GRDEFS_H
60429 +
60430 +/* Begin grsecurity status declarations */
60431 +
60432 +enum {
60433 + GR_READY = 0x01,
60434 + GR_STATUS_INIT = 0x00 // disabled state
60435 +};
60436 +
60437 +/* Begin ACL declarations */
60438 +
60439 +/* Role flags */
60440 +
60441 +enum {
60442 + GR_ROLE_USER = 0x0001,
60443 + GR_ROLE_GROUP = 0x0002,
60444 + GR_ROLE_DEFAULT = 0x0004,
60445 + GR_ROLE_SPECIAL = 0x0008,
60446 + GR_ROLE_AUTH = 0x0010,
60447 + GR_ROLE_NOPW = 0x0020,
60448 + GR_ROLE_GOD = 0x0040,
60449 + GR_ROLE_LEARN = 0x0080,
60450 + GR_ROLE_TPE = 0x0100,
60451 + GR_ROLE_DOMAIN = 0x0200,
60452 + GR_ROLE_PAM = 0x0400,
60453 + GR_ROLE_PERSIST = 0x0800
60454 +};
60455 +
60456 +/* ACL Subject and Object mode flags */
60457 +enum {
60458 + GR_DELETED = 0x80000000
60459 +};
60460 +
60461 +/* ACL Object-only mode flags */
60462 +enum {
60463 + GR_READ = 0x00000001,
60464 + GR_APPEND = 0x00000002,
60465 + GR_WRITE = 0x00000004,
60466 + GR_EXEC = 0x00000008,
60467 + GR_FIND = 0x00000010,
60468 + GR_INHERIT = 0x00000020,
60469 + GR_SETID = 0x00000040,
60470 + GR_CREATE = 0x00000080,
60471 + GR_DELETE = 0x00000100,
60472 + GR_LINK = 0x00000200,
60473 + GR_AUDIT_READ = 0x00000400,
60474 + GR_AUDIT_APPEND = 0x00000800,
60475 + GR_AUDIT_WRITE = 0x00001000,
60476 + GR_AUDIT_EXEC = 0x00002000,
60477 + GR_AUDIT_FIND = 0x00004000,
60478 + GR_AUDIT_INHERIT= 0x00008000,
60479 + GR_AUDIT_SETID = 0x00010000,
60480 + GR_AUDIT_CREATE = 0x00020000,
60481 + GR_AUDIT_DELETE = 0x00040000,
60482 + GR_AUDIT_LINK = 0x00080000,
60483 + GR_PTRACERD = 0x00100000,
60484 + GR_NOPTRACE = 0x00200000,
60485 + GR_SUPPRESS = 0x00400000,
60486 + GR_NOLEARN = 0x00800000,
60487 + GR_INIT_TRANSFER= 0x01000000
60488 +};
60489 +
60490 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60491 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60492 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60493 +
60494 +/* ACL subject-only mode flags */
60495 +enum {
60496 + GR_KILL = 0x00000001,
60497 + GR_VIEW = 0x00000002,
60498 + GR_PROTECTED = 0x00000004,
60499 + GR_LEARN = 0x00000008,
60500 + GR_OVERRIDE = 0x00000010,
60501 + /* just a placeholder, this mode is only used in userspace */
60502 + GR_DUMMY = 0x00000020,
60503 + GR_PROTSHM = 0x00000040,
60504 + GR_KILLPROC = 0x00000080,
60505 + GR_KILLIPPROC = 0x00000100,
60506 + /* just a placeholder, this mode is only used in userspace */
60507 + GR_NOTROJAN = 0x00000200,
60508 + GR_PROTPROCFD = 0x00000400,
60509 + GR_PROCACCT = 0x00000800,
60510 + GR_RELAXPTRACE = 0x00001000,
60511 + GR_NESTED = 0x00002000,
60512 + GR_INHERITLEARN = 0x00004000,
60513 + GR_PROCFIND = 0x00008000,
60514 + GR_POVERRIDE = 0x00010000,
60515 + GR_KERNELAUTH = 0x00020000,
60516 + GR_ATSECURE = 0x00040000,
60517 + GR_SHMEXEC = 0x00080000
60518 +};
60519 +
60520 +enum {
60521 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60522 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60523 + GR_PAX_ENABLE_MPROTECT = 0x0004,
60524 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
60525 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60526 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60527 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60528 + GR_PAX_DISABLE_MPROTECT = 0x0400,
60529 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
60530 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60531 +};
60532 +
60533 +enum {
60534 + GR_ID_USER = 0x01,
60535 + GR_ID_GROUP = 0x02,
60536 +};
60537 +
60538 +enum {
60539 + GR_ID_ALLOW = 0x01,
60540 + GR_ID_DENY = 0x02,
60541 +};
60542 +
60543 +#define GR_CRASH_RES 31
60544 +#define GR_UIDTABLE_MAX 500
60545 +
60546 +/* begin resource learning section */
60547 +enum {
60548 + GR_RLIM_CPU_BUMP = 60,
60549 + GR_RLIM_FSIZE_BUMP = 50000,
60550 + GR_RLIM_DATA_BUMP = 10000,
60551 + GR_RLIM_STACK_BUMP = 1000,
60552 + GR_RLIM_CORE_BUMP = 10000,
60553 + GR_RLIM_RSS_BUMP = 500000,
60554 + GR_RLIM_NPROC_BUMP = 1,
60555 + GR_RLIM_NOFILE_BUMP = 5,
60556 + GR_RLIM_MEMLOCK_BUMP = 50000,
60557 + GR_RLIM_AS_BUMP = 500000,
60558 + GR_RLIM_LOCKS_BUMP = 2,
60559 + GR_RLIM_SIGPENDING_BUMP = 5,
60560 + GR_RLIM_MSGQUEUE_BUMP = 10000,
60561 + GR_RLIM_NICE_BUMP = 1,
60562 + GR_RLIM_RTPRIO_BUMP = 1,
60563 + GR_RLIM_RTTIME_BUMP = 1000000
60564 +};
60565 +
60566 +#endif
60567 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60568 new file mode 100644
60569 index 0000000..da390f1
60570 --- /dev/null
60571 +++ b/include/linux/grinternal.h
60572 @@ -0,0 +1,221 @@
60573 +#ifndef __GRINTERNAL_H
60574 +#define __GRINTERNAL_H
60575 +
60576 +#ifdef CONFIG_GRKERNSEC
60577 +
60578 +#include <linux/fs.h>
60579 +#include <linux/mnt_namespace.h>
60580 +#include <linux/nsproxy.h>
60581 +#include <linux/gracl.h>
60582 +#include <linux/grdefs.h>
60583 +#include <linux/grmsg.h>
60584 +
60585 +void gr_add_learn_entry(const char *fmt, ...)
60586 + __attribute__ ((format (printf, 1, 2)));
60587 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
60588 + const struct vfsmount *mnt);
60589 +__u32 gr_check_create(const struct dentry *new_dentry,
60590 + const struct dentry *parent,
60591 + const struct vfsmount *mnt, const __u32 mode);
60592 +int gr_check_protected_task(const struct task_struct *task);
60593 +__u32 to_gr_audit(const __u32 reqmode);
60594 +int gr_set_acls(const int type);
60595 +int gr_apply_subject_to_task(struct task_struct *task);
60596 +int gr_acl_is_enabled(void);
60597 +char gr_roletype_to_char(void);
60598 +
60599 +void gr_handle_alertkill(struct task_struct *task);
60600 +char *gr_to_filename(const struct dentry *dentry,
60601 + const struct vfsmount *mnt);
60602 +char *gr_to_filename1(const struct dentry *dentry,
60603 + const struct vfsmount *mnt);
60604 +char *gr_to_filename2(const struct dentry *dentry,
60605 + const struct vfsmount *mnt);
60606 +char *gr_to_filename3(const struct dentry *dentry,
60607 + const struct vfsmount *mnt);
60608 +
60609 +extern int grsec_enable_ptrace_readexec;
60610 +extern int grsec_enable_harden_ptrace;
60611 +extern int grsec_enable_link;
60612 +extern int grsec_enable_fifo;
60613 +extern int grsec_enable_execve;
60614 +extern int grsec_enable_shm;
60615 +extern int grsec_enable_execlog;
60616 +extern int grsec_enable_signal;
60617 +extern int grsec_enable_audit_ptrace;
60618 +extern int grsec_enable_forkfail;
60619 +extern int grsec_enable_time;
60620 +extern int grsec_enable_rofs;
60621 +extern int grsec_enable_chroot_shmat;
60622 +extern int grsec_enable_chroot_mount;
60623 +extern int grsec_enable_chroot_double;
60624 +extern int grsec_enable_chroot_pivot;
60625 +extern int grsec_enable_chroot_chdir;
60626 +extern int grsec_enable_chroot_chmod;
60627 +extern int grsec_enable_chroot_mknod;
60628 +extern int grsec_enable_chroot_fchdir;
60629 +extern int grsec_enable_chroot_nice;
60630 +extern int grsec_enable_chroot_execlog;
60631 +extern int grsec_enable_chroot_caps;
60632 +extern int grsec_enable_chroot_sysctl;
60633 +extern int grsec_enable_chroot_unix;
60634 +extern int grsec_enable_tpe;
60635 +extern int grsec_tpe_gid;
60636 +extern int grsec_enable_tpe_all;
60637 +extern int grsec_enable_tpe_invert;
60638 +extern int grsec_enable_socket_all;
60639 +extern int grsec_socket_all_gid;
60640 +extern int grsec_enable_socket_client;
60641 +extern int grsec_socket_client_gid;
60642 +extern int grsec_enable_socket_server;
60643 +extern int grsec_socket_server_gid;
60644 +extern int grsec_audit_gid;
60645 +extern int grsec_enable_group;
60646 +extern int grsec_enable_audit_textrel;
60647 +extern int grsec_enable_log_rwxmaps;
60648 +extern int grsec_enable_mount;
60649 +extern int grsec_enable_chdir;
60650 +extern int grsec_resource_logging;
60651 +extern int grsec_enable_blackhole;
60652 +extern int grsec_lastack_retries;
60653 +extern int grsec_enable_brute;
60654 +extern int grsec_lock;
60655 +
60656 +extern spinlock_t grsec_alert_lock;
60657 +extern unsigned long grsec_alert_wtime;
60658 +extern unsigned long grsec_alert_fyet;
60659 +
60660 +extern spinlock_t grsec_audit_lock;
60661 +
60662 +extern rwlock_t grsec_exec_file_lock;
60663 +
60664 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
60665 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
60666 + (tsk)->exec_file->f_vfsmnt) : "/")
60667 +
60668 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
60669 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
60670 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60671 +
60672 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
60673 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
60674 + (tsk)->exec_file->f_vfsmnt) : "/")
60675 +
60676 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
60677 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
60678 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60679 +
60680 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
60681 +
60682 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
60683 +
60684 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
60685 + (task)->pid, (cred)->uid, \
60686 + (cred)->euid, (cred)->gid, (cred)->egid, \
60687 + gr_parent_task_fullpath(task), \
60688 + (task)->real_parent->comm, (task)->real_parent->pid, \
60689 + (pcred)->uid, (pcred)->euid, \
60690 + (pcred)->gid, (pcred)->egid
60691 +
60692 +#define GR_CHROOT_CAPS {{ \
60693 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
60694 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
60695 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
60696 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
60697 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
60698 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
60699 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
60700 +
60701 +#define security_learn(normal_msg,args...) \
60702 +({ \
60703 + read_lock(&grsec_exec_file_lock); \
60704 + gr_add_learn_entry(normal_msg "\n", ## args); \
60705 + read_unlock(&grsec_exec_file_lock); \
60706 +})
60707 +
60708 +enum {
60709 + GR_DO_AUDIT,
60710 + GR_DONT_AUDIT,
60711 + /* used for non-audit messages that we shouldn't kill the task on */
60712 + GR_DONT_AUDIT_GOOD
60713 +};
60714 +
60715 +enum {
60716 + GR_TTYSNIFF,
60717 + GR_RBAC,
60718 + GR_RBAC_STR,
60719 + GR_STR_RBAC,
60720 + GR_RBAC_MODE2,
60721 + GR_RBAC_MODE3,
60722 + GR_FILENAME,
60723 + GR_SYSCTL_HIDDEN,
60724 + GR_NOARGS,
60725 + GR_ONE_INT,
60726 + GR_ONE_INT_TWO_STR,
60727 + GR_ONE_STR,
60728 + GR_STR_INT,
60729 + GR_TWO_STR_INT,
60730 + GR_TWO_INT,
60731 + GR_TWO_U64,
60732 + GR_THREE_INT,
60733 + GR_FIVE_INT_TWO_STR,
60734 + GR_TWO_STR,
60735 + GR_THREE_STR,
60736 + GR_FOUR_STR,
60737 + GR_STR_FILENAME,
60738 + GR_FILENAME_STR,
60739 + GR_FILENAME_TWO_INT,
60740 + GR_FILENAME_TWO_INT_STR,
60741 + GR_TEXTREL,
60742 + GR_PTRACE,
60743 + GR_RESOURCE,
60744 + GR_CAP,
60745 + GR_SIG,
60746 + GR_SIG2,
60747 + GR_CRASH1,
60748 + GR_CRASH2,
60749 + GR_PSACCT,
60750 + GR_RWXMAP
60751 +};
60752 +
60753 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
60754 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
60755 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
60756 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
60757 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
60758 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
60759 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
60760 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
60761 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
60762 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
60763 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
60764 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
60765 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
60766 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
60767 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
60768 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
60769 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
60770 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
60771 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
60772 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
60773 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
60774 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
60775 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
60776 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
60777 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
60778 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
60779 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
60780 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
60781 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
60782 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
60783 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
60784 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
60785 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
60786 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
60787 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
60788 +
60789 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
60790 +
60791 +#endif
60792 +
60793 +#endif
60794 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
60795 new file mode 100644
60796 index 0000000..ae576a1
60797 --- /dev/null
60798 +++ b/include/linux/grmsg.h
60799 @@ -0,0 +1,109 @@
60800 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
60801 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
60802 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
60803 +#define GR_STOPMOD_MSG "denied modification of module state by "
60804 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
60805 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
60806 +#define GR_IOPERM_MSG "denied use of ioperm() by "
60807 +#define GR_IOPL_MSG "denied use of iopl() by "
60808 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
60809 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
60810 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
60811 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
60812 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
60813 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
60814 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
60815 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
60816 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
60817 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
60818 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
60819 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
60820 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
60821 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
60822 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
60823 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
60824 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
60825 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
60826 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
60827 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
60828 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
60829 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
60830 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
60831 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
60832 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
60833 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
60834 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
60835 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
60836 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
60837 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
60838 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
60839 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
60840 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
60841 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
60842 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
60843 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
60844 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
60845 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
60846 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
60847 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
60848 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
60849 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
60850 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
60851 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
60852 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
60853 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
60854 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
60855 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
60856 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
60857 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
60858 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
60859 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
60860 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
60861 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
60862 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
60863 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
60864 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
60865 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
60866 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
60867 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
60868 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
60869 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
60870 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
60871 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
60872 +#define GR_NICE_CHROOT_MSG "denied priority change by "
60873 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
60874 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
60875 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
60876 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
60877 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
60878 +#define GR_TIME_MSG "time set by "
60879 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
60880 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
60881 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
60882 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
60883 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
60884 +#define GR_BIND_MSG "denied bind() by "
60885 +#define GR_CONNECT_MSG "denied connect() by "
60886 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
60887 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
60888 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
60889 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
60890 +#define GR_CAP_ACL_MSG "use of %s denied for "
60891 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
60892 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
60893 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
60894 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
60895 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
60896 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
60897 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
60898 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
60899 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
60900 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
60901 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
60902 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
60903 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
60904 +#define GR_VM86_MSG "denied use of vm86 by "
60905 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
60906 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
60907 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
60908 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
60909 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
60910 new file mode 100644
60911 index 0000000..acd05db
60912 --- /dev/null
60913 +++ b/include/linux/grsecurity.h
60914 @@ -0,0 +1,232 @@
60915 +#ifndef GR_SECURITY_H
60916 +#define GR_SECURITY_H
60917 +#include <linux/fs.h>
60918 +#include <linux/fs_struct.h>
60919 +#include <linux/binfmts.h>
60920 +#include <linux/gracl.h>
60921 +
60922 +/* notify of brain-dead configs */
60923 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60924 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
60925 +#endif
60926 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
60927 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
60928 +#endif
60929 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
60930 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
60931 +#endif
60932 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
60933 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
60934 +#endif
60935 +
60936 +#include <linux/compat.h>
60937 +
60938 +struct user_arg_ptr {
60939 +#ifdef CONFIG_COMPAT
60940 + bool is_compat;
60941 +#endif
60942 + union {
60943 + const char __user *const __user *native;
60944 +#ifdef CONFIG_COMPAT
60945 + compat_uptr_t __user *compat;
60946 +#endif
60947 + } ptr;
60948 +};
60949 +
60950 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
60951 +void gr_handle_brute_check(void);
60952 +void gr_handle_kernel_exploit(void);
60953 +int gr_process_user_ban(void);
60954 +
60955 +char gr_roletype_to_char(void);
60956 +
60957 +int gr_acl_enable_at_secure(void);
60958 +
60959 +int gr_check_user_change(int real, int effective, int fs);
60960 +int gr_check_group_change(int real, int effective, int fs);
60961 +
60962 +void gr_del_task_from_ip_table(struct task_struct *p);
60963 +
60964 +int gr_pid_is_chrooted(struct task_struct *p);
60965 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
60966 +int gr_handle_chroot_nice(void);
60967 +int gr_handle_chroot_sysctl(const int op);
60968 +int gr_handle_chroot_setpriority(struct task_struct *p,
60969 + const int niceval);
60970 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
60971 +int gr_handle_chroot_chroot(const struct dentry *dentry,
60972 + const struct vfsmount *mnt);
60973 +void gr_handle_chroot_chdir(struct path *path);
60974 +int gr_handle_chroot_chmod(const struct dentry *dentry,
60975 + const struct vfsmount *mnt, const int mode);
60976 +int gr_handle_chroot_mknod(const struct dentry *dentry,
60977 + const struct vfsmount *mnt, const int mode);
60978 +int gr_handle_chroot_mount(const struct dentry *dentry,
60979 + const struct vfsmount *mnt,
60980 + const char *dev_name);
60981 +int gr_handle_chroot_pivot(void);
60982 +int gr_handle_chroot_unix(const pid_t pid);
60983 +
60984 +int gr_handle_rawio(const struct inode *inode);
60985 +
60986 +void gr_handle_ioperm(void);
60987 +void gr_handle_iopl(void);
60988 +
60989 +umode_t gr_acl_umask(void);
60990 +
60991 +int gr_tpe_allow(const struct file *file);
60992 +
60993 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
60994 +void gr_clear_chroot_entries(struct task_struct *task);
60995 +
60996 +void gr_log_forkfail(const int retval);
60997 +void gr_log_timechange(void);
60998 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
60999 +void gr_log_chdir(const struct dentry *dentry,
61000 + const struct vfsmount *mnt);
61001 +void gr_log_chroot_exec(const struct dentry *dentry,
61002 + const struct vfsmount *mnt);
61003 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61004 +void gr_log_remount(const char *devname, const int retval);
61005 +void gr_log_unmount(const char *devname, const int retval);
61006 +void gr_log_mount(const char *from, const char *to, const int retval);
61007 +void gr_log_textrel(struct vm_area_struct *vma);
61008 +void gr_log_rwxmmap(struct file *file);
61009 +void gr_log_rwxmprotect(struct file *file);
61010 +
61011 +int gr_handle_follow_link(const struct inode *parent,
61012 + const struct inode *inode,
61013 + const struct dentry *dentry,
61014 + const struct vfsmount *mnt);
61015 +int gr_handle_fifo(const struct dentry *dentry,
61016 + const struct vfsmount *mnt,
61017 + const struct dentry *dir, const int flag,
61018 + const int acc_mode);
61019 +int gr_handle_hardlink(const struct dentry *dentry,
61020 + const struct vfsmount *mnt,
61021 + struct inode *inode,
61022 + const int mode, const char *to);
61023 +
61024 +int gr_is_capable(const int cap);
61025 +int gr_is_capable_nolog(const int cap);
61026 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61027 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61028 +
61029 +void gr_learn_resource(const struct task_struct *task, const int limit,
61030 + const unsigned long wanted, const int gt);
61031 +void gr_copy_label(struct task_struct *tsk);
61032 +void gr_handle_crash(struct task_struct *task, const int sig);
61033 +int gr_handle_signal(const struct task_struct *p, const int sig);
61034 +int gr_check_crash_uid(const uid_t uid);
61035 +int gr_check_protected_task(const struct task_struct *task);
61036 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61037 +int gr_acl_handle_mmap(const struct file *file,
61038 + const unsigned long prot);
61039 +int gr_acl_handle_mprotect(const struct file *file,
61040 + const unsigned long prot);
61041 +int gr_check_hidden_task(const struct task_struct *tsk);
61042 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61043 + const struct vfsmount *mnt);
61044 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
61045 + const struct vfsmount *mnt);
61046 +__u32 gr_acl_handle_access(const struct dentry *dentry,
61047 + const struct vfsmount *mnt, const int fmode);
61048 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61049 + const struct vfsmount *mnt, umode_t *mode);
61050 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
61051 + const struct vfsmount *mnt);
61052 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61053 + const struct vfsmount *mnt);
61054 +int gr_handle_ptrace(struct task_struct *task, const long request);
61055 +int gr_handle_proc_ptrace(struct task_struct *task);
61056 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
61057 + const struct vfsmount *mnt);
61058 +int gr_check_crash_exec(const struct file *filp);
61059 +int gr_acl_is_enabled(void);
61060 +void gr_set_kernel_label(struct task_struct *task);
61061 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
61062 + const gid_t gid);
61063 +int gr_set_proc_label(const struct dentry *dentry,
61064 + const struct vfsmount *mnt,
61065 + const int unsafe_flags);
61066 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61067 + const struct vfsmount *mnt);
61068 +__u32 gr_acl_handle_open(const struct dentry *dentry,
61069 + const struct vfsmount *mnt, int acc_mode);
61070 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
61071 + const struct dentry *p_dentry,
61072 + const struct vfsmount *p_mnt,
61073 + int open_flags, int acc_mode, const int imode);
61074 +void gr_handle_create(const struct dentry *dentry,
61075 + const struct vfsmount *mnt);
61076 +void gr_handle_proc_create(const struct dentry *dentry,
61077 + const struct inode *inode);
61078 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61079 + const struct dentry *parent_dentry,
61080 + const struct vfsmount *parent_mnt,
61081 + const int mode);
61082 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61083 + const struct dentry *parent_dentry,
61084 + const struct vfsmount *parent_mnt);
61085 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61086 + const struct vfsmount *mnt);
61087 +void gr_handle_delete(const ino_t ino, const dev_t dev);
61088 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61089 + const struct vfsmount *mnt);
61090 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61091 + const struct dentry *parent_dentry,
61092 + const struct vfsmount *parent_mnt,
61093 + const char *from);
61094 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61095 + const struct dentry *parent_dentry,
61096 + const struct vfsmount *parent_mnt,
61097 + const struct dentry *old_dentry,
61098 + const struct vfsmount *old_mnt, const char *to);
61099 +int gr_acl_handle_rename(struct dentry *new_dentry,
61100 + struct dentry *parent_dentry,
61101 + const struct vfsmount *parent_mnt,
61102 + struct dentry *old_dentry,
61103 + struct inode *old_parent_inode,
61104 + struct vfsmount *old_mnt, const char *newname);
61105 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61106 + struct dentry *old_dentry,
61107 + struct dentry *new_dentry,
61108 + struct vfsmount *mnt, const __u8 replace);
61109 +__u32 gr_check_link(const struct dentry *new_dentry,
61110 + const struct dentry *parent_dentry,
61111 + const struct vfsmount *parent_mnt,
61112 + const struct dentry *old_dentry,
61113 + const struct vfsmount *old_mnt);
61114 +int gr_acl_handle_filldir(const struct file *file, const char *name,
61115 + const unsigned int namelen, const ino_t ino);
61116 +
61117 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
61118 + const struct vfsmount *mnt);
61119 +void gr_acl_handle_exit(void);
61120 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
61121 +int gr_acl_handle_procpidmem(const struct task_struct *task);
61122 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61123 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61124 +void gr_audit_ptrace(struct task_struct *task);
61125 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61126 +
61127 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61128 +
61129 +#ifdef CONFIG_GRKERNSEC
61130 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61131 +void gr_handle_vm86(void);
61132 +void gr_handle_mem_readwrite(u64 from, u64 to);
61133 +
61134 +void gr_log_badprocpid(const char *entry);
61135 +
61136 +extern int grsec_enable_dmesg;
61137 +extern int grsec_disable_privio;
61138 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61139 +extern int grsec_enable_chroot_findtask;
61140 +#endif
61141 +#ifdef CONFIG_GRKERNSEC_SETXID
61142 +extern int grsec_enable_setxid;
61143 +#endif
61144 +#endif
61145 +
61146 +#endif
61147 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61148 new file mode 100644
61149 index 0000000..e7ffaaf
61150 --- /dev/null
61151 +++ b/include/linux/grsock.h
61152 @@ -0,0 +1,19 @@
61153 +#ifndef __GRSOCK_H
61154 +#define __GRSOCK_H
61155 +
61156 +extern void gr_attach_curr_ip(const struct sock *sk);
61157 +extern int gr_handle_sock_all(const int family, const int type,
61158 + const int protocol);
61159 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61160 +extern int gr_handle_sock_server_other(const struct sock *sck);
61161 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61162 +extern int gr_search_connect(struct socket * sock,
61163 + struct sockaddr_in * addr);
61164 +extern int gr_search_bind(struct socket * sock,
61165 + struct sockaddr_in * addr);
61166 +extern int gr_search_listen(struct socket * sock);
61167 +extern int gr_search_accept(struct socket * sock);
61168 +extern int gr_search_socket(const int domain, const int type,
61169 + const int protocol);
61170 +
61171 +#endif
61172 diff --git a/include/linux/hid.h b/include/linux/hid.h
61173 index 3a95da6..51986f1 100644
61174 --- a/include/linux/hid.h
61175 +++ b/include/linux/hid.h
61176 @@ -696,7 +696,7 @@ struct hid_ll_driver {
61177 unsigned int code, int value);
61178
61179 int (*parse)(struct hid_device *hdev);
61180 -};
61181 +} __no_const;
61182
61183 #define PM_HINT_FULLON 1<<5
61184 #define PM_HINT_NORMAL 1<<1
61185 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61186 index d3999b4..1304cb4 100644
61187 --- a/include/linux/highmem.h
61188 +++ b/include/linux/highmem.h
61189 @@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
61190 kunmap_atomic(kaddr);
61191 }
61192
61193 +static inline void sanitize_highpage(struct page *page)
61194 +{
61195 + void *kaddr;
61196 + unsigned long flags;
61197 +
61198 + local_irq_save(flags);
61199 + kaddr = kmap_atomic(page);
61200 + clear_page(kaddr);
61201 + kunmap_atomic(kaddr);
61202 + local_irq_restore(flags);
61203 +}
61204 +
61205 static inline void zero_user_segments(struct page *page,
61206 unsigned start1, unsigned end1,
61207 unsigned start2, unsigned end2)
61208 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61209 index 195d8b3..e20cfab 100644
61210 --- a/include/linux/i2c.h
61211 +++ b/include/linux/i2c.h
61212 @@ -365,6 +365,7 @@ struct i2c_algorithm {
61213 /* To determine what the adapter supports */
61214 u32 (*functionality) (struct i2c_adapter *);
61215 };
61216 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61217
61218 /*
61219 * i2c_adapter is the structure used to identify a physical i2c bus along
61220 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61221 index d23c3c2..eb63c81 100644
61222 --- a/include/linux/i2o.h
61223 +++ b/include/linux/i2o.h
61224 @@ -565,7 +565,7 @@ struct i2o_controller {
61225 struct i2o_device *exec; /* Executive */
61226 #if BITS_PER_LONG == 64
61227 spinlock_t context_list_lock; /* lock for context_list */
61228 - atomic_t context_list_counter; /* needed for unique contexts */
61229 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61230 struct list_head context_list; /* list of context id's
61231 and pointers */
61232 #endif
61233 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
61234 index 58404b0..439ed95 100644
61235 --- a/include/linux/if_team.h
61236 +++ b/include/linux/if_team.h
61237 @@ -64,6 +64,7 @@ struct team_mode_ops {
61238 void (*port_leave)(struct team *team, struct team_port *port);
61239 void (*port_change_mac)(struct team *team, struct team_port *port);
61240 };
61241 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
61242
61243 enum team_option_type {
61244 TEAM_OPTION_TYPE_U32,
61245 @@ -112,7 +113,7 @@ struct team {
61246 struct list_head option_list;
61247
61248 const struct team_mode *mode;
61249 - struct team_mode_ops ops;
61250 + team_mode_ops_no_const ops;
61251 long mode_priv[TEAM_MODE_PRIV_LONGS];
61252 };
61253
61254 diff --git a/include/linux/init.h b/include/linux/init.h
61255 index 6b95109..4aca62c 100644
61256 --- a/include/linux/init.h
61257 +++ b/include/linux/init.h
61258 @@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
61259
61260 /* Each module must use one module_init(). */
61261 #define module_init(initfn) \
61262 - static inline initcall_t __inittest(void) \
61263 + static inline __used initcall_t __inittest(void) \
61264 { return initfn; } \
61265 int init_module(void) __attribute__((alias(#initfn)));
61266
61267 /* This is only required if you want to be unloadable. */
61268 #define module_exit(exitfn) \
61269 - static inline exitcall_t __exittest(void) \
61270 + static inline __used exitcall_t __exittest(void) \
61271 { return exitfn; } \
61272 void cleanup_module(void) __attribute__((alias(#exitfn)));
61273
61274 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61275 index e4baff5..83bb175 100644
61276 --- a/include/linux/init_task.h
61277 +++ b/include/linux/init_task.h
61278 @@ -134,6 +134,12 @@ extern struct cred init_cred;
61279
61280 #define INIT_TASK_COMM "swapper"
61281
61282 +#ifdef CONFIG_X86
61283 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61284 +#else
61285 +#define INIT_TASK_THREAD_INFO
61286 +#endif
61287 +
61288 /*
61289 * INIT_TASK is used to set up the first task table, touch at
61290 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61291 @@ -172,6 +178,7 @@ extern struct cred init_cred;
61292 RCU_INIT_POINTER(.cred, &init_cred), \
61293 .comm = INIT_TASK_COMM, \
61294 .thread = INIT_THREAD, \
61295 + INIT_TASK_THREAD_INFO \
61296 .fs = &init_fs, \
61297 .files = &init_files, \
61298 .signal = &init_signals, \
61299 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61300 index e6ca56d..8583707 100644
61301 --- a/include/linux/intel-iommu.h
61302 +++ b/include/linux/intel-iommu.h
61303 @@ -296,7 +296,7 @@ struct iommu_flush {
61304 u8 fm, u64 type);
61305 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61306 unsigned int size_order, u64 type);
61307 -};
61308 +} __no_const;
61309
61310 enum {
61311 SR_DMAR_FECTL_REG,
61312 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61313 index 2aea5d2..0b82f0c 100644
61314 --- a/include/linux/interrupt.h
61315 +++ b/include/linux/interrupt.h
61316 @@ -439,7 +439,7 @@ enum
61317 /* map softirq index to softirq name. update 'softirq_to_name' in
61318 * kernel/softirq.c when adding a new softirq.
61319 */
61320 -extern char *softirq_to_name[NR_SOFTIRQS];
61321 +extern const char * const softirq_to_name[NR_SOFTIRQS];
61322
61323 /* softirq mask and active fields moved to irq_cpustat_t in
61324 * asm/hardirq.h to get better cache usage. KAO
61325 @@ -447,12 +447,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61326
61327 struct softirq_action
61328 {
61329 - void (*action)(struct softirq_action *);
61330 + void (*action)(void);
61331 };
61332
61333 asmlinkage void do_softirq(void);
61334 asmlinkage void __do_softirq(void);
61335 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61336 +extern void open_softirq(int nr, void (*action)(void));
61337 extern void softirq_init(void);
61338 extern void __raise_softirq_irqoff(unsigned int nr);
61339
61340 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61341 index 3875719..4cd454c 100644
61342 --- a/include/linux/kallsyms.h
61343 +++ b/include/linux/kallsyms.h
61344 @@ -15,7 +15,8 @@
61345
61346 struct module;
61347
61348 -#ifdef CONFIG_KALLSYMS
61349 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61350 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61351 /* Lookup the address for a symbol. Returns 0 if not found. */
61352 unsigned long kallsyms_lookup_name(const char *name);
61353
61354 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61355 /* Stupid that this does nothing, but I didn't create this mess. */
61356 #define __print_symbol(fmt, addr)
61357 #endif /*CONFIG_KALLSYMS*/
61358 +#else /* when included by kallsyms.c, vsnprintf.c, or
61359 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61360 +extern void __print_symbol(const char *fmt, unsigned long address);
61361 +extern int sprint_backtrace(char *buffer, unsigned long address);
61362 +extern int sprint_symbol(char *buffer, unsigned long address);
61363 +const char *kallsyms_lookup(unsigned long addr,
61364 + unsigned long *symbolsize,
61365 + unsigned long *offset,
61366 + char **modname, char *namebuf);
61367 +#endif
61368
61369 /* This macro allows us to keep printk typechecking */
61370 static __printf(1, 2)
61371 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61372 index c4d2fc1..5df9c19 100644
61373 --- a/include/linux/kgdb.h
61374 +++ b/include/linux/kgdb.h
61375 @@ -53,7 +53,7 @@ extern int kgdb_connected;
61376 extern int kgdb_io_module_registered;
61377
61378 extern atomic_t kgdb_setting_breakpoint;
61379 -extern atomic_t kgdb_cpu_doing_single_step;
61380 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61381
61382 extern struct task_struct *kgdb_usethread;
61383 extern struct task_struct *kgdb_contthread;
61384 @@ -252,7 +252,7 @@ struct kgdb_arch {
61385 void (*disable_hw_break)(struct pt_regs *regs);
61386 void (*remove_all_hw_break)(void);
61387 void (*correct_hw_break)(void);
61388 -};
61389 +} __do_const;
61390
61391 /**
61392 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61393 @@ -277,7 +277,7 @@ struct kgdb_io {
61394 void (*pre_exception) (void);
61395 void (*post_exception) (void);
61396 int is_console;
61397 -};
61398 +} __do_const;
61399
61400 extern struct kgdb_arch arch_kgdb_ops;
61401
61402 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61403 index dd99c32..da06047 100644
61404 --- a/include/linux/kmod.h
61405 +++ b/include/linux/kmod.h
61406 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61407 * usually useless though. */
61408 extern __printf(2, 3)
61409 int __request_module(bool wait, const char *name, ...);
61410 +extern __printf(3, 4)
61411 +int ___request_module(bool wait, char *param_name, const char *name, ...);
61412 #define request_module(mod...) __request_module(true, mod)
61413 #define request_module_nowait(mod...) __request_module(false, mod)
61414 #define try_then_request_module(x, mod...) \
61415 diff --git a/include/linux/kref.h b/include/linux/kref.h
61416 index 9c07dce..a92fa71 100644
61417 --- a/include/linux/kref.h
61418 +++ b/include/linux/kref.h
61419 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
61420 static inline int kref_sub(struct kref *kref, unsigned int count,
61421 void (*release)(struct kref *kref))
61422 {
61423 - WARN_ON(release == NULL);
61424 + BUG_ON(release == NULL);
61425
61426 if (atomic_sub_and_test((int) count, &kref->refcount)) {
61427 release(kref);
61428 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61429 index 72cbf08..dd0201d 100644
61430 --- a/include/linux/kvm_host.h
61431 +++ b/include/linux/kvm_host.h
61432 @@ -322,7 +322,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61433 void vcpu_load(struct kvm_vcpu *vcpu);
61434 void vcpu_put(struct kvm_vcpu *vcpu);
61435
61436 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61437 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61438 struct module *module);
61439 void kvm_exit(void);
61440
61441 @@ -486,7 +486,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61442 struct kvm_guest_debug *dbg);
61443 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61444
61445 -int kvm_arch_init(void *opaque);
61446 +int kvm_arch_init(const void *opaque);
61447 void kvm_arch_exit(void);
61448
61449 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61450 diff --git a/include/linux/libata.h b/include/linux/libata.h
61451 index e926df7..1713bd8 100644
61452 --- a/include/linux/libata.h
61453 +++ b/include/linux/libata.h
61454 @@ -909,7 +909,7 @@ struct ata_port_operations {
61455 * fields must be pointers.
61456 */
61457 const struct ata_port_operations *inherits;
61458 -};
61459 +} __do_const;
61460
61461 struct ata_port_info {
61462 unsigned long flags;
61463 diff --git a/include/linux/mca.h b/include/linux/mca.h
61464 index 3797270..7765ede 100644
61465 --- a/include/linux/mca.h
61466 +++ b/include/linux/mca.h
61467 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61468 int region);
61469 void * (*mca_transform_memory)(struct mca_device *,
61470 void *memory);
61471 -};
61472 +} __no_const;
61473
61474 struct mca_bus {
61475 u64 default_dma_mask;
61476 diff --git a/include/linux/memory.h b/include/linux/memory.h
61477 index 1ac7f6e..a5794d0 100644
61478 --- a/include/linux/memory.h
61479 +++ b/include/linux/memory.h
61480 @@ -143,7 +143,7 @@ struct memory_accessor {
61481 size_t count);
61482 ssize_t (*write)(struct memory_accessor *, const char *buf,
61483 off_t offset, size_t count);
61484 -};
61485 +} __no_const;
61486
61487 /*
61488 * Kernel text modification mutex, used for code patching. Users of this lock
61489 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61490 index ee96cd5..7823c3a 100644
61491 --- a/include/linux/mfd/abx500.h
61492 +++ b/include/linux/mfd/abx500.h
61493 @@ -455,6 +455,7 @@ struct abx500_ops {
61494 int (*event_registers_startup_state_get) (struct device *, u8 *);
61495 int (*startup_irq_enabled) (struct device *, unsigned int);
61496 };
61497 +typedef struct abx500_ops __no_const abx500_ops_no_const;
61498
61499 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61500 void abx500_remove_ops(struct device *dev);
61501 diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
61502 index 9b07725..3d55001 100644
61503 --- a/include/linux/mfd/abx500/ux500_chargalg.h
61504 +++ b/include/linux/mfd/abx500/ux500_chargalg.h
61505 @@ -19,7 +19,7 @@ struct ux500_charger_ops {
61506 int (*enable) (struct ux500_charger *, int, int, int);
61507 int (*kick_wd) (struct ux500_charger *);
61508 int (*update_curr) (struct ux500_charger *, int);
61509 -};
61510 +} __no_const;
61511
61512 /**
61513 * struct ux500_charger - power supply ux500 charger sub class
61514 diff --git a/include/linux/mm.h b/include/linux/mm.h
61515 index 74aa71b..4ae97ba 100644
61516 --- a/include/linux/mm.h
61517 +++ b/include/linux/mm.h
61518 @@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
61519
61520 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61521 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61522 +
61523 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61524 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61525 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61526 +#else
61527 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61528 +#endif
61529 +
61530 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61531 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61532
61533 @@ -1013,34 +1020,6 @@ int set_page_dirty(struct page *page);
61534 int set_page_dirty_lock(struct page *page);
61535 int clear_page_dirty_for_io(struct page *page);
61536
61537 -/* Is the vma a continuation of the stack vma above it? */
61538 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61539 -{
61540 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61541 -}
61542 -
61543 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
61544 - unsigned long addr)
61545 -{
61546 - return (vma->vm_flags & VM_GROWSDOWN) &&
61547 - (vma->vm_start == addr) &&
61548 - !vma_growsdown(vma->vm_prev, addr);
61549 -}
61550 -
61551 -/* Is the vma a continuation of the stack vma below it? */
61552 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61553 -{
61554 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61555 -}
61556 -
61557 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
61558 - unsigned long addr)
61559 -{
61560 - return (vma->vm_flags & VM_GROWSUP) &&
61561 - (vma->vm_end == addr) &&
61562 - !vma_growsup(vma->vm_next, addr);
61563 -}
61564 -
61565 extern pid_t
61566 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
61567
61568 @@ -1139,6 +1118,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
61569 }
61570 #endif
61571
61572 +#ifdef CONFIG_MMU
61573 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61574 +#else
61575 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61576 +{
61577 + return __pgprot(0);
61578 +}
61579 +#endif
61580 +
61581 int vma_wants_writenotify(struct vm_area_struct *vma);
61582
61583 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61584 @@ -1157,8 +1145,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
61585 {
61586 return 0;
61587 }
61588 +
61589 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
61590 + unsigned long address)
61591 +{
61592 + return 0;
61593 +}
61594 #else
61595 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
61596 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
61597 #endif
61598
61599 #ifdef __PAGETABLE_PMD_FOLDED
61600 @@ -1167,8 +1162,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
61601 {
61602 return 0;
61603 }
61604 +
61605 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
61606 + unsigned long address)
61607 +{
61608 + return 0;
61609 +}
61610 #else
61611 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
61612 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
61613 #endif
61614
61615 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
61616 @@ -1186,11 +1188,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
61617 NULL: pud_offset(pgd, address);
61618 }
61619
61620 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
61621 +{
61622 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
61623 + NULL: pud_offset(pgd, address);
61624 +}
61625 +
61626 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
61627 {
61628 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
61629 NULL: pmd_offset(pud, address);
61630 }
61631 +
61632 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
61633 +{
61634 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
61635 + NULL: pmd_offset(pud, address);
61636 +}
61637 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
61638
61639 #if USE_SPLIT_PTLOCKS
61640 @@ -1400,6 +1414,7 @@ extern unsigned long do_mmap(struct file *, unsigned long,
61641 unsigned long, unsigned long,
61642 unsigned long, unsigned long);
61643 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
61644 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
61645
61646 /* These take the mm semaphore themselves */
61647 extern unsigned long vm_brk(unsigned long, unsigned long);
61648 @@ -1462,6 +1477,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
61649 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
61650 struct vm_area_struct **pprev);
61651
61652 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
61653 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
61654 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
61655 +
61656 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
61657 NULL if none. Assume start_addr < end_addr. */
61658 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
61659 @@ -1490,15 +1509,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
61660 return vma;
61661 }
61662
61663 -#ifdef CONFIG_MMU
61664 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
61665 -#else
61666 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
61667 -{
61668 - return __pgprot(0);
61669 -}
61670 -#endif
61671 -
61672 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
61673 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
61674 unsigned long pfn, unsigned long size, pgprot_t);
61675 @@ -1602,7 +1612,7 @@ extern int unpoison_memory(unsigned long pfn);
61676 extern int sysctl_memory_failure_early_kill;
61677 extern int sysctl_memory_failure_recovery;
61678 extern void shake_page(struct page *p, int access);
61679 -extern atomic_long_t mce_bad_pages;
61680 +extern atomic_long_unchecked_t mce_bad_pages;
61681 extern int soft_offline_page(struct page *page, int flags);
61682
61683 extern void dump_page(struct page *page);
61684 @@ -1633,5 +1643,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
61685 static inline bool page_is_guard(struct page *page) { return false; }
61686 #endif /* CONFIG_DEBUG_PAGEALLOC */
61687
61688 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61689 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
61690 +#else
61691 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
61692 +#endif
61693 +
61694 #endif /* __KERNEL__ */
61695 #endif /* _LINUX_MM_H */
61696 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
61697 index 3cc3062..efeaeb7 100644
61698 --- a/include/linux/mm_types.h
61699 +++ b/include/linux/mm_types.h
61700 @@ -252,6 +252,8 @@ struct vm_area_struct {
61701 #ifdef CONFIG_NUMA
61702 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
61703 #endif
61704 +
61705 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
61706 };
61707
61708 struct core_thread {
61709 @@ -326,7 +328,7 @@ struct mm_struct {
61710 unsigned long def_flags;
61711 unsigned long nr_ptes; /* Page table pages */
61712 unsigned long start_code, end_code, start_data, end_data;
61713 - unsigned long start_brk, brk, start_stack;
61714 + unsigned long brk_gap, start_brk, brk, start_stack;
61715 unsigned long arg_start, arg_end, env_start, env_end;
61716
61717 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
61718 @@ -388,6 +390,24 @@ struct mm_struct {
61719 #ifdef CONFIG_CPUMASK_OFFSTACK
61720 struct cpumask cpumask_allocation;
61721 #endif
61722 +
61723 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
61724 + unsigned long pax_flags;
61725 +#endif
61726 +
61727 +#ifdef CONFIG_PAX_DLRESOLVE
61728 + unsigned long call_dl_resolve;
61729 +#endif
61730 +
61731 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
61732 + unsigned long call_syscall;
61733 +#endif
61734 +
61735 +#ifdef CONFIG_PAX_ASLR
61736 + unsigned long delta_mmap; /* randomized offset */
61737 + unsigned long delta_stack; /* randomized offset */
61738 +#endif
61739 +
61740 };
61741
61742 static inline void mm_init_cpumask(struct mm_struct *mm)
61743 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
61744 index 1d1b1e1..2a13c78 100644
61745 --- a/include/linux/mmu_notifier.h
61746 +++ b/include/linux/mmu_notifier.h
61747 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
61748 */
61749 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
61750 ({ \
61751 - pte_t __pte; \
61752 + pte_t ___pte; \
61753 struct vm_area_struct *___vma = __vma; \
61754 unsigned long ___address = __address; \
61755 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
61756 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
61757 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
61758 - __pte; \
61759 + ___pte; \
61760 })
61761
61762 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
61763 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
61764 index dff7115..0e001c8 100644
61765 --- a/include/linux/mmzone.h
61766 +++ b/include/linux/mmzone.h
61767 @@ -380,7 +380,7 @@ struct zone {
61768 unsigned long flags; /* zone flags, see below */
61769
61770 /* Zone statistics */
61771 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61772 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61773
61774 /*
61775 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
61776 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
61777 index 501da4c..ba79bb4 100644
61778 --- a/include/linux/mod_devicetable.h
61779 +++ b/include/linux/mod_devicetable.h
61780 @@ -12,7 +12,7 @@
61781 typedef unsigned long kernel_ulong_t;
61782 #endif
61783
61784 -#define PCI_ANY_ID (~0)
61785 +#define PCI_ANY_ID ((__u16)~0)
61786
61787 struct pci_device_id {
61788 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
61789 @@ -131,7 +131,7 @@ struct usb_device_id {
61790 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
61791 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
61792
61793 -#define HID_ANY_ID (~0)
61794 +#define HID_ANY_ID (~0U)
61795
61796 struct hid_device_id {
61797 __u16 bus;
61798 diff --git a/include/linux/module.h b/include/linux/module.h
61799 index fbcafe2..e5d9587 100644
61800 --- a/include/linux/module.h
61801 +++ b/include/linux/module.h
61802 @@ -17,6 +17,7 @@
61803 #include <linux/moduleparam.h>
61804 #include <linux/tracepoint.h>
61805 #include <linux/export.h>
61806 +#include <linux/fs.h>
61807
61808 #include <linux/percpu.h>
61809 #include <asm/module.h>
61810 @@ -273,19 +274,16 @@ struct module
61811 int (*init)(void);
61812
61813 /* If this is non-NULL, vfree after init() returns */
61814 - void *module_init;
61815 + void *module_init_rx, *module_init_rw;
61816
61817 /* Here is the actual code + data, vfree'd on unload. */
61818 - void *module_core;
61819 + void *module_core_rx, *module_core_rw;
61820
61821 /* Here are the sizes of the init and core sections */
61822 - unsigned int init_size, core_size;
61823 + unsigned int init_size_rw, core_size_rw;
61824
61825 /* The size of the executable code in each section. */
61826 - unsigned int init_text_size, core_text_size;
61827 -
61828 - /* Size of RO sections of the module (text+rodata) */
61829 - unsigned int init_ro_size, core_ro_size;
61830 + unsigned int init_size_rx, core_size_rx;
61831
61832 /* Arch-specific module values */
61833 struct mod_arch_specific arch;
61834 @@ -341,6 +339,10 @@ struct module
61835 #ifdef CONFIG_EVENT_TRACING
61836 struct ftrace_event_call **trace_events;
61837 unsigned int num_trace_events;
61838 + struct file_operations trace_id;
61839 + struct file_operations trace_enable;
61840 + struct file_operations trace_format;
61841 + struct file_operations trace_filter;
61842 #endif
61843 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
61844 unsigned int num_ftrace_callsites;
61845 @@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
61846 bool is_module_percpu_address(unsigned long addr);
61847 bool is_module_text_address(unsigned long addr);
61848
61849 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
61850 +{
61851 +
61852 +#ifdef CONFIG_PAX_KERNEXEC
61853 + if (ktla_ktva(addr) >= (unsigned long)start &&
61854 + ktla_ktva(addr) < (unsigned long)start + size)
61855 + return 1;
61856 +#endif
61857 +
61858 + return ((void *)addr >= start && (void *)addr < start + size);
61859 +}
61860 +
61861 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
61862 +{
61863 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
61864 +}
61865 +
61866 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
61867 +{
61868 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
61869 +}
61870 +
61871 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
61872 +{
61873 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
61874 +}
61875 +
61876 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
61877 +{
61878 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
61879 +}
61880 +
61881 static inline int within_module_core(unsigned long addr, struct module *mod)
61882 {
61883 - return (unsigned long)mod->module_core <= addr &&
61884 - addr < (unsigned long)mod->module_core + mod->core_size;
61885 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
61886 }
61887
61888 static inline int within_module_init(unsigned long addr, struct module *mod)
61889 {
61890 - return (unsigned long)mod->module_init <= addr &&
61891 - addr < (unsigned long)mod->module_init + mod->init_size;
61892 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
61893 }
61894
61895 /* Search for module by name: must hold module_mutex. */
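A hedged sketch of how the split layout above is meant to be queried: callers that only care about executable addresses can use the RX helpers directly, while the rewritten within_module_core()/within_module_init() keep the old any-section semantics. is_module_text_addr_in() is a hypothetical wrapper, not part of the patch:

/* hypothetical: true if addr falls in mod's executable (RX) core or init image */
static inline int is_module_text_addr_in(unsigned long addr, struct module *mod)
{
        return within_module_core_rx(addr, mod) ||
               within_module_init_rx(addr, mod);
}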
61896 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
61897 index b2be02e..72d2f78 100644
61898 --- a/include/linux/moduleloader.h
61899 +++ b/include/linux/moduleloader.h
61900 @@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
61901
61902 /* Allocator used for allocating struct module, core sections and init
61903 sections. Returns NULL on failure. */
61904 -void *module_alloc(unsigned long size);
61905 +void *module_alloc(unsigned long size) __size_overflow(1);
61906 +
61907 +#ifdef CONFIG_PAX_KERNEXEC
61908 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
61909 +#else
61910 +#define module_alloc_exec(x) module_alloc(x)
61911 +#endif
61912
61913 /* Free memory returned from module_alloc. */
61914 void module_free(struct module *mod, void *module_region);
61915
61916 +#ifdef CONFIG_PAX_KERNEXEC
61917 +void module_free_exec(struct module *mod, void *module_region);
61918 +#else
61919 +#define module_free_exec(x, y) module_free((x), (y))
61920 +#endif
61921 +
61922 /* Apply the given relocation to the (simplified) ELF. Return -error
61923 or 0. */
61924 int apply_relocate(Elf_Shdr *sechdrs,
61925 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
61926 index ea36486..91e70f4 100644
61927 --- a/include/linux/moduleparam.h
61928 +++ b/include/linux/moduleparam.h
61929 @@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
61930 * @len is usually just sizeof(string).
61931 */
61932 #define module_param_string(name, string, len, perm) \
61933 - static const struct kparam_string __param_string_##name \
61934 + static const struct kparam_string __param_string_##name __used \
61935 = { len, string }; \
61936 __module_param_call(MODULE_PARAM_PREFIX, name, \
61937 &param_ops_string, \
61938 @@ -424,7 +424,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
61939 */
61940 #define module_param_array_named(name, array, type, nump, perm) \
61941 param_check_##type(name, &(array)[0]); \
61942 - static const struct kparam_array __param_arr_##name \
61943 + static const struct kparam_array __param_arr_##name __used \
61944 = { .max = ARRAY_SIZE(array), .num = nump, \
61945 .ops = &param_ops_##type, \
61946 .elemsize = sizeof(array[0]), .elem = array }; \
61947 diff --git a/include/linux/namei.h b/include/linux/namei.h
61948 index ffc0213..2c1f2cb 100644
61949 --- a/include/linux/namei.h
61950 +++ b/include/linux/namei.h
61951 @@ -24,7 +24,7 @@ struct nameidata {
61952 unsigned seq;
61953 int last_type;
61954 unsigned depth;
61955 - char *saved_names[MAX_NESTED_LINKS + 1];
61956 + const char *saved_names[MAX_NESTED_LINKS + 1];
61957
61958 /* Intent data */
61959 union {
61960 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
61961 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
61962 extern void unlock_rename(struct dentry *, struct dentry *);
61963
61964 -static inline void nd_set_link(struct nameidata *nd, char *path)
61965 +static inline void nd_set_link(struct nameidata *nd, const char *path)
61966 {
61967 nd->saved_names[nd->depth] = path;
61968 }
61969
61970 -static inline char *nd_get_link(struct nameidata *nd)
61971 +static inline const char *nd_get_link(const struct nameidata *nd)
61972 {
61973 return nd->saved_names[nd->depth];
61974 }
61975 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
61976 index 33900a5..2072000 100644
61977 --- a/include/linux/netdevice.h
61978 +++ b/include/linux/netdevice.h
61979 @@ -1003,6 +1003,7 @@ struct net_device_ops {
61980 int (*ndo_neigh_construct)(struct neighbour *n);
61981 void (*ndo_neigh_destroy)(struct neighbour *n);
61982 };
61983 +typedef struct net_device_ops __no_const net_device_ops_no_const;
61984
61985 /*
61986 * The DEVICE structure.
61987 @@ -1064,7 +1065,7 @@ struct net_device {
61988 int iflink;
61989
61990 struct net_device_stats stats;
61991 - atomic_long_t rx_dropped; /* dropped packets by core network
61992 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
61993 * Do not use this in drivers.
61994 */
61995
61996 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
61997 new file mode 100644
61998 index 0000000..33f4af8
61999 --- /dev/null
62000 +++ b/include/linux/netfilter/xt_gradm.h
62001 @@ -0,0 +1,9 @@
62002 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
62003 +#define _LINUX_NETFILTER_XT_GRADM_H 1
62004 +
62005 +struct xt_gradm_mtinfo {
62006 + __u16 flags;
62007 + __u16 invflags;
62008 +};
62009 +
62010 +#endif
62011 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62012 index c65a18a..0c05f3a 100644
62013 --- a/include/linux/of_pdt.h
62014 +++ b/include/linux/of_pdt.h
62015 @@ -32,7 +32,7 @@ struct of_pdt_ops {
62016
62017 /* return 0 on success; fill in 'len' with number of bytes in path */
62018 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62019 -};
62020 +} __no_const;
62021
62022 extern void *prom_early_alloc(unsigned long size);
62023
62024 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62025 index a4c5624..79d6d88 100644
62026 --- a/include/linux/oprofile.h
62027 +++ b/include/linux/oprofile.h
62028 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62029 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62030 char const * name, ulong * val);
62031
62032 -/** Create a file for read-only access to an atomic_t. */
62033 +/** Create a file for read-only access to an atomic_unchecked_t. */
62034 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62035 - char const * name, atomic_t * val);
62036 + char const * name, atomic_unchecked_t * val);
62037
62038 /** create a directory */
62039 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62040 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62041 index ddbb6a9..be1680e 100644
62042 --- a/include/linux/perf_event.h
62043 +++ b/include/linux/perf_event.h
62044 @@ -879,8 +879,8 @@ struct perf_event {
62045
62046 enum perf_event_active_state state;
62047 unsigned int attach_state;
62048 - local64_t count;
62049 - atomic64_t child_count;
62050 + local64_t count; /* PaX: fix it one day */
62051 + atomic64_unchecked_t child_count;
62052
62053 /*
62054 * These are the total time in nanoseconds that the event
62055 @@ -931,8 +931,8 @@ struct perf_event {
62056 * These accumulate total time (in nanoseconds) that children
62057 * events have been enabled and running, respectively.
62058 */
62059 - atomic64_t child_total_time_enabled;
62060 - atomic64_t child_total_time_running;
62061 + atomic64_unchecked_t child_total_time_enabled;
62062 + atomic64_unchecked_t child_total_time_running;
62063
62064 /*
62065 * Protect attach/detach and child_list:
62066 diff --git a/include/linux/personality.h b/include/linux/personality.h
62067 index 8fc7dd1a..c19d89e 100644
62068 --- a/include/linux/personality.h
62069 +++ b/include/linux/personality.h
62070 @@ -44,6 +44,7 @@ enum {
62071 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62072 ADDR_NO_RANDOMIZE | \
62073 ADDR_COMPAT_LAYOUT | \
62074 + ADDR_LIMIT_3GB | \
62075 MMAP_PAGE_ZERO)
62076
62077 /*
62078 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62079 index e1ac1ce..0675fed 100644
62080 --- a/include/linux/pipe_fs_i.h
62081 +++ b/include/linux/pipe_fs_i.h
62082 @@ -45,9 +45,9 @@ struct pipe_buffer {
62083 struct pipe_inode_info {
62084 wait_queue_head_t wait;
62085 unsigned int nrbufs, curbuf, buffers;
62086 - unsigned int readers;
62087 - unsigned int writers;
62088 - unsigned int waiting_writers;
62089 + atomic_t readers;
62090 + atomic_t writers;
62091 + atomic_t waiting_writers;
62092 unsigned int r_counter;
62093 unsigned int w_counter;
62094 struct page *tmp_page;
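Illustrative only: with readers, writers and waiting_writers turned into atomic_t, call sites (converted elsewhere in this patch) have to move from plain increments and reads to the atomic helpers, roughly like this hypothetical excerpt:

/* hypothetical excerpts of pipe open/poll paths after the conversion */
static void pipe_note_new_reader(struct pipe_inode_info *pipe)
{
        atomic_inc(&pipe->readers);             /* was: pipe->readers++; */
}

static int pipe_has_writers(struct pipe_inode_info *pipe)
{
        return atomic_read(&pipe->writers);     /* was: return pipe->writers; */
}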
62095 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62096 index 609daae..5392427 100644
62097 --- a/include/linux/pm_runtime.h
62098 +++ b/include/linux/pm_runtime.h
62099 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62100
62101 static inline void pm_runtime_mark_last_busy(struct device *dev)
62102 {
62103 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
62104 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62105 }
62106
62107 #else /* !CONFIG_PM_RUNTIME */
62108 diff --git a/include/linux/poison.h b/include/linux/poison.h
62109 index 2110a81..13a11bb 100644
62110 --- a/include/linux/poison.h
62111 +++ b/include/linux/poison.h
62112 @@ -19,8 +19,8 @@
62113 * under normal circumstances, used to verify that nobody uses
62114 * non-initialized list entries.
62115 */
62116 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62117 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62118 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62119 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62120
62121 /********** include/linux/timer.h **********/
62122 /*
62123 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62124 index 5a710b9..0b0dab9 100644
62125 --- a/include/linux/preempt.h
62126 +++ b/include/linux/preempt.h
62127 @@ -126,7 +126,7 @@ struct preempt_ops {
62128 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62129 void (*sched_out)(struct preempt_notifier *notifier,
62130 struct task_struct *next);
62131 -};
62132 +} __no_const;
62133
62134 /**
62135 * preempt_notifier - key for installing preemption notifiers
62136 diff --git a/include/linux/printk.h b/include/linux/printk.h
62137 index 0525927..a5388b6 100644
62138 --- a/include/linux/printk.h
62139 +++ b/include/linux/printk.h
62140 @@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
62141 extern int printk_needs_cpu(int cpu);
62142 extern void printk_tick(void);
62143
62144 +extern int kptr_restrict;
62145 +
62146 #ifdef CONFIG_PRINTK
62147 asmlinkage __printf(1, 0)
62148 int vprintk(const char *fmt, va_list args);
62149 @@ -117,7 +119,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
62150
62151 extern int printk_delay_msec;
62152 extern int dmesg_restrict;
62153 -extern int kptr_restrict;
62154
62155 void log_buf_kexec_setup(void);
62156 void __init setup_log_buf(int early);
62157 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62158 index 85c5073..51fac8b 100644
62159 --- a/include/linux/proc_fs.h
62160 +++ b/include/linux/proc_fs.h
62161 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
62162 return proc_create_data(name, mode, parent, proc_fops, NULL);
62163 }
62164
62165 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
62166 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62167 +{
62168 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62169 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62170 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62171 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62172 +#else
62173 + return proc_create_data(name, mode, parent, proc_fops, NULL);
62174 +#endif
62175 +}
62176 +
62177 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62178 umode_t mode, struct proc_dir_entry *base,
62179 read_proc_t *read_proc, void * data)
62180 @@ -258,7 +270,7 @@ union proc_op {
62181 int (*proc_show)(struct seq_file *m,
62182 struct pid_namespace *ns, struct pid *pid,
62183 struct task_struct *task);
62184 -};
62185 +} __no_const;
62186
62187 struct ctl_table_header;
62188 struct ctl_table;
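A hedged usage sketch for the helper added above: code that wants grsecurity's /proc visibility policy applied centrally registers through proc_create_grsec() instead of proc_create(), and the requested mode only survives when neither GRKERNSEC_PROC_USER nor GRKERNSEC_PROC_USERGROUP is enabled. The names below ("my_driver", my_fops) are hypothetical:

/* hypothetical call site */
extern const struct file_operations my_fops;    /* assumed defined by the driver */
static struct proc_dir_entry *my_pde;

static int __init my_driver_proc_init(void)
{
        my_pde = proc_create_grsec("my_driver", 0444, NULL, &my_fops);
        return my_pde ? 0 : -ENOMEM;
}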
62189 diff --git a/include/linux/random.h b/include/linux/random.h
62190 index 8f74538..02a1012 100644
62191 --- a/include/linux/random.h
62192 +++ b/include/linux/random.h
62193 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
62194
62195 u32 prandom32(struct rnd_state *);
62196
62197 +static inline unsigned long pax_get_random_long(void)
62198 +{
62199 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62200 +}
62201 +
62202 /*
62203 * Handle minimum values for seeds
62204 */
62205 static inline u32 __seed(u32 x, u32 m)
62206 {
62207 - return (x < m) ? x + m : x;
62208 + return (x <= m) ? x + m + 1 : x;
62209 }
62210
62211 /**
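A hedged, illustrative sketch (architecture-side code, not shown in this hunk) of how pax_get_random_long() above and the delta_mmap/delta_stack fields added earlier in this section usually fit together; the bit masks are made-up placeholders, not values taken from any real architecture:

/* hypothetical arch hook: pick the ASLR offsets for a new mm at exec time */
static void pax_randomize_layout_sketch(struct mm_struct *mm)
{
#ifdef CONFIG_PAX_ASLR
        mm->delta_mmap  = (pax_get_random_long() & 0xfffUL) << PAGE_SHIFT;
        mm->delta_stack = (pax_get_random_long() & 0xffUL) << PAGE_SHIFT;
#endif
}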
62212 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62213 index e0879a7..a12f962 100644
62214 --- a/include/linux/reboot.h
62215 +++ b/include/linux/reboot.h
62216 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62217 * Architecture-specific implementations of sys_reboot commands.
62218 */
62219
62220 -extern void machine_restart(char *cmd);
62221 -extern void machine_halt(void);
62222 -extern void machine_power_off(void);
62223 +extern void machine_restart(char *cmd) __noreturn;
62224 +extern void machine_halt(void) __noreturn;
62225 +extern void machine_power_off(void) __noreturn;
62226
62227 extern void machine_shutdown(void);
62228 struct pt_regs;
62229 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62230 */
62231
62232 extern void kernel_restart_prepare(char *cmd);
62233 -extern void kernel_restart(char *cmd);
62234 -extern void kernel_halt(void);
62235 -extern void kernel_power_off(void);
62236 +extern void kernel_restart(char *cmd) __noreturn;
62237 +extern void kernel_halt(void) __noreturn;
62238 +extern void kernel_power_off(void) __noreturn;
62239
62240 extern int C_A_D; /* for sysctl */
62241 void ctrl_alt_del(void);
62242 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62243 * Emergency restart, callable from an interrupt handler.
62244 */
62245
62246 -extern void emergency_restart(void);
62247 +extern void emergency_restart(void) __noreturn;
62248 #include <asm/emergency-restart.h>
62249
62250 #endif
62251 diff --git a/include/linux/relay.h b/include/linux/relay.h
62252 index 91cacc3..b55ff74 100644
62253 --- a/include/linux/relay.h
62254 +++ b/include/linux/relay.h
62255 @@ -160,7 +160,7 @@ struct rchan_callbacks
62256 * The callback should return 0 if successful, negative if not.
62257 */
62258 int (*remove_buf_file)(struct dentry *dentry);
62259 -};
62260 +} __no_const;
62261
62262 /*
62263 * CONFIG_RELAY kernel API, kernel/relay.c
62264 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62265 index 6fdf027..ff72610 100644
62266 --- a/include/linux/rfkill.h
62267 +++ b/include/linux/rfkill.h
62268 @@ -147,6 +147,7 @@ struct rfkill_ops {
62269 void (*query)(struct rfkill *rfkill, void *data);
62270 int (*set_block)(void *data, bool blocked);
62271 };
62272 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62273
62274 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62275 /**
62276 diff --git a/include/linux/rio.h b/include/linux/rio.h
62277 index 4d50611..c6858a2 100644
62278 --- a/include/linux/rio.h
62279 +++ b/include/linux/rio.h
62280 @@ -315,7 +315,7 @@ struct rio_ops {
62281 int mbox, void *buffer, size_t len);
62282 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
62283 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
62284 -};
62285 +} __no_const;
62286
62287 #define RIO_RESOURCE_MEM 0x00000100
62288 #define RIO_RESOURCE_DOORBELL 0x00000200
62289 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62290 index fd07c45..4676b8e 100644
62291 --- a/include/linux/rmap.h
62292 +++ b/include/linux/rmap.h
62293 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62294 void anon_vma_init(void); /* create anon_vma_cachep */
62295 int anon_vma_prepare(struct vm_area_struct *);
62296 void unlink_anon_vmas(struct vm_area_struct *);
62297 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62298 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62299 void anon_vma_moveto_tail(struct vm_area_struct *);
62300 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62301 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62302
62303 static inline void anon_vma_merge(struct vm_area_struct *vma,
62304 struct vm_area_struct *next)
62305 diff --git a/include/linux/sched.h b/include/linux/sched.h
62306 index 81a173c..85ccd8f 100644
62307 --- a/include/linux/sched.h
62308 +++ b/include/linux/sched.h
62309 @@ -100,6 +100,7 @@ struct bio_list;
62310 struct fs_struct;
62311 struct perf_event_context;
62312 struct blk_plug;
62313 +struct linux_binprm;
62314
62315 /*
62316 * List of flags we want to share for kernel threads,
62317 @@ -382,10 +383,13 @@ struct user_namespace;
62318 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62319
62320 extern int sysctl_max_map_count;
62321 +extern unsigned long sysctl_heap_stack_gap;
62322
62323 #include <linux/aio.h>
62324
62325 #ifdef CONFIG_MMU
62326 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62327 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62328 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62329 extern unsigned long
62330 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62331 @@ -643,6 +647,17 @@ struct signal_struct {
62332 #ifdef CONFIG_TASKSTATS
62333 struct taskstats *stats;
62334 #endif
62335 +
62336 +#ifdef CONFIG_GRKERNSEC
62337 + u32 curr_ip;
62338 + u32 saved_ip;
62339 + u32 gr_saddr;
62340 + u32 gr_daddr;
62341 + u16 gr_sport;
62342 + u16 gr_dport;
62343 + u8 used_accept:1;
62344 +#endif
62345 +
62346 #ifdef CONFIG_AUDIT
62347 unsigned audit_tty;
62348 struct tty_audit_buf *tty_audit_buf;
62349 @@ -726,6 +741,11 @@ struct user_struct {
62350 struct key *session_keyring; /* UID's default session keyring */
62351 #endif
62352
62353 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62354 + unsigned int banned;
62355 + unsigned long ban_expires;
62356 +#endif
62357 +
62358 /* Hash table maintenance information */
62359 struct hlist_node uidhash_node;
62360 uid_t uid;
62361 @@ -1386,8 +1406,8 @@ struct task_struct {
62362 struct list_head thread_group;
62363
62364 struct completion *vfork_done; /* for vfork() */
62365 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
62366 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62367 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
62368 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62369
62370 cputime_t utime, stime, utimescaled, stimescaled;
62371 cputime_t gtime;
62372 @@ -1403,13 +1423,6 @@ struct task_struct {
62373 struct task_cputime cputime_expires;
62374 struct list_head cpu_timers[3];
62375
62376 -/* process credentials */
62377 - const struct cred __rcu *real_cred; /* objective and real subjective task
62378 - * credentials (COW) */
62379 - const struct cred __rcu *cred; /* effective (overridable) subjective task
62380 - * credentials (COW) */
62381 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62382 -
62383 char comm[TASK_COMM_LEN]; /* executable name excluding path
62384 - access with [gs]et_task_comm (which lock
62385 it with task_lock())
62386 @@ -1426,8 +1439,16 @@ struct task_struct {
62387 #endif
62388 /* CPU-specific state of this task */
62389 struct thread_struct thread;
62390 +/* thread_info moved to task_struct */
62391 +#ifdef CONFIG_X86
62392 + struct thread_info tinfo;
62393 +#endif
62394 /* filesystem information */
62395 struct fs_struct *fs;
62396 +
62397 + const struct cred __rcu *cred; /* effective (overridable) subjective task
62398 + * credentials (COW) */
62399 +
62400 /* open file information */
62401 struct files_struct *files;
62402 /* namespaces */
62403 @@ -1469,6 +1490,11 @@ struct task_struct {
62404 struct rt_mutex_waiter *pi_blocked_on;
62405 #endif
62406
62407 +/* process credentials */
62408 + const struct cred __rcu *real_cred; /* objective and real subjective task
62409 + * credentials (COW) */
62410 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62411 +
62412 #ifdef CONFIG_DEBUG_MUTEXES
62413 /* mutex deadlock detection */
62414 struct mutex_waiter *blocked_on;
62415 @@ -1585,6 +1611,27 @@ struct task_struct {
62416 unsigned long default_timer_slack_ns;
62417
62418 struct list_head *scm_work_list;
62419 +
62420 +#ifdef CONFIG_GRKERNSEC
62421 + /* grsecurity */
62422 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62423 + u64 exec_id;
62424 +#endif
62425 +#ifdef CONFIG_GRKERNSEC_SETXID
62426 + const struct cred *delayed_cred;
62427 +#endif
62428 + struct dentry *gr_chroot_dentry;
62429 + struct acl_subject_label *acl;
62430 + struct acl_role_label *role;
62431 + struct file *exec_file;
62432 + u16 acl_role_id;
62433 + /* is this the task that authenticated to the special role */
62434 + u8 acl_sp_role;
62435 + u8 is_writable;
62436 + u8 brute;
62437 + u8 gr_is_chrooted;
62438 +#endif
62439 +
62440 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62441 /* Index of current stored address in ret_stack */
62442 int curr_ret_stack;
62443 @@ -1619,6 +1666,51 @@ struct task_struct {
62444 #endif
62445 };
62446
62447 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62448 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62449 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62450 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62451 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62452 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62453 +
62454 +#ifdef CONFIG_PAX_SOFTMODE
62455 +extern int pax_softmode;
62456 +#endif
62457 +
62458 +extern int pax_check_flags(unsigned long *);
62459 +
62460 +/* if tsk != current then task_lock must be held on it */
62461 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62462 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
62463 +{
62464 + if (likely(tsk->mm))
62465 + return tsk->mm->pax_flags;
62466 + else
62467 + return 0UL;
62468 +}
62469 +
62470 +/* if tsk != current then task_lock must be held on it */
62471 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62472 +{
62473 + if (likely(tsk->mm)) {
62474 + tsk->mm->pax_flags = flags;
62475 + return 0;
62476 + }
62477 + return -EINVAL;
62478 +}
62479 +#endif
62480 +
62481 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62482 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
62483 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62484 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62485 +#endif
62486 +
62487 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62488 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62489 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
62490 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
62491 +
62492 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62493 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62494
62495 @@ -2138,7 +2230,9 @@ void yield(void);
62496 extern struct exec_domain default_exec_domain;
62497
62498 union thread_union {
62499 +#ifndef CONFIG_X86
62500 struct thread_info thread_info;
62501 +#endif
62502 unsigned long stack[THREAD_SIZE/sizeof(long)];
62503 };
62504
62505 @@ -2171,6 +2265,7 @@ extern struct pid_namespace init_pid_ns;
62506 */
62507
62508 extern struct task_struct *find_task_by_vpid(pid_t nr);
62509 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62510 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62511 struct pid_namespace *ns);
62512
62513 @@ -2314,7 +2409,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62514 extern void exit_itimers(struct signal_struct *);
62515 extern void flush_itimer_signals(void);
62516
62517 -extern void do_group_exit(int);
62518 +extern __noreturn void do_group_exit(int);
62519
62520 extern void daemonize(const char *, ...);
62521 extern int allow_signal(int);
62522 @@ -2515,13 +2610,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62523
62524 #endif
62525
62526 -static inline int object_is_on_stack(void *obj)
62527 +static inline int object_starts_on_stack(void *obj)
62528 {
62529 - void *stack = task_stack_page(current);
62530 + const void *stack = task_stack_page(current);
62531
62532 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62533 }
62534
62535 +#ifdef CONFIG_PAX_USERCOPY
62536 +extern int object_is_on_stack(const void *obj, unsigned long len);
62537 +#endif
62538 +
62539 extern void thread_info_cache_init(void);
62540
62541 #ifdef CONFIG_DEBUG_STACK_USAGE
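One point worth spelling out from the hunk above: object_is_on_stack() becomes object_starts_on_stack(), reflecting that it only tests the first byte of the object, while the PAX_USERCOPY prototype also takes a length so the whole span can be validated. As a rough illustration only (the patch's real implementation lives outside this header and does more than this), such a check could look like:

#ifdef CONFIG_PAX_USERCOPY
/* illustrative guess: accept only objects wholly contained in the current stack */
static int object_is_on_stack_sketch(const void *obj, unsigned long len)
{
        const void *stack = task_stack_page(current);

        return obj >= stack && obj + len <= stack + THREAD_SIZE;
}
#endif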
62542 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62543 index 899fbb4..1cb4138 100644
62544 --- a/include/linux/screen_info.h
62545 +++ b/include/linux/screen_info.h
62546 @@ -43,7 +43,8 @@ struct screen_info {
62547 __u16 pages; /* 0x32 */
62548 __u16 vesa_attributes; /* 0x34 */
62549 __u32 capabilities; /* 0x36 */
62550 - __u8 _reserved[6]; /* 0x3a */
62551 + __u16 vesapm_size; /* 0x3a */
62552 + __u8 _reserved[4]; /* 0x3c */
62553 } __attribute__((packed));
62554
62555 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62556 diff --git a/include/linux/security.h b/include/linux/security.h
62557 index 673afbb..2b7454b 100644
62558 --- a/include/linux/security.h
62559 +++ b/include/linux/security.h
62560 @@ -26,6 +26,7 @@
62561 #include <linux/capability.h>
62562 #include <linux/slab.h>
62563 #include <linux/err.h>
62564 +#include <linux/grsecurity.h>
62565
62566 struct linux_binprm;
62567 struct cred;
62568 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62569 index fc61854..d7c490b 100644
62570 --- a/include/linux/seq_file.h
62571 +++ b/include/linux/seq_file.h
62572 @@ -25,6 +25,9 @@ struct seq_file {
62573 struct mutex lock;
62574 const struct seq_operations *op;
62575 int poll_event;
62576 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62577 + u64 exec_id;
62578 +#endif
62579 void *private;
62580 };
62581
62582 @@ -34,6 +37,7 @@ struct seq_operations {
62583 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
62584 int (*show) (struct seq_file *m, void *v);
62585 };
62586 +typedef struct seq_operations __no_const seq_operations_no_const;
62587
62588 #define SEQ_SKIP 1
62589
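A hedged note on the *_no_const typedefs that recur throughout this section (seq_operations_no_const here, rfkill_ops_no_const, v4l2_ioctl_ops_no_const and others): the constify plugin advertised later via the CONSTIFY_PLUGIN vermagic tag is understood to make ops-style structures read-only by default, and these typedefs opt individual instances back out when they genuinely must be written at runtime, as the saa7146_vv.h hunk below does for its overridable ops. A rough usage sketch, with my_seq_ops purely hypothetical:

/* hypothetical module-local table filled in at runtime; a plain
 * struct seq_operations instance would be constified by the plugin */
static seq_operations_no_const my_seq_ops;

static void my_seq_ops_init(int (*show)(struct seq_file *, void *))
{
        my_seq_ops.show = show;
}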
62590 diff --git a/include/linux/shm.h b/include/linux/shm.h
62591 index 92808b8..c28cac4 100644
62592 --- a/include/linux/shm.h
62593 +++ b/include/linux/shm.h
62594 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
62595
62596 /* The task created the shm object. NULL if the task is dead. */
62597 struct task_struct *shm_creator;
62598 +#ifdef CONFIG_GRKERNSEC
62599 + time_t shm_createtime;
62600 + pid_t shm_lapid;
62601 +#endif
62602 };
62603
62604 /* shm_mode upper byte flags */
62605 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
62606 index 111f26b..1b1a2e5 100644
62607 --- a/include/linux/skbuff.h
62608 +++ b/include/linux/skbuff.h
62609 @@ -666,7 +666,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
62610 */
62611 static inline int skb_queue_empty(const struct sk_buff_head *list)
62612 {
62613 - return list->next == (struct sk_buff *)list;
62614 + return list->next == (const struct sk_buff *)list;
62615 }
62616
62617 /**
62618 @@ -679,7 +679,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
62619 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62620 const struct sk_buff *skb)
62621 {
62622 - return skb->next == (struct sk_buff *)list;
62623 + return skb->next == (const struct sk_buff *)list;
62624 }
62625
62626 /**
62627 @@ -692,7 +692,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62628 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
62629 const struct sk_buff *skb)
62630 {
62631 - return skb->prev == (struct sk_buff *)list;
62632 + return skb->prev == (const struct sk_buff *)list;
62633 }
62634
62635 /**
62636 @@ -1587,7 +1587,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
62637 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
62638 */
62639 #ifndef NET_SKB_PAD
62640 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
62641 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
62642 #endif
62643
62644 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
62645 diff --git a/include/linux/slab.h b/include/linux/slab.h
62646 index a595dce..c403597 100644
62647 --- a/include/linux/slab.h
62648 +++ b/include/linux/slab.h
62649 @@ -11,12 +11,20 @@
62650
62651 #include <linux/gfp.h>
62652 #include <linux/types.h>
62653 +#include <linux/err.h>
62654
62655 /*
62656 * Flags to pass to kmem_cache_create().
62657 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
62658 */
62659 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
62660 +
62661 +#ifdef CONFIG_PAX_USERCOPY
62662 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
62663 +#else
62664 +#define SLAB_USERCOPY 0x00000000UL
62665 +#endif
62666 +
62667 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
62668 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
62669 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
62670 @@ -87,10 +95,13 @@
62671 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
62672 * Both make kfree a no-op.
62673 */
62674 -#define ZERO_SIZE_PTR ((void *)16)
62675 +#define ZERO_SIZE_PTR \
62676 +({ \
62677 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
62678 + (void *)(-MAX_ERRNO-1L); \
62679 +})
62680
62681 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
62682 - (unsigned long)ZERO_SIZE_PTR)
62683 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
62684
62685 /*
62686 * struct kmem_cache related prototypes
62687 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
62688 void kfree(const void *);
62689 void kzfree(const void *);
62690 size_t ksize(const void *);
62691 +void check_object_size(const void *ptr, unsigned long n, bool to);
62692
62693 /*
62694 * Allocator specific definitions. These are mainly used to establish optimized
62695 @@ -240,6 +252,7 @@ size_t ksize(const void *);
62696 * for general use, and so are not documented here. For a full list of
62697 * potential flags, always refer to linux/gfp.h.
62698 */
62699 +static void *kmalloc_array(size_t n, size_t size, gfp_t flags) __size_overflow(1, 2);
62700 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
62701 {
62702 if (size != 0 && n > ULONG_MAX / size)
62703 @@ -298,7 +311,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
62704 */
62705 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
62706 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
62707 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
62708 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
62709 #define kmalloc_track_caller(size, flags) \
62710 __kmalloc_track_caller(size, flags, _RET_IP_)
62711 #else
62712 @@ -317,7 +330,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
62713 */
62714 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
62715 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
62716 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
62717 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
62718 #define kmalloc_node_track_caller(size, flags, node) \
62719 __kmalloc_node_track_caller(size, flags, node, \
62720 _RET_IP_)
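For intuition about the ZERO_SIZE_PTR / ZERO_OR_NULL_PTR rewrite above: parking ZERO_SIZE_PTR just below the ERR_PTR range lets a single unsigned comparison catch NULL, ZERO_SIZE_PTR and anything in the error-pointer range while passing real pointers through. A small standalone sketch (userspace, with the relevant kernel constants copied in as assumptions; the in-tree definition additionally carries the BUILD_BUG_ON shown above):

#include <stdio.h>

#define MAX_ERRNO       4095
#define ZERO_SIZE_PTR   ((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
        ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
        int obj;

        printf("NULL          -> %d\n", ZERO_OR_NULL_PTR((void *)0));      /* 1 */
        printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));  /* 1 */
        printf("ERR_PTR(-12)  -> %d\n", ZERO_OR_NULL_PTR((void *)-12L));   /* 1 */
        printf("&obj          -> %d\n", ZERO_OR_NULL_PTR(&obj));           /* 0 */
        return 0;
}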
62721 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
62722 index fbd1117..d4d8ef8 100644
62723 --- a/include/linux/slab_def.h
62724 +++ b/include/linux/slab_def.h
62725 @@ -66,10 +66,10 @@ struct kmem_cache {
62726 unsigned long node_allocs;
62727 unsigned long node_frees;
62728 unsigned long node_overflow;
62729 - atomic_t allochit;
62730 - atomic_t allocmiss;
62731 - atomic_t freehit;
62732 - atomic_t freemiss;
62733 + atomic_unchecked_t allochit;
62734 + atomic_unchecked_t allocmiss;
62735 + atomic_unchecked_t freehit;
62736 + atomic_unchecked_t freemiss;
62737
62738 /*
62739 * If debugging is enabled, then the allocator can add additional
62740 @@ -107,7 +107,7 @@ struct cache_sizes {
62741 extern struct cache_sizes malloc_sizes[];
62742
62743 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62744 -void *__kmalloc(size_t size, gfp_t flags);
62745 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
62746
62747 #ifdef CONFIG_TRACING
62748 extern void *kmem_cache_alloc_trace(size_t size,
62749 @@ -160,7 +160,7 @@ found:
62750 }
62751
62752 #ifdef CONFIG_NUMA
62753 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
62754 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62755 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62756
62757 #ifdef CONFIG_TRACING
62758 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
62759 index 0ec00b3..39cb7fc 100644
62760 --- a/include/linux/slob_def.h
62761 +++ b/include/linux/slob_def.h
62762 @@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
62763 return kmem_cache_alloc_node(cachep, flags, -1);
62764 }
62765
62766 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
62767 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62768
62769 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
62770 {
62771 @@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
62772 return __kmalloc_node(size, flags, -1);
62773 }
62774
62775 +static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
62776 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
62777 {
62778 return kmalloc(size, flags);
62779 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
62780 index c2f8c8b..be9e036 100644
62781 --- a/include/linux/slub_def.h
62782 +++ b/include/linux/slub_def.h
62783 @@ -92,7 +92,7 @@ struct kmem_cache {
62784 struct kmem_cache_order_objects max;
62785 struct kmem_cache_order_objects min;
62786 gfp_t allocflags; /* gfp flags to use on each alloc */
62787 - int refcount; /* Refcount for slab cache destroy */
62788 + atomic_t refcount; /* Refcount for slab cache destroy */
62789 void (*ctor)(void *);
62790 int inuse; /* Offset to metadata */
62791 int align; /* Alignment */
62792 @@ -153,6 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
62793 * Sorry that the following has to be that ugly but some versions of GCC
62794 * have trouble with constant propagation and loops.
62795 */
62796 +static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
62797 static __always_inline int kmalloc_index(size_t size)
62798 {
62799 if (!size)
62800 @@ -218,7 +219,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
62801 }
62802
62803 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62804 -void *__kmalloc(size_t size, gfp_t flags);
62805 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
62806
62807 static __always_inline void *
62808 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
62809 @@ -259,6 +260,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
62810 }
62811 #endif
62812
62813 +static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
62814 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
62815 {
62816 unsigned int order = get_order(size);
62817 @@ -284,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
62818 }
62819
62820 #ifdef CONFIG_NUMA
62821 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
62822 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
62823 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62824
62825 #ifdef CONFIG_TRACING
62826 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
62827 index de8832d..0147b46 100644
62828 --- a/include/linux/sonet.h
62829 +++ b/include/linux/sonet.h
62830 @@ -61,7 +61,7 @@ struct sonet_stats {
62831 #include <linux/atomic.h>
62832
62833 struct k_sonet_stats {
62834 -#define __HANDLE_ITEM(i) atomic_t i
62835 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
62836 __SONET_ITEMS
62837 #undef __HANDLE_ITEM
62838 };
62839 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
62840 index 523547e..2cb7140 100644
62841 --- a/include/linux/sunrpc/clnt.h
62842 +++ b/include/linux/sunrpc/clnt.h
62843 @@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
62844 {
62845 switch (sap->sa_family) {
62846 case AF_INET:
62847 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
62848 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
62849 case AF_INET6:
62850 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
62851 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
62852 }
62853 return 0;
62854 }
62855 @@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
62856 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
62857 const struct sockaddr *src)
62858 {
62859 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
62860 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
62861 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
62862
62863 dsin->sin_family = ssin->sin_family;
62864 @@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
62865 if (sa->sa_family != AF_INET6)
62866 return 0;
62867
62868 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
62869 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
62870 }
62871
62872 #endif /* __KERNEL__ */
62873 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
62874 index dc0c3cc..8503fb6 100644
62875 --- a/include/linux/sunrpc/sched.h
62876 +++ b/include/linux/sunrpc/sched.h
62877 @@ -106,6 +106,7 @@ struct rpc_call_ops {
62878 void (*rpc_count_stats)(struct rpc_task *, void *);
62879 void (*rpc_release)(void *);
62880 };
62881 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
62882
62883 struct rpc_task_setup {
62884 struct rpc_task *task;
62885 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
62886 index 0b8e3e6..33e0a01 100644
62887 --- a/include/linux/sunrpc/svc_rdma.h
62888 +++ b/include/linux/sunrpc/svc_rdma.h
62889 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
62890 extern unsigned int svcrdma_max_requests;
62891 extern unsigned int svcrdma_max_req_size;
62892
62893 -extern atomic_t rdma_stat_recv;
62894 -extern atomic_t rdma_stat_read;
62895 -extern atomic_t rdma_stat_write;
62896 -extern atomic_t rdma_stat_sq_starve;
62897 -extern atomic_t rdma_stat_rq_starve;
62898 -extern atomic_t rdma_stat_rq_poll;
62899 -extern atomic_t rdma_stat_rq_prod;
62900 -extern atomic_t rdma_stat_sq_poll;
62901 -extern atomic_t rdma_stat_sq_prod;
62902 +extern atomic_unchecked_t rdma_stat_recv;
62903 +extern atomic_unchecked_t rdma_stat_read;
62904 +extern atomic_unchecked_t rdma_stat_write;
62905 +extern atomic_unchecked_t rdma_stat_sq_starve;
62906 +extern atomic_unchecked_t rdma_stat_rq_starve;
62907 +extern atomic_unchecked_t rdma_stat_rq_poll;
62908 +extern atomic_unchecked_t rdma_stat_rq_prod;
62909 +extern atomic_unchecked_t rdma_stat_sq_poll;
62910 +extern atomic_unchecked_t rdma_stat_sq_prod;
62911
62912 #define RPCRDMA_VERSION 1
62913
62914 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
62915 index c34b4c8..a65b67d 100644
62916 --- a/include/linux/sysctl.h
62917 +++ b/include/linux/sysctl.h
62918 @@ -155,7 +155,11 @@ enum
62919 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
62920 };
62921
62922 -
62923 +#ifdef CONFIG_PAX_SOFTMODE
62924 +enum {
62925 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
62926 +};
62927 +#endif
62928
62929 /* CTL_VM names: */
62930 enum
62931 @@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
62932
62933 extern int proc_dostring(struct ctl_table *, int,
62934 void __user *, size_t *, loff_t *);
62935 +extern int proc_dostring_modpriv(struct ctl_table *, int,
62936 + void __user *, size_t *, loff_t *);
62937 extern int proc_dointvec(struct ctl_table *, int,
62938 void __user *, size_t *, loff_t *);
62939 extern int proc_dointvec_minmax(struct ctl_table *, int,
62940 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
62941 index ff7dc08..893e1bd 100644
62942 --- a/include/linux/tty_ldisc.h
62943 +++ b/include/linux/tty_ldisc.h
62944 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
62945
62946 struct module *owner;
62947
62948 - int refcount;
62949 + atomic_t refcount;
62950 };
62951
62952 struct tty_ldisc {
62953 diff --git a/include/linux/types.h b/include/linux/types.h
62954 index 7f480db..175c256 100644
62955 --- a/include/linux/types.h
62956 +++ b/include/linux/types.h
62957 @@ -220,10 +220,26 @@ typedef struct {
62958 int counter;
62959 } atomic_t;
62960
62961 +#ifdef CONFIG_PAX_REFCOUNT
62962 +typedef struct {
62963 + int counter;
62964 +} atomic_unchecked_t;
62965 +#else
62966 +typedef atomic_t atomic_unchecked_t;
62967 +#endif
62968 +
62969 #ifdef CONFIG_64BIT
62970 typedef struct {
62971 long counter;
62972 } atomic64_t;
62973 +
62974 +#ifdef CONFIG_PAX_REFCOUNT
62975 +typedef struct {
62976 + long counter;
62977 +} atomic64_unchecked_t;
62978 +#else
62979 +typedef atomic64_t atomic64_unchecked_t;
62980 +#endif
62981 #endif
62982
62983 struct list_head {
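A hedged sketch of how the new type is meant to be used: with CONFIG_PAX_REFCOUNT the ordinary atomic_t operations gain overflow detection (compare pax_report_refcount_overflow() declared earlier in this section), so counters that are allowed to wrap, statistics rather than reference counts, get switched to atomic_unchecked_t and the *_unchecked accessors, exactly as the vm_stat, rdma_stat_* and k_sonet_stats conversions in this section do. Illustratively (rx_overruns is hypothetical, and atomic_inc_unchecked() is assumed to exist alongside the atomic_read_unchecked()/atomic_cmpxchg_unchecked() calls visible further down):

/* hypothetical driver statistic: wrapping is harmless, so keep it unchecked */
static atomic_unchecked_t rx_overruns = ATOMIC_INIT(0);

static void note_rx_overrun(void)
{
        atomic_inc_unchecked(&rx_overruns);
}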
62984 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
62985 index 5ca0951..ab496a5 100644
62986 --- a/include/linux/uaccess.h
62987 +++ b/include/linux/uaccess.h
62988 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
62989 long ret; \
62990 mm_segment_t old_fs = get_fs(); \
62991 \
62992 - set_fs(KERNEL_DS); \
62993 pagefault_disable(); \
62994 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
62995 - pagefault_enable(); \
62996 + set_fs(KERNEL_DS); \
62997 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
62998 set_fs(old_fs); \
62999 + pagefault_enable(); \
63000 ret; \
63001 })
63002
63003 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63004 index 99c1b4d..bb94261 100644
63005 --- a/include/linux/unaligned/access_ok.h
63006 +++ b/include/linux/unaligned/access_ok.h
63007 @@ -6,32 +6,32 @@
63008
63009 static inline u16 get_unaligned_le16(const void *p)
63010 {
63011 - return le16_to_cpup((__le16 *)p);
63012 + return le16_to_cpup((const __le16 *)p);
63013 }
63014
63015 static inline u32 get_unaligned_le32(const void *p)
63016 {
63017 - return le32_to_cpup((__le32 *)p);
63018 + return le32_to_cpup((const __le32 *)p);
63019 }
63020
63021 static inline u64 get_unaligned_le64(const void *p)
63022 {
63023 - return le64_to_cpup((__le64 *)p);
63024 + return le64_to_cpup((const __le64 *)p);
63025 }
63026
63027 static inline u16 get_unaligned_be16(const void *p)
63028 {
63029 - return be16_to_cpup((__be16 *)p);
63030 + return be16_to_cpup((const __be16 *)p);
63031 }
63032
63033 static inline u32 get_unaligned_be32(const void *p)
63034 {
63035 - return be32_to_cpup((__be32 *)p);
63036 + return be32_to_cpup((const __be32 *)p);
63037 }
63038
63039 static inline u64 get_unaligned_be64(const void *p)
63040 {
63041 - return be64_to_cpup((__be64 *)p);
63042 + return be64_to_cpup((const __be64 *)p);
63043 }
63044
63045 static inline void put_unaligned_le16(u16 val, void *p)
63046 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
63047 index 547e59c..db6ad19 100644
63048 --- a/include/linux/usb/renesas_usbhs.h
63049 +++ b/include/linux/usb/renesas_usbhs.h
63050 @@ -39,7 +39,7 @@ enum {
63051 */
63052 struct renesas_usbhs_driver_callback {
63053 int (*notify_hotplug)(struct platform_device *pdev);
63054 -};
63055 +} __no_const;
63056
63057 /*
63058 * callback functions for platform
63059 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
63060 * VBUS control is needed for Host
63061 */
63062 int (*set_vbus)(struct platform_device *pdev, int enable);
63063 -};
63064 +} __no_const;
63065
63066 /*
63067 * parameters for renesas usbhs
63068 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63069 index 6f8fbcf..8259001 100644
63070 --- a/include/linux/vermagic.h
63071 +++ b/include/linux/vermagic.h
63072 @@ -25,9 +25,35 @@
63073 #define MODULE_ARCH_VERMAGIC ""
63074 #endif
63075
63076 +#ifdef CONFIG_PAX_REFCOUNT
63077 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
63078 +#else
63079 +#define MODULE_PAX_REFCOUNT ""
63080 +#endif
63081 +
63082 +#ifdef CONSTIFY_PLUGIN
63083 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63084 +#else
63085 +#define MODULE_CONSTIFY_PLUGIN ""
63086 +#endif
63087 +
63088 +#ifdef STACKLEAK_PLUGIN
63089 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63090 +#else
63091 +#define MODULE_STACKLEAK_PLUGIN ""
63092 +#endif
63093 +
63094 +#ifdef CONFIG_GRKERNSEC
63095 +#define MODULE_GRSEC "GRSEC "
63096 +#else
63097 +#define MODULE_GRSEC ""
63098 +#endif
63099 +
63100 #define VERMAGIC_STRING \
63101 UTS_RELEASE " " \
63102 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63103 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63104 - MODULE_ARCH_VERMAGIC
63105 + MODULE_ARCH_VERMAGIC \
63106 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63107 + MODULE_GRSEC
63108
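For concreteness, an illustrative expansion (assumed, not taken from a real build) of the string with all four new tags active; since module loading compares vermagic strings exactly, out-of-tree modules must be rebuilt against the hardened configuration before they will load:

/* illustrative only: a plausible VERMAGIC_STRING on an SMP, mod_unload
 * build with REFCOUNT, both plugins and GRSEC enabled */
#define VERMAGIC_STRING_EXAMPLE \
        "3.4.1 SMP mod_unload REFCOUNT CONSTIFY_PLUGIN STACKLEAK_PLUGIN GRSEC "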
63109 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63110 index dcdfc2b..ec79ab5 100644
63111 --- a/include/linux/vmalloc.h
63112 +++ b/include/linux/vmalloc.h
63113 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63114 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63115 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63116 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63117 +
63118 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63119 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63120 +#endif
63121 +
63122 /* bits [20..32] reserved for arch specific ioremap internals */
63123
63124 /*
63125 @@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
63126 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
63127 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
63128 unsigned long start, unsigned long end, gfp_t gfp_mask,
63129 - pgprot_t prot, int node, void *caller);
63130 + pgprot_t prot, int node, void *caller) __size_overflow(1);
63131 extern void vfree(const void *addr);
63132
63133 extern void *vmap(struct page **pages, unsigned int count,
63134 @@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
63135 extern void free_vm_area(struct vm_struct *area);
63136
63137 /* for /dev/kmem */
63138 -extern long vread(char *buf, char *addr, unsigned long count);
63139 -extern long vwrite(char *buf, char *addr, unsigned long count);
63140 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
63141 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
63142
63143 /*
63144 * Internals. Dont't use..
63145 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63146 index 65efb92..137adbb 100644
63147 --- a/include/linux/vmstat.h
63148 +++ b/include/linux/vmstat.h
63149 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63150 /*
63151 * Zone based page accounting with per cpu differentials.
63152 */
63153 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63154 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63155
63156 static inline void zone_page_state_add(long x, struct zone *zone,
63157 enum zone_stat_item item)
63158 {
63159 - atomic_long_add(x, &zone->vm_stat[item]);
63160 - atomic_long_add(x, &vm_stat[item]);
63161 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63162 + atomic_long_add_unchecked(x, &vm_stat[item]);
63163 }
63164
63165 static inline unsigned long global_page_state(enum zone_stat_item item)
63166 {
63167 - long x = atomic_long_read(&vm_stat[item]);
63168 + long x = atomic_long_read_unchecked(&vm_stat[item]);
63169 #ifdef CONFIG_SMP
63170 if (x < 0)
63171 x = 0;
63172 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63173 static inline unsigned long zone_page_state(struct zone *zone,
63174 enum zone_stat_item item)
63175 {
63176 - long x = atomic_long_read(&zone->vm_stat[item]);
63177 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63178 #ifdef CONFIG_SMP
63179 if (x < 0)
63180 x = 0;
63181 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63182 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63183 enum zone_stat_item item)
63184 {
63185 - long x = atomic_long_read(&zone->vm_stat[item]);
63186 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63187
63188 #ifdef CONFIG_SMP
63189 int cpu;
63190 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63191
63192 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63193 {
63194 - atomic_long_inc(&zone->vm_stat[item]);
63195 - atomic_long_inc(&vm_stat[item]);
63196 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
63197 + atomic_long_inc_unchecked(&vm_stat[item]);
63198 }
63199
63200 static inline void __inc_zone_page_state(struct page *page,
63201 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63202
63203 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63204 {
63205 - atomic_long_dec(&zone->vm_stat[item]);
63206 - atomic_long_dec(&vm_stat[item]);
63207 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
63208 + atomic_long_dec_unchecked(&vm_stat[item]);
63209 }
63210
63211 static inline void __dec_zone_page_state(struct page *page,
63212 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63213 index e5d1220..ef6e406 100644
63214 --- a/include/linux/xattr.h
63215 +++ b/include/linux/xattr.h
63216 @@ -57,6 +57,11 @@
63217 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63218 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63219
63220 +/* User namespace */
63221 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63222 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
63223 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63224 +
63225 #ifdef __KERNEL__
63226
63227 #include <linux/types.h>
63228 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63229 index 4aeff96..b378cdc 100644
63230 --- a/include/media/saa7146_vv.h
63231 +++ b/include/media/saa7146_vv.h
63232 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
63233 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63234
63235 /* the extension can override this */
63236 - struct v4l2_ioctl_ops ops;
63237 + v4l2_ioctl_ops_no_const ops;
63238 /* pointer to the saa7146 core ops */
63239 const struct v4l2_ioctl_ops *core_ops;
63240
63241 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63242 index 96d2221..2292f89 100644
63243 --- a/include/media/v4l2-dev.h
63244 +++ b/include/media/v4l2-dev.h
63245 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63246
63247
63248 struct v4l2_file_operations {
63249 - struct module *owner;
63250 + struct module * const owner;
63251 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63252 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63253 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63254 @@ -71,6 +71,7 @@ struct v4l2_file_operations {
63255 int (*open) (struct file *);
63256 int (*release) (struct file *);
63257 };
63258 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63259
63260 /*
63261 * Newer version of video_device, handled by videodev2.c
63262 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63263 index 3cb939c..f23c6bb 100644
63264 --- a/include/media/v4l2-ioctl.h
63265 +++ b/include/media/v4l2-ioctl.h
63266 @@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
63267 long (*vidioc_default) (struct file *file, void *fh,
63268 bool valid_prio, int cmd, void *arg);
63269 };
63270 -
63271 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63272
63273 /* v4l debugging and diagnostics */
63274
63275 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63276 index 6db8ecf..8c23861 100644
63277 --- a/include/net/caif/caif_hsi.h
63278 +++ b/include/net/caif/caif_hsi.h
63279 @@ -98,7 +98,7 @@ struct cfhsi_drv {
63280 void (*rx_done_cb) (struct cfhsi_drv *drv);
63281 void (*wake_up_cb) (struct cfhsi_drv *drv);
63282 void (*wake_down_cb) (struct cfhsi_drv *drv);
63283 -};
63284 +} __no_const;
63285
63286 /* Structure implemented by HSI device. */
63287 struct cfhsi_dev {
63288 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63289 index 9e5425b..8136ffc 100644
63290 --- a/include/net/caif/cfctrl.h
63291 +++ b/include/net/caif/cfctrl.h
63292 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
63293 void (*radioset_rsp)(void);
63294 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63295 struct cflayer *client_layer);
63296 -};
63297 +} __no_const;
63298
63299 /* Link Setup Parameters for CAIF-Links. */
63300 struct cfctrl_link_param {
63301 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
63302 struct cfctrl {
63303 struct cfsrvl serv;
63304 struct cfctrl_rsp res;
63305 - atomic_t req_seq_no;
63306 - atomic_t rsp_seq_no;
63307 + atomic_unchecked_t req_seq_no;
63308 + atomic_unchecked_t rsp_seq_no;
63309 struct list_head list;
63310 /* Protects from simultaneous access to first_req list */
63311 spinlock_t info_list_lock;
63312 diff --git a/include/net/flow.h b/include/net/flow.h
63313 index 6c469db..7743b8e 100644
63314 --- a/include/net/flow.h
63315 +++ b/include/net/flow.h
63316 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63317
63318 extern void flow_cache_flush(void);
63319 extern void flow_cache_flush_deferred(void);
63320 -extern atomic_t flow_cache_genid;
63321 +extern atomic_unchecked_t flow_cache_genid;
63322
63323 #endif
63324 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63325 index b94765e..053f68b 100644
63326 --- a/include/net/inetpeer.h
63327 +++ b/include/net/inetpeer.h
63328 @@ -48,8 +48,8 @@ struct inet_peer {
63329 */
63330 union {
63331 struct {
63332 - atomic_t rid; /* Frag reception counter */
63333 - atomic_t ip_id_count; /* IP ID for the next packet */
63334 + atomic_unchecked_t rid; /* Frag reception counter */
63335 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63336 __u32 tcp_ts;
63337 __u32 tcp_ts_stamp;
63338 };
63339 @@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63340 more++;
63341 inet_peer_refcheck(p);
63342 do {
63343 - old = atomic_read(&p->ip_id_count);
63344 + old = atomic_read_unchecked(&p->ip_id_count);
63345 new = old + more;
63346 if (!new)
63347 new = 1;
63348 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63349 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63350 return new;
63351 }
63352
63353 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63354 index 10422ef..662570f 100644
63355 --- a/include/net/ip_fib.h
63356 +++ b/include/net/ip_fib.h
63357 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63358
63359 #define FIB_RES_SADDR(net, res) \
63360 ((FIB_RES_NH(res).nh_saddr_genid == \
63361 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63362 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63363 FIB_RES_NH(res).nh_saddr : \
63364 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63365 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63366 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63367 index 72522f0..6f03a2b 100644
63368 --- a/include/net/ip_vs.h
63369 +++ b/include/net/ip_vs.h
63370 @@ -510,7 +510,7 @@ struct ip_vs_conn {
63371 struct ip_vs_conn *control; /* Master control connection */
63372 atomic_t n_control; /* Number of controlled ones */
63373 struct ip_vs_dest *dest; /* real server */
63374 - atomic_t in_pkts; /* incoming packet counter */
63375 + atomic_unchecked_t in_pkts; /* incoming packet counter */
63376
63377 /* packet transmitter for different forwarding methods. If it
63378 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63379 @@ -648,7 +648,7 @@ struct ip_vs_dest {
63380 __be16 port; /* port number of the server */
63381 union nf_inet_addr addr; /* IP address of the server */
63382 volatile unsigned flags; /* dest status flags */
63383 - atomic_t conn_flags; /* flags to copy to conn */
63384 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
63385 atomic_t weight; /* server weight */
63386
63387 atomic_t refcnt; /* reference counter */
63388 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63389 index 69b610a..fe3962c 100644
63390 --- a/include/net/irda/ircomm_core.h
63391 +++ b/include/net/irda/ircomm_core.h
63392 @@ -51,7 +51,7 @@ typedef struct {
63393 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63394 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63395 struct ircomm_info *);
63396 -} call_t;
63397 +} __no_const call_t;
63398
63399 struct ircomm_cb {
63400 irda_queue_t queue;
63401 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63402 index 59ba38bc..d515662 100644
63403 --- a/include/net/irda/ircomm_tty.h
63404 +++ b/include/net/irda/ircomm_tty.h
63405 @@ -35,6 +35,7 @@
63406 #include <linux/termios.h>
63407 #include <linux/timer.h>
63408 #include <linux/tty.h> /* struct tty_struct */
63409 +#include <asm/local.h>
63410
63411 #include <net/irda/irias_object.h>
63412 #include <net/irda/ircomm_core.h>
63413 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63414 unsigned short close_delay;
63415 unsigned short closing_wait; /* time to wait before closing */
63416
63417 - int open_count;
63418 - int blocked_open; /* # of blocked opens */
63419 + local_t open_count;
63420 + local_t blocked_open; /* # of blocked opens */
63421
63422 /* Protect concurent access to :
63423 * o self->open_count
63424 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63425 index cc7c197..9f2da2a 100644
63426 --- a/include/net/iucv/af_iucv.h
63427 +++ b/include/net/iucv/af_iucv.h
63428 @@ -141,7 +141,7 @@ struct iucv_sock {
63429 struct iucv_sock_list {
63430 struct hlist_head head;
63431 rwlock_t lock;
63432 - atomic_t autobind_name;
63433 + atomic_unchecked_t autobind_name;
63434 };
63435
63436 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63437 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63438 index 34c996f..bb3b4d4 100644
63439 --- a/include/net/neighbour.h
63440 +++ b/include/net/neighbour.h
63441 @@ -123,7 +123,7 @@ struct neigh_ops {
63442 void (*error_report)(struct neighbour *, struct sk_buff *);
63443 int (*output)(struct neighbour *, struct sk_buff *);
63444 int (*connected_output)(struct neighbour *, struct sk_buff *);
63445 -};
63446 +} __do_const;
63447
63448 struct pneigh_entry {
63449 struct pneigh_entry *next;
63450 diff --git a/include/net/netlink.h b/include/net/netlink.h
63451 index f394fe5..fd073f9 100644
63452 --- a/include/net/netlink.h
63453 +++ b/include/net/netlink.h
63454 @@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63455 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63456 {
63457 if (mark)
63458 - skb_trim(skb, (unsigned char *) mark - skb->data);
63459 + skb_trim(skb, (const unsigned char *) mark - skb->data);
63460 }
63461
63462 /**
63463 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63464 index bbd023a..97c6d0d 100644
63465 --- a/include/net/netns/ipv4.h
63466 +++ b/include/net/netns/ipv4.h
63467 @@ -57,8 +57,8 @@ struct netns_ipv4 {
63468 unsigned int sysctl_ping_group_range[2];
63469 long sysctl_tcp_mem[3];
63470
63471 - atomic_t rt_genid;
63472 - atomic_t dev_addr_genid;
63473 + atomic_unchecked_t rt_genid;
63474 + atomic_unchecked_t dev_addr_genid;
63475
63476 #ifdef CONFIG_IP_MROUTE
63477 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63478 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63479 index a2ef814..31a8e3f 100644
63480 --- a/include/net/sctp/sctp.h
63481 +++ b/include/net/sctp/sctp.h
63482 @@ -318,9 +318,9 @@ do { \
63483
63484 #else /* SCTP_DEBUG */
63485
63486 -#define SCTP_DEBUG_PRINTK(whatever...)
63487 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63488 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63489 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63490 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63491 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63492 #define SCTP_ENABLE_DEBUG
63493 #define SCTP_DISABLE_DEBUG
63494 #define SCTP_ASSERT(expr, str, func)
63495 diff --git a/include/net/sock.h b/include/net/sock.h
63496 index 5a0a58a..2e3d4d0 100644
63497 --- a/include/net/sock.h
63498 +++ b/include/net/sock.h
63499 @@ -302,7 +302,7 @@ struct sock {
63500 #ifdef CONFIG_RPS
63501 __u32 sk_rxhash;
63502 #endif
63503 - atomic_t sk_drops;
63504 + atomic_unchecked_t sk_drops;
63505 int sk_rcvbuf;
63506
63507 struct sk_filter __rcu *sk_filter;
63508 @@ -1691,7 +1691,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
63509 }
63510
63511 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63512 - char __user *from, char *to,
63513 + char __user *from, unsigned char *to,
63514 int copy, int offset)
63515 {
63516 if (skb->ip_summed == CHECKSUM_NONE) {
63517 diff --git a/include/net/tcp.h b/include/net/tcp.h
63518 index f75a04d..702cf06 100644
63519 --- a/include/net/tcp.h
63520 +++ b/include/net/tcp.h
63521 @@ -1425,7 +1425,7 @@ struct tcp_seq_afinfo {
63522 char *name;
63523 sa_family_t family;
63524 const struct file_operations *seq_fops;
63525 - struct seq_operations seq_ops;
63526 + seq_operations_no_const seq_ops;
63527 };
63528
63529 struct tcp_iter_state {
63530 diff --git a/include/net/udp.h b/include/net/udp.h
63531 index 5d606d9..e879f7b 100644
63532 --- a/include/net/udp.h
63533 +++ b/include/net/udp.h
63534 @@ -244,7 +244,7 @@ struct udp_seq_afinfo {
63535 sa_family_t family;
63536 struct udp_table *udp_table;
63537 const struct file_operations *seq_fops;
63538 - struct seq_operations seq_ops;
63539 + seq_operations_no_const seq_ops;
63540 };
63541
63542 struct udp_iter_state {
63543 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63544 index 96239e7..c85b032 100644
63545 --- a/include/net/xfrm.h
63546 +++ b/include/net/xfrm.h
63547 @@ -505,7 +505,7 @@ struct xfrm_policy {
63548 struct timer_list timer;
63549
63550 struct flow_cache_object flo;
63551 - atomic_t genid;
63552 + atomic_unchecked_t genid;
63553 u32 priority;
63554 u32 index;
63555 struct xfrm_mark mark;
63556 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
63557 index 1a046b1..ee0bef0 100644
63558 --- a/include/rdma/iw_cm.h
63559 +++ b/include/rdma/iw_cm.h
63560 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
63561 int backlog);
63562
63563 int (*destroy_listen)(struct iw_cm_id *cm_id);
63564 -};
63565 +} __no_const;
63566
63567 /**
63568 * iw_create_cm_id - Create an IW CM identifier.
63569 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
63570 index 8f9dfba..610ab6c 100644
63571 --- a/include/scsi/libfc.h
63572 +++ b/include/scsi/libfc.h
63573 @@ -756,6 +756,7 @@ struct libfc_function_template {
63574 */
63575 void (*disc_stop_final) (struct fc_lport *);
63576 };
63577 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
63578
63579 /**
63580 * struct fc_disc - Discovery context
63581 @@ -861,7 +862,7 @@ struct fc_lport {
63582 struct fc_vport *vport;
63583
63584 /* Operational Information */
63585 - struct libfc_function_template tt;
63586 + libfc_function_template_no_const tt;
63587 u8 link_up;
63588 u8 qfull;
63589 enum fc_lport_state state;
63590 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
63591 index 6efb2e1..cdad57f 100644
63592 --- a/include/scsi/scsi_device.h
63593 +++ b/include/scsi/scsi_device.h
63594 @@ -162,9 +162,9 @@ struct scsi_device {
63595 unsigned int max_device_blocked; /* what device_blocked counts down from */
63596 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
63597
63598 - atomic_t iorequest_cnt;
63599 - atomic_t iodone_cnt;
63600 - atomic_t ioerr_cnt;
63601 + atomic_unchecked_t iorequest_cnt;
63602 + atomic_unchecked_t iodone_cnt;
63603 + atomic_unchecked_t ioerr_cnt;
63604
63605 struct device sdev_gendev,
63606 sdev_dev;
63607 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
63608 index 719faf1..d1154d4 100644
63609 --- a/include/scsi/scsi_transport_fc.h
63610 +++ b/include/scsi/scsi_transport_fc.h
63611 @@ -739,7 +739,7 @@ struct fc_function_template {
63612 unsigned long show_host_system_hostname:1;
63613
63614 unsigned long disable_target_scan:1;
63615 -};
63616 +} __do_const;
63617
63618
63619 /**
63620 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
63621 index 030b87c..98a6954 100644
63622 --- a/include/sound/ak4xxx-adda.h
63623 +++ b/include/sound/ak4xxx-adda.h
63624 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
63625 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
63626 unsigned char val);
63627 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
63628 -};
63629 +} __no_const;
63630
63631 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
63632
63633 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
63634 index 8c05e47..2b5df97 100644
63635 --- a/include/sound/hwdep.h
63636 +++ b/include/sound/hwdep.h
63637 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
63638 struct snd_hwdep_dsp_status *status);
63639 int (*dsp_load)(struct snd_hwdep *hw,
63640 struct snd_hwdep_dsp_image *image);
63641 -};
63642 +} __no_const;
63643
63644 struct snd_hwdep {
63645 struct snd_card *card;
63646 diff --git a/include/sound/info.h b/include/sound/info.h
63647 index 9ca1a49..aba1728 100644
63648 --- a/include/sound/info.h
63649 +++ b/include/sound/info.h
63650 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
63651 struct snd_info_buffer *buffer);
63652 void (*write)(struct snd_info_entry *entry,
63653 struct snd_info_buffer *buffer);
63654 -};
63655 +} __no_const;
63656
63657 struct snd_info_entry_ops {
63658 int (*open)(struct snd_info_entry *entry,
63659 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
63660 index 0d11128..814178e 100644
63661 --- a/include/sound/pcm.h
63662 +++ b/include/sound/pcm.h
63663 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
63664 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
63665 int (*ack)(struct snd_pcm_substream *substream);
63666 };
63667 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
63668
63669 /*
63670 *
63671 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
63672 index af1b49e..a5d55a5 100644
63673 --- a/include/sound/sb16_csp.h
63674 +++ b/include/sound/sb16_csp.h
63675 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
63676 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
63677 int (*csp_stop) (struct snd_sb_csp * p);
63678 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
63679 -};
63680 +} __no_const;
63681
63682 /*
63683 * CSP private data
63684 diff --git a/include/sound/soc.h b/include/sound/soc.h
63685 index 2ebf787..0276839 100644
63686 --- a/include/sound/soc.h
63687 +++ b/include/sound/soc.h
63688 @@ -711,7 +711,7 @@ struct snd_soc_platform_driver {
63689 /* platform IO - used for platform DAPM */
63690 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
63691 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
63692 -};
63693 +} __do_const;
63694
63695 struct snd_soc_platform {
63696 const char *name;
63697 @@ -887,7 +887,7 @@ struct snd_soc_pcm_runtime {
63698 struct snd_soc_dai_link *dai_link;
63699 struct mutex pcm_mutex;
63700 enum snd_soc_pcm_subclass pcm_subclass;
63701 - struct snd_pcm_ops ops;
63702 + snd_pcm_ops_no_const ops;
63703
63704 unsigned int complete:1;
63705 unsigned int dev_registered:1;
63706 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
63707 index 4119966..1a4671c 100644
63708 --- a/include/sound/ymfpci.h
63709 +++ b/include/sound/ymfpci.h
63710 @@ -358,7 +358,7 @@ struct snd_ymfpci {
63711 spinlock_t reg_lock;
63712 spinlock_t voice_lock;
63713 wait_queue_head_t interrupt_sleep;
63714 - atomic_t interrupt_sleep_count;
63715 + atomic_unchecked_t interrupt_sleep_count;
63716 struct snd_info_entry *proc_entry;
63717 const struct firmware *dsp_microcode;
63718 const struct firmware *controller_microcode;
63719 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
63720 index aaccc5f..092d568 100644
63721 --- a/include/target/target_core_base.h
63722 +++ b/include/target/target_core_base.h
63723 @@ -447,7 +447,7 @@ struct t10_reservation_ops {
63724 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
63725 int (*t10_pr_register)(struct se_cmd *);
63726 int (*t10_pr_clear)(struct se_cmd *);
63727 -};
63728 +} __no_const;
63729
63730 struct t10_reservation {
63731 /* Reservation effects all target ports */
63732 @@ -576,7 +576,7 @@ struct se_cmd {
63733 atomic_t t_se_count;
63734 atomic_t t_task_cdbs_left;
63735 atomic_t t_task_cdbs_ex_left;
63736 - atomic_t t_task_cdbs_sent;
63737 + atomic_unchecked_t t_task_cdbs_sent;
63738 unsigned int transport_state;
63739 #define CMD_T_ABORTED (1 << 0)
63740 #define CMD_T_ACTIVE (1 << 1)
63741 @@ -802,7 +802,7 @@ struct se_device {
63742 spinlock_t stats_lock;
63743 /* Active commands on this virtual SE device */
63744 atomic_t simple_cmds;
63745 - atomic_t dev_ordered_id;
63746 + atomic_unchecked_t dev_ordered_id;
63747 atomic_t execute_tasks;
63748 atomic_t dev_ordered_sync;
63749 atomic_t dev_qf_count;
63750 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
63751 index 1c09820..7f5ec79 100644
63752 --- a/include/trace/events/irq.h
63753 +++ b/include/trace/events/irq.h
63754 @@ -36,7 +36,7 @@ struct softirq_action;
63755 */
63756 TRACE_EVENT(irq_handler_entry,
63757
63758 - TP_PROTO(int irq, struct irqaction *action),
63759 + TP_PROTO(int irq, const struct irqaction *action),
63760
63761 TP_ARGS(irq, action),
63762
63763 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
63764 */
63765 TRACE_EVENT(irq_handler_exit,
63766
63767 - TP_PROTO(int irq, struct irqaction *action, int ret),
63768 + TP_PROTO(int irq, const struct irqaction *action, int ret),
63769
63770 TP_ARGS(irq, action, ret),
63771
63772 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
63773 index f9466fa..f4e2b81 100644
63774 --- a/include/video/udlfb.h
63775 +++ b/include/video/udlfb.h
63776 @@ -53,10 +53,10 @@ struct dlfb_data {
63777 u32 pseudo_palette[256];
63778 int blank_mode; /*one of FB_BLANK_ */
63779 /* blit-only rendering path metrics, exposed through sysfs */
63780 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63781 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
63782 - atomic_t bytes_sent; /* to usb, after compression including overhead */
63783 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
63784 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63785 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
63786 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
63787 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
63788 };
63789
63790 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
63791 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
63792 index 0993a22..32ba2fe 100644
63793 --- a/include/video/uvesafb.h
63794 +++ b/include/video/uvesafb.h
63795 @@ -177,6 +177,7 @@ struct uvesafb_par {
63796 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
63797 u8 pmi_setpal; /* PMI for palette changes */
63798 u16 *pmi_base; /* protected mode interface location */
63799 + u8 *pmi_code; /* protected mode code location */
63800 void *pmi_start;
63801 void *pmi_pal;
63802 u8 *vbe_state_orig; /*
63803 diff --git a/init/Kconfig b/init/Kconfig
63804 index 6cfd71d..73cb68d 100644
63805 --- a/init/Kconfig
63806 +++ b/init/Kconfig
63807 @@ -790,6 +790,7 @@ endif # CGROUPS
63808
63809 config CHECKPOINT_RESTORE
63810 bool "Checkpoint/restore support" if EXPERT
63811 + depends on !GRKERNSEC
63812 default n
63813 help
63814 Enables additional kernel features in a sake of checkpoint/restore.
63815 @@ -1240,7 +1241,7 @@ config SLUB_DEBUG
63816
63817 config COMPAT_BRK
63818 bool "Disable heap randomization"
63819 - default y
63820 + default n
63821 help
63822 Randomizing heap placement makes heap exploits harder, but it
63823 also breaks ancient binaries (including anything libc5 based).
63824 diff --git a/init/do_mounts.c b/init/do_mounts.c
63825 index 42b0707..c06eef4 100644
63826 --- a/init/do_mounts.c
63827 +++ b/init/do_mounts.c
63828 @@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
63829 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
63830 {
63831 struct super_block *s;
63832 - int err = sys_mount(name, "/root", fs, flags, data);
63833 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
63834 if (err)
63835 return err;
63836
63837 - sys_chdir((const char __user __force *)"/root");
63838 + sys_chdir((const char __force_user *)"/root");
63839 s = current->fs->pwd.dentry->d_sb;
63840 ROOT_DEV = s->s_dev;
63841 printk(KERN_INFO
63842 @@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
63843 va_start(args, fmt);
63844 vsprintf(buf, fmt, args);
63845 va_end(args);
63846 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
63847 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
63848 if (fd >= 0) {
63849 sys_ioctl(fd, FDEJECT, 0);
63850 sys_close(fd);
63851 }
63852 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
63853 - fd = sys_open("/dev/console", O_RDWR, 0);
63854 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
63855 if (fd >= 0) {
63856 sys_ioctl(fd, TCGETS, (long)&termios);
63857 termios.c_lflag &= ~ICANON;
63858 sys_ioctl(fd, TCSETSF, (long)&termios);
63859 - sys_read(fd, &c, 1);
63860 + sys_read(fd, (char __user *)&c, 1);
63861 termios.c_lflag |= ICANON;
63862 sys_ioctl(fd, TCSETSF, (long)&termios);
63863 sys_close(fd);
63864 @@ -555,6 +555,6 @@ void __init prepare_namespace(void)
63865 mount_root();
63866 out:
63867 devtmpfs_mount("dev");
63868 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63869 - sys_chroot((const char __user __force *)".");
63870 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63871 + sys_chroot((const char __force_user *)".");
63872 }
63873 diff --git a/init/do_mounts.h b/init/do_mounts.h
63874 index f5b978a..69dbfe8 100644
63875 --- a/init/do_mounts.h
63876 +++ b/init/do_mounts.h
63877 @@ -15,15 +15,15 @@ extern int root_mountflags;
63878
63879 static inline int create_dev(char *name, dev_t dev)
63880 {
63881 - sys_unlink(name);
63882 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
63883 + sys_unlink((char __force_user *)name);
63884 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
63885 }
63886
63887 #if BITS_PER_LONG == 32
63888 static inline u32 bstat(char *name)
63889 {
63890 struct stat64 stat;
63891 - if (sys_stat64(name, &stat) != 0)
63892 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
63893 return 0;
63894 if (!S_ISBLK(stat.st_mode))
63895 return 0;
63896 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
63897 static inline u32 bstat(char *name)
63898 {
63899 struct stat stat;
63900 - if (sys_newstat(name, &stat) != 0)
63901 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
63902 return 0;
63903 if (!S_ISBLK(stat.st_mode))
63904 return 0;
63905 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
63906 index 9047330..de0d1fb 100644
63907 --- a/init/do_mounts_initrd.c
63908 +++ b/init/do_mounts_initrd.c
63909 @@ -43,13 +43,13 @@ static void __init handle_initrd(void)
63910 create_dev("/dev/root.old", Root_RAM0);
63911 /* mount initrd on rootfs' /root */
63912 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
63913 - sys_mkdir("/old", 0700);
63914 - root_fd = sys_open("/", 0, 0);
63915 - old_fd = sys_open("/old", 0, 0);
63916 + sys_mkdir((const char __force_user *)"/old", 0700);
63917 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
63918 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
63919 /* move initrd over / and chdir/chroot in initrd root */
63920 - sys_chdir("/root");
63921 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63922 - sys_chroot(".");
63923 + sys_chdir((const char __force_user *)"/root");
63924 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63925 + sys_chroot((const char __force_user *)".");
63926
63927 /*
63928 * In case that a resume from disk is carried out by linuxrc or one of
63929 @@ -66,15 +66,15 @@ static void __init handle_initrd(void)
63930
63931 /* move initrd to rootfs' /old */
63932 sys_fchdir(old_fd);
63933 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
63934 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
63935 /* switch root and cwd back to / of rootfs */
63936 sys_fchdir(root_fd);
63937 - sys_chroot(".");
63938 + sys_chroot((const char __force_user *)".");
63939 sys_close(old_fd);
63940 sys_close(root_fd);
63941
63942 if (new_decode_dev(real_root_dev) == Root_RAM0) {
63943 - sys_chdir("/old");
63944 + sys_chdir((const char __force_user *)"/old");
63945 return;
63946 }
63947
63948 @@ -82,17 +82,17 @@ static void __init handle_initrd(void)
63949 mount_root();
63950
63951 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
63952 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
63953 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
63954 if (!error)
63955 printk("okay\n");
63956 else {
63957 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
63958 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
63959 if (error == -ENOENT)
63960 printk("/initrd does not exist. Ignored.\n");
63961 else
63962 printk("failed\n");
63963 printk(KERN_NOTICE "Unmounting old root\n");
63964 - sys_umount("/old", MNT_DETACH);
63965 + sys_umount((char __force_user *)"/old", MNT_DETACH);
63966 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
63967 if (fd < 0) {
63968 error = fd;
63969 @@ -115,11 +115,11 @@ int __init initrd_load(void)
63970 * mounted in the normal path.
63971 */
63972 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
63973 - sys_unlink("/initrd.image");
63974 + sys_unlink((const char __force_user *)"/initrd.image");
63975 handle_initrd();
63976 return 1;
63977 }
63978 }
63979 - sys_unlink("/initrd.image");
63980 + sys_unlink((const char __force_user *)"/initrd.image");
63981 return 0;
63982 }
63983 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
63984 index 32c4799..c27ee74 100644
63985 --- a/init/do_mounts_md.c
63986 +++ b/init/do_mounts_md.c
63987 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
63988 partitioned ? "_d" : "", minor,
63989 md_setup_args[ent].device_names);
63990
63991 - fd = sys_open(name, 0, 0);
63992 + fd = sys_open((char __force_user *)name, 0, 0);
63993 if (fd < 0) {
63994 printk(KERN_ERR "md: open failed - cannot start "
63995 "array %s\n", name);
63996 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
63997 * array without it
63998 */
63999 sys_close(fd);
64000 - fd = sys_open(name, 0, 0);
64001 + fd = sys_open((char __force_user *)name, 0, 0);
64002 sys_ioctl(fd, BLKRRPART, 0);
64003 }
64004 sys_close(fd);
64005 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64006
64007 wait_for_device_probe();
64008
64009 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64010 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64011 if (fd >= 0) {
64012 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64013 sys_close(fd);
64014 diff --git a/init/initramfs.c b/init/initramfs.c
64015 index 8216c30..25e8e32 100644
64016 --- a/init/initramfs.c
64017 +++ b/init/initramfs.c
64018 @@ -74,7 +74,7 @@ static void __init free_hash(void)
64019 }
64020 }
64021
64022 -static long __init do_utime(char __user *filename, time_t mtime)
64023 +static long __init do_utime(__force char __user *filename, time_t mtime)
64024 {
64025 struct timespec t[2];
64026
64027 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
64028 struct dir_entry *de, *tmp;
64029 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64030 list_del(&de->list);
64031 - do_utime(de->name, de->mtime);
64032 + do_utime((char __force_user *)de->name, de->mtime);
64033 kfree(de->name);
64034 kfree(de);
64035 }
64036 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
64037 if (nlink >= 2) {
64038 char *old = find_link(major, minor, ino, mode, collected);
64039 if (old)
64040 - return (sys_link(old, collected) < 0) ? -1 : 1;
64041 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64042 }
64043 return 0;
64044 }
64045 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
64046 {
64047 struct stat st;
64048
64049 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64050 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64051 if (S_ISDIR(st.st_mode))
64052 - sys_rmdir(path);
64053 + sys_rmdir((char __force_user *)path);
64054 else
64055 - sys_unlink(path);
64056 + sys_unlink((char __force_user *)path);
64057 }
64058 }
64059
64060 @@ -305,7 +305,7 @@ static int __init do_name(void)
64061 int openflags = O_WRONLY|O_CREAT;
64062 if (ml != 1)
64063 openflags |= O_TRUNC;
64064 - wfd = sys_open(collected, openflags, mode);
64065 + wfd = sys_open((char __force_user *)collected, openflags, mode);
64066
64067 if (wfd >= 0) {
64068 sys_fchown(wfd, uid, gid);
64069 @@ -317,17 +317,17 @@ static int __init do_name(void)
64070 }
64071 }
64072 } else if (S_ISDIR(mode)) {
64073 - sys_mkdir(collected, mode);
64074 - sys_chown(collected, uid, gid);
64075 - sys_chmod(collected, mode);
64076 + sys_mkdir((char __force_user *)collected, mode);
64077 + sys_chown((char __force_user *)collected, uid, gid);
64078 + sys_chmod((char __force_user *)collected, mode);
64079 dir_add(collected, mtime);
64080 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64081 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64082 if (maybe_link() == 0) {
64083 - sys_mknod(collected, mode, rdev);
64084 - sys_chown(collected, uid, gid);
64085 - sys_chmod(collected, mode);
64086 - do_utime(collected, mtime);
64087 + sys_mknod((char __force_user *)collected, mode, rdev);
64088 + sys_chown((char __force_user *)collected, uid, gid);
64089 + sys_chmod((char __force_user *)collected, mode);
64090 + do_utime((char __force_user *)collected, mtime);
64091 }
64092 }
64093 return 0;
64094 @@ -336,15 +336,15 @@ static int __init do_name(void)
64095 static int __init do_copy(void)
64096 {
64097 if (count >= body_len) {
64098 - sys_write(wfd, victim, body_len);
64099 + sys_write(wfd, (char __force_user *)victim, body_len);
64100 sys_close(wfd);
64101 - do_utime(vcollected, mtime);
64102 + do_utime((char __force_user *)vcollected, mtime);
64103 kfree(vcollected);
64104 eat(body_len);
64105 state = SkipIt;
64106 return 0;
64107 } else {
64108 - sys_write(wfd, victim, count);
64109 + sys_write(wfd, (char __force_user *)victim, count);
64110 body_len -= count;
64111 eat(count);
64112 return 1;
64113 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
64114 {
64115 collected[N_ALIGN(name_len) + body_len] = '\0';
64116 clean_path(collected, 0);
64117 - sys_symlink(collected + N_ALIGN(name_len), collected);
64118 - sys_lchown(collected, uid, gid);
64119 - do_utime(collected, mtime);
64120 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64121 + sys_lchown((char __force_user *)collected, uid, gid);
64122 + do_utime((char __force_user *)collected, mtime);
64123 state = SkipIt;
64124 next_state = Reset;
64125 return 0;
64126 diff --git a/init/main.c b/init/main.c
64127 index cb54cd3..8773e3c 100644
64128 --- a/init/main.c
64129 +++ b/init/main.c
64130 @@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
64131 extern void tc_init(void);
64132 #endif
64133
64134 +extern void grsecurity_init(void);
64135 +
64136 /*
64137 * Debug helper: via this flag we know that we are in 'early bootup code'
64138 * where only the boot processor is running with IRQ disabled. This means
64139 @@ -148,6 +150,49 @@ static int __init set_reset_devices(char *str)
64140
64141 __setup("reset_devices", set_reset_devices);
64142
64143 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64144 +extern char pax_enter_kernel_user[];
64145 +extern char pax_exit_kernel_user[];
64146 +extern pgdval_t clone_pgd_mask;
64147 +#endif
64148 +
64149 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64150 +static int __init setup_pax_nouderef(char *str)
64151 +{
64152 +#ifdef CONFIG_X86_32
64153 + unsigned int cpu;
64154 + struct desc_struct *gdt;
64155 +
64156 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64157 + gdt = get_cpu_gdt_table(cpu);
64158 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64159 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64160 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64161 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64162 + }
64163 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64164 +#else
64165 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64166 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64167 + clone_pgd_mask = ~(pgdval_t)0UL;
64168 +#endif
64169 +
64170 + return 0;
64171 +}
64172 +early_param("pax_nouderef", setup_pax_nouderef);
64173 +#endif
64174 +
64175 +#ifdef CONFIG_PAX_SOFTMODE
64176 +int pax_softmode;
64177 +
64178 +static int __init setup_pax_softmode(char *str)
64179 +{
64180 + get_option(&str, &pax_softmode);
64181 + return 1;
64182 +}
64183 +__setup("pax_softmode=", setup_pax_softmode);
64184 +#endif
64185 +
64186 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64187 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64188 static const char *panic_later, *panic_param;
64189 @@ -674,6 +719,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64190 {
64191 int count = preempt_count();
64192 int ret;
64193 + const char *msg1 = "", *msg2 = "";
64194
64195 if (initcall_debug)
64196 ret = do_one_initcall_debug(fn);
64197 @@ -686,15 +732,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64198 sprintf(msgbuf, "error code %d ", ret);
64199
64200 if (preempt_count() != count) {
64201 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64202 + msg1 = " preemption imbalance";
64203 preempt_count() = count;
64204 }
64205 if (irqs_disabled()) {
64206 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64207 + msg2 = " disabled interrupts";
64208 local_irq_enable();
64209 }
64210 - if (msgbuf[0]) {
64211 - printk("initcall %pF returned with %s\n", fn, msgbuf);
64212 + if (msgbuf[0] || *msg1 || *msg2) {
64213 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64214 }
64215
64216 return ret;
64217 @@ -865,7 +911,7 @@ static int __init kernel_init(void * unused)
64218 do_basic_setup();
64219
64220 /* Open the /dev/console on the rootfs, this should never fail */
64221 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64222 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64223 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64224
64225 (void) sys_dup(0);
64226 @@ -878,11 +924,13 @@ static int __init kernel_init(void * unused)
64227 if (!ramdisk_execute_command)
64228 ramdisk_execute_command = "/init";
64229
64230 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64231 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64232 ramdisk_execute_command = NULL;
64233 prepare_namespace();
64234 }
64235
64236 + grsecurity_init();
64237 +
64238 /*
64239 * Ok, we have completed the initial bootup, and
64240 * we're essentially up and running. Get rid of the
64241 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64242 index 28bd64d..c66b72a 100644
64243 --- a/ipc/mqueue.c
64244 +++ b/ipc/mqueue.c
64245 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64246 mq_bytes = (mq_msg_tblsz +
64247 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64248
64249 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64250 spin_lock(&mq_lock);
64251 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64252 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
64253 diff --git a/ipc/msg.c b/ipc/msg.c
64254 index 7385de2..a8180e08 100644
64255 --- a/ipc/msg.c
64256 +++ b/ipc/msg.c
64257 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64258 return security_msg_queue_associate(msq, msgflg);
64259 }
64260
64261 +static struct ipc_ops msg_ops = {
64262 + .getnew = newque,
64263 + .associate = msg_security,
64264 + .more_checks = NULL
64265 +};
64266 +
64267 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64268 {
64269 struct ipc_namespace *ns;
64270 - struct ipc_ops msg_ops;
64271 struct ipc_params msg_params;
64272
64273 ns = current->nsproxy->ipc_ns;
64274
64275 - msg_ops.getnew = newque;
64276 - msg_ops.associate = msg_security;
64277 - msg_ops.more_checks = NULL;
64278 -
64279 msg_params.key = key;
64280 msg_params.flg = msgflg;
64281
64282 diff --git a/ipc/sem.c b/ipc/sem.c
64283 index 5215a81..cfc0cac 100644
64284 --- a/ipc/sem.c
64285 +++ b/ipc/sem.c
64286 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64287 return 0;
64288 }
64289
64290 +static struct ipc_ops sem_ops = {
64291 + .getnew = newary,
64292 + .associate = sem_security,
64293 + .more_checks = sem_more_checks
64294 +};
64295 +
64296 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64297 {
64298 struct ipc_namespace *ns;
64299 - struct ipc_ops sem_ops;
64300 struct ipc_params sem_params;
64301
64302 ns = current->nsproxy->ipc_ns;
64303 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64304 if (nsems < 0 || nsems > ns->sc_semmsl)
64305 return -EINVAL;
64306
64307 - sem_ops.getnew = newary;
64308 - sem_ops.associate = sem_security;
64309 - sem_ops.more_checks = sem_more_checks;
64310 -
64311 sem_params.key = key;
64312 sem_params.flg = semflg;
64313 sem_params.u.nsems = nsems;
64314 diff --git a/ipc/shm.c b/ipc/shm.c
64315 index 406c5b2..bc66d67 100644
64316 --- a/ipc/shm.c
64317 +++ b/ipc/shm.c
64318 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64319 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64320 #endif
64321
64322 +#ifdef CONFIG_GRKERNSEC
64323 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64324 + const time_t shm_createtime, const uid_t cuid,
64325 + const int shmid);
64326 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64327 + const time_t shm_createtime);
64328 +#endif
64329 +
64330 void shm_init_ns(struct ipc_namespace *ns)
64331 {
64332 ns->shm_ctlmax = SHMMAX;
64333 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64334 shp->shm_lprid = 0;
64335 shp->shm_atim = shp->shm_dtim = 0;
64336 shp->shm_ctim = get_seconds();
64337 +#ifdef CONFIG_GRKERNSEC
64338 + {
64339 + struct timespec timeval;
64340 + do_posix_clock_monotonic_gettime(&timeval);
64341 +
64342 + shp->shm_createtime = timeval.tv_sec;
64343 + }
64344 +#endif
64345 shp->shm_segsz = size;
64346 shp->shm_nattch = 0;
64347 shp->shm_file = file;
64348 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64349 return 0;
64350 }
64351
64352 +static struct ipc_ops shm_ops = {
64353 + .getnew = newseg,
64354 + .associate = shm_security,
64355 + .more_checks = shm_more_checks
64356 +};
64357 +
64358 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64359 {
64360 struct ipc_namespace *ns;
64361 - struct ipc_ops shm_ops;
64362 struct ipc_params shm_params;
64363
64364 ns = current->nsproxy->ipc_ns;
64365
64366 - shm_ops.getnew = newseg;
64367 - shm_ops.associate = shm_security;
64368 - shm_ops.more_checks = shm_more_checks;
64369 -
64370 shm_params.key = key;
64371 shm_params.flg = shmflg;
64372 shm_params.u.size = size;
64373 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64374 f_mode = FMODE_READ | FMODE_WRITE;
64375 }
64376 if (shmflg & SHM_EXEC) {
64377 +
64378 +#ifdef CONFIG_PAX_MPROTECT
64379 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
64380 + goto out;
64381 +#endif
64382 +
64383 prot |= PROT_EXEC;
64384 acc_mode |= S_IXUGO;
64385 }
64386 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64387 if (err)
64388 goto out_unlock;
64389
64390 +#ifdef CONFIG_GRKERNSEC
64391 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64392 + shp->shm_perm.cuid, shmid) ||
64393 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64394 + err = -EACCES;
64395 + goto out_unlock;
64396 + }
64397 +#endif
64398 +
64399 path = shp->shm_file->f_path;
64400 path_get(&path);
64401 shp->shm_nattch++;
64402 +#ifdef CONFIG_GRKERNSEC
64403 + shp->shm_lapid = current->pid;
64404 +#endif
64405 size = i_size_read(path.dentry->d_inode);
64406 shm_unlock(shp);
64407
64408 diff --git a/kernel/acct.c b/kernel/acct.c
64409 index 02e6167..54824f7 100644
64410 --- a/kernel/acct.c
64411 +++ b/kernel/acct.c
64412 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64413 */
64414 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64415 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64416 - file->f_op->write(file, (char *)&ac,
64417 + file->f_op->write(file, (char __force_user *)&ac,
64418 sizeof(acct_t), &file->f_pos);
64419 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64420 set_fs(fs);
64421 diff --git a/kernel/audit.c b/kernel/audit.c
64422 index 1c7f2c6..9ba5359 100644
64423 --- a/kernel/audit.c
64424 +++ b/kernel/audit.c
64425 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64426 3) suppressed due to audit_rate_limit
64427 4) suppressed due to audit_backlog_limit
64428 */
64429 -static atomic_t audit_lost = ATOMIC_INIT(0);
64430 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64431
64432 /* The netlink socket. */
64433 static struct sock *audit_sock;
64434 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64435 unsigned long now;
64436 int print;
64437
64438 - atomic_inc(&audit_lost);
64439 + atomic_inc_unchecked(&audit_lost);
64440
64441 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64442
64443 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64444 printk(KERN_WARNING
64445 "audit: audit_lost=%d audit_rate_limit=%d "
64446 "audit_backlog_limit=%d\n",
64447 - atomic_read(&audit_lost),
64448 + atomic_read_unchecked(&audit_lost),
64449 audit_rate_limit,
64450 audit_backlog_limit);
64451 audit_panic(message);
64452 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64453 status_set.pid = audit_pid;
64454 status_set.rate_limit = audit_rate_limit;
64455 status_set.backlog_limit = audit_backlog_limit;
64456 - status_set.lost = atomic_read(&audit_lost);
64457 + status_set.lost = atomic_read_unchecked(&audit_lost);
64458 status_set.backlog = skb_queue_len(&audit_skb_queue);
64459 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64460 &status_set, sizeof(status_set));
64461 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64462 index af1de0f..06dfe57 100644
64463 --- a/kernel/auditsc.c
64464 +++ b/kernel/auditsc.c
64465 @@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64466 }
64467
64468 /* global counter which is incremented every time something logs in */
64469 -static atomic_t session_id = ATOMIC_INIT(0);
64470 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64471
64472 /**
64473 * audit_set_loginuid - set current task's audit_context loginuid
64474 @@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
64475 return -EPERM;
64476 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
64477
64478 - sessionid = atomic_inc_return(&session_id);
64479 + sessionid = atomic_inc_return_unchecked(&session_id);
64480 if (context && context->in_syscall) {
64481 struct audit_buffer *ab;
64482
64483 diff --git a/kernel/capability.c b/kernel/capability.c
64484 index 3f1adb6..c564db0 100644
64485 --- a/kernel/capability.c
64486 +++ b/kernel/capability.c
64487 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64488 * before modification is attempted and the application
64489 * fails.
64490 */
64491 + if (tocopy > ARRAY_SIZE(kdata))
64492 + return -EFAULT;
64493 +
64494 if (copy_to_user(dataptr, kdata, tocopy
64495 * sizeof(struct __user_cap_data_struct))) {
64496 return -EFAULT;
64497 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
64498 int ret;
64499
64500 rcu_read_lock();
64501 - ret = security_capable(__task_cred(t), ns, cap);
64502 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
64503 + gr_task_is_capable(t, __task_cred(t), cap);
64504 rcu_read_unlock();
64505
64506 - return (ret == 0);
64507 + return ret;
64508 }
64509
64510 /**
64511 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
64512 int ret;
64513
64514 rcu_read_lock();
64515 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
64516 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
64517 rcu_read_unlock();
64518
64519 - return (ret == 0);
64520 + return ret;
64521 }
64522
64523 /**
64524 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64525 BUG();
64526 }
64527
64528 - if (security_capable(current_cred(), ns, cap) == 0) {
64529 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
64530 current->flags |= PF_SUPERPRIV;
64531 return true;
64532 }
64533 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
64534 }
64535 EXPORT_SYMBOL(ns_capable);
64536
64537 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
64538 +{
64539 + if (unlikely(!cap_valid(cap))) {
64540 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64541 + BUG();
64542 + }
64543 +
64544 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
64545 + current->flags |= PF_SUPERPRIV;
64546 + return true;
64547 + }
64548 + return false;
64549 +}
64550 +EXPORT_SYMBOL(ns_capable_nolog);
64551 +
64552 /**
64553 * capable - Determine if the current task has a superior capability in effect
64554 * @cap: The capability to be tested for
64555 @@ -408,6 +427,12 @@ bool capable(int cap)
64556 }
64557 EXPORT_SYMBOL(capable);
64558
64559 +bool capable_nolog(int cap)
64560 +{
64561 + return ns_capable_nolog(&init_user_ns, cap);
64562 +}
64563 +EXPORT_SYMBOL(capable_nolog);
64564 +
64565 /**
64566 * nsown_capable - Check superior capability to one's own user_ns
64567 * @cap: The capability in question
64568 diff --git a/kernel/compat.c b/kernel/compat.c
64569 index d2c67aa..a629b2e 100644
64570 --- a/kernel/compat.c
64571 +++ b/kernel/compat.c
64572 @@ -13,6 +13,7 @@
64573
64574 #include <linux/linkage.h>
64575 #include <linux/compat.h>
64576 +#include <linux/module.h>
64577 #include <linux/errno.h>
64578 #include <linux/time.h>
64579 #include <linux/signal.h>
64580 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
64581 mm_segment_t oldfs;
64582 long ret;
64583
64584 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
64585 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
64586 oldfs = get_fs();
64587 set_fs(KERNEL_DS);
64588 ret = hrtimer_nanosleep_restart(restart);
64589 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
64590 oldfs = get_fs();
64591 set_fs(KERNEL_DS);
64592 ret = hrtimer_nanosleep(&tu,
64593 - rmtp ? (struct timespec __user *)&rmt : NULL,
64594 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
64595 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
64596 set_fs(oldfs);
64597
64598 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
64599 mm_segment_t old_fs = get_fs();
64600
64601 set_fs(KERNEL_DS);
64602 - ret = sys_sigpending((old_sigset_t __user *) &s);
64603 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
64604 set_fs(old_fs);
64605 if (ret == 0)
64606 ret = put_user(s, set);
64607 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
64608 mm_segment_t old_fs = get_fs();
64609
64610 set_fs(KERNEL_DS);
64611 - ret = sys_old_getrlimit(resource, &r);
64612 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
64613 set_fs(old_fs);
64614
64615 if (!ret) {
64616 @@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
64617 mm_segment_t old_fs = get_fs();
64618
64619 set_fs(KERNEL_DS);
64620 - ret = sys_getrusage(who, (struct rusage __user *) &r);
64621 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
64622 set_fs(old_fs);
64623
64624 if (ret)
64625 @@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
64626 set_fs (KERNEL_DS);
64627 ret = sys_wait4(pid,
64628 (stat_addr ?
64629 - (unsigned int __user *) &status : NULL),
64630 - options, (struct rusage __user *) &r);
64631 + (unsigned int __force_user *) &status : NULL),
64632 + options, (struct rusage __force_user *) &r);
64633 set_fs (old_fs);
64634
64635 if (ret > 0) {
64636 @@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
64637 memset(&info, 0, sizeof(info));
64638
64639 set_fs(KERNEL_DS);
64640 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
64641 - uru ? (struct rusage __user *)&ru : NULL);
64642 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
64643 + uru ? (struct rusage __force_user *)&ru : NULL);
64644 set_fs(old_fs);
64645
64646 if ((ret < 0) || (info.si_signo == 0))
64647 @@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
64648 oldfs = get_fs();
64649 set_fs(KERNEL_DS);
64650 err = sys_timer_settime(timer_id, flags,
64651 - (struct itimerspec __user *) &newts,
64652 - (struct itimerspec __user *) &oldts);
64653 + (struct itimerspec __force_user *) &newts,
64654 + (struct itimerspec __force_user *) &oldts);
64655 set_fs(oldfs);
64656 if (!err && old && put_compat_itimerspec(old, &oldts))
64657 return -EFAULT;
64658 @@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
64659 oldfs = get_fs();
64660 set_fs(KERNEL_DS);
64661 err = sys_timer_gettime(timer_id,
64662 - (struct itimerspec __user *) &ts);
64663 + (struct itimerspec __force_user *) &ts);
64664 set_fs(oldfs);
64665 if (!err && put_compat_itimerspec(setting, &ts))
64666 return -EFAULT;
64667 @@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
64668 oldfs = get_fs();
64669 set_fs(KERNEL_DS);
64670 err = sys_clock_settime(which_clock,
64671 - (struct timespec __user *) &ts);
64672 + (struct timespec __force_user *) &ts);
64673 set_fs(oldfs);
64674 return err;
64675 }
64676 @@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
64677 oldfs = get_fs();
64678 set_fs(KERNEL_DS);
64679 err = sys_clock_gettime(which_clock,
64680 - (struct timespec __user *) &ts);
64681 + (struct timespec __force_user *) &ts);
64682 set_fs(oldfs);
64683 if (!err && put_compat_timespec(&ts, tp))
64684 return -EFAULT;
64685 @@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
64686
64687 oldfs = get_fs();
64688 set_fs(KERNEL_DS);
64689 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
64690 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
64691 set_fs(oldfs);
64692
64693 err = compat_put_timex(utp, &txc);
64694 @@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
64695 oldfs = get_fs();
64696 set_fs(KERNEL_DS);
64697 err = sys_clock_getres(which_clock,
64698 - (struct timespec __user *) &ts);
64699 + (struct timespec __force_user *) &ts);
64700 set_fs(oldfs);
64701 if (!err && tp && put_compat_timespec(&ts, tp))
64702 return -EFAULT;
64703 @@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
64704 long err;
64705 mm_segment_t oldfs;
64706 struct timespec tu;
64707 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
64708 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
64709
64710 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
64711 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
64712 oldfs = get_fs();
64713 set_fs(KERNEL_DS);
64714 err = clock_nanosleep_restart(restart);
64715 @@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
64716 oldfs = get_fs();
64717 set_fs(KERNEL_DS);
64718 err = sys_clock_nanosleep(which_clock, flags,
64719 - (struct timespec __user *) &in,
64720 - (struct timespec __user *) &out);
64721 + (struct timespec __force_user *) &in,
64722 + (struct timespec __force_user *) &out);
64723 set_fs(oldfs);
64724
64725 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
64726 diff --git a/kernel/configs.c b/kernel/configs.c
64727 index 42e8fa0..9e7406b 100644
64728 --- a/kernel/configs.c
64729 +++ b/kernel/configs.c
64730 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
64731 struct proc_dir_entry *entry;
64732
64733 /* create the current config file */
64734 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
64735 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
64736 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
64737 + &ikconfig_file_ops);
64738 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64739 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
64740 + &ikconfig_file_ops);
64741 +#endif
64742 +#else
64743 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
64744 &ikconfig_file_ops);
64745 +#endif
64746 +
64747 if (!entry)
64748 return -ENOMEM;
64749
64750 diff --git a/kernel/cred.c b/kernel/cred.c
64751 index e70683d..27761b6 100644
64752 --- a/kernel/cred.c
64753 +++ b/kernel/cred.c
64754 @@ -205,6 +205,15 @@ void exit_creds(struct task_struct *tsk)
64755 validate_creds(cred);
64756 put_cred(cred);
64757 }
64758 +
64759 +#ifdef CONFIG_GRKERNSEC_SETXID
64760 + cred = (struct cred *) tsk->delayed_cred;
64761 + if (cred) {
64762 + tsk->delayed_cred = NULL;
64763 + validate_creds(cred);
64764 + put_cred(cred);
64765 + }
64766 +#endif
64767 }
64768
64769 /**
64770 @@ -473,7 +482,7 @@ error_put:
64771 * Always returns 0 thus allowing this function to be tail-called at the end
64772 * of, say, sys_setgid().
64773 */
64774 -int commit_creds(struct cred *new)
64775 +static int __commit_creds(struct cred *new)
64776 {
64777 struct task_struct *task = current;
64778 const struct cred *old = task->real_cred;
64779 @@ -492,6 +501,8 @@ int commit_creds(struct cred *new)
64780
64781 get_cred(new); /* we will require a ref for the subj creds too */
64782
64783 + gr_set_role_label(task, new->uid, new->gid);
64784 +
64785 /* dumpability changes */
64786 if (old->euid != new->euid ||
64787 old->egid != new->egid ||
64788 @@ -541,6 +552,101 @@ int commit_creds(struct cred *new)
64789 put_cred(old);
64790 return 0;
64791 }
64792 +#ifdef CONFIG_GRKERNSEC_SETXID
64793 +extern int set_user(struct cred *new);
64794 +
64795 +void gr_delayed_cred_worker(void)
64796 +{
64797 + const struct cred *new = current->delayed_cred;
64798 + struct cred *ncred;
64799 +
64800 + current->delayed_cred = NULL;
64801 +
64802 + if (current_uid() && new != NULL) {
64803 + // from doing get_cred on it when queueing this
64804 + put_cred(new);
64805 + return;
64806 + } else if (new == NULL)
64807 + return;
64808 +
64809 + ncred = prepare_creds();
64810 + if (!ncred)
64811 + goto die;
64812 + // uids
64813 + ncred->uid = new->uid;
64814 + ncred->euid = new->euid;
64815 + ncred->suid = new->suid;
64816 + ncred->fsuid = new->fsuid;
64817 + // gids
64818 + ncred->gid = new->gid;
64819 + ncred->egid = new->egid;
64820 + ncred->sgid = new->sgid;
64821 + ncred->fsgid = new->fsgid;
64822 + // groups
64823 + if (set_groups(ncred, new->group_info) < 0) {
64824 + abort_creds(ncred);
64825 + goto die;
64826 + }
64827 + // caps
64828 + ncred->securebits = new->securebits;
64829 + ncred->cap_inheritable = new->cap_inheritable;
64830 + ncred->cap_permitted = new->cap_permitted;
64831 + ncred->cap_effective = new->cap_effective;
64832 + ncred->cap_bset = new->cap_bset;
64833 +
64834 + if (set_user(ncred)) {
64835 + abort_creds(ncred);
64836 + goto die;
64837 + }
64838 +
64839 + // from doing get_cred on it when queueing this
64840 + put_cred(new);
64841 +
64842 + __commit_creds(ncred);
64843 + return;
64844 +die:
64845 + // from doing get_cred on it when queueing this
64846 + put_cred(new);
64847 + do_group_exit(SIGKILL);
64848 +}
64849 +#endif
64850 +
64851 +int commit_creds(struct cred *new)
64852 +{
64853 +#ifdef CONFIG_GRKERNSEC_SETXID
64854 + int ret;
64855 + int schedule_it = 0;
64856 + struct task_struct *t;
64857 +
64858 + /* we won't get called with tasklist_lock held for writing
64859 + and interrupts disabled as the cred struct in that case is
64860 + init_cred
64861 + */
64862 + if (grsec_enable_setxid && !current_is_single_threaded() &&
64863 + !current_uid() && new->uid) {
64864 + schedule_it = 1;
64865 + }
64866 + ret = __commit_creds(new);
64867 + if (schedule_it) {
64868 + rcu_read_lock();
64869 + read_lock(&tasklist_lock);
64870 + for (t = next_thread(current); t != current;
64871 + t = next_thread(t)) {
64872 + if (t->delayed_cred == NULL) {
64873 + t->delayed_cred = get_cred(new);
64874 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
64875 + set_tsk_need_resched(t);
64876 + }
64877 + }
64878 + read_unlock(&tasklist_lock);
64879 + rcu_read_unlock();
64880 + }
64881 + return ret;
64882 +#else
64883 + return __commit_creds(new);
64884 +#endif
64885 +}
64886 +
64887 EXPORT_SYMBOL(commit_creds);
64888
64889 /**
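
The cred.c changes above wrap commit_creds(): when a multithreaded root process drops its uid, the new credentials are queued in each sibling thread's delayed_cred slot along with TIF_GRSEC_SETXID, and gr_delayed_cred_worker() applies them when that thread next reaches a safe point. Below is a rough userspace pthreads model of that propagation idea only -- it is not the patch's code, every name in it is invented, and it ignores the capability/group details the real worker copies. Build with -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 4

struct fake_cred { int uid; };

static struct fake_cred root_cred = { .uid = 0 };
static struct fake_cred user_cred = { .uid = 1000 };

/* one slot per thread, like tsk->delayed_cred in the hunk */
static _Atomic(struct fake_cred *) delayed_cred[NTHREADS];
static int thread_uid[NTHREADS];

/* models commit_creds(): apply to self, then queue for every sibling */
static void commit_creds_sketch(int self, struct fake_cred *new)
{
    thread_uid[self] = new->uid;
    if (new->uid != 0)                       /* only when dropping root */
        for (int t = 0; t < NTHREADS; t++)
            if (t != self)
                atomic_store(&delayed_cred[t], new);
}

static void *worker(void *arg)
{
    int self = (int)(long)arg;

    thread_uid[self] = root_cred.uid;        /* everyone starts privileged */
    if (self == 0)
        commit_creds_sketch(self, &user_cred);

    /* models returning to userspace: pick up a queued delayed cred, if any */
    while (self != 0) {
        struct fake_cred *c = atomic_exchange(&delayed_cred[self], NULL);
        if (c) {                             /* gr_delayed_cred_worker() analogue */
            thread_uid[self] = c->uid;
            break;
        }
        usleep(1000);
    }
    printf("thread %d ends with uid %d\n", self, thread_uid[self]);
    return NULL;
}

int main(void)
{
    pthread_t tid[NTHREADS];

    for (long t = 0; t < NTHREADS; t++)
        pthread_create(&tid[t], NULL, worker, (void *)t);
    for (int t = 0; t < NTHREADS; t++)
        pthread_join(tid[t], NULL);
    return 0;
}
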
64890 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
64891 index 0557f24..1a00d9a 100644
64892 --- a/kernel/debug/debug_core.c
64893 +++ b/kernel/debug/debug_core.c
64894 @@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
64895 */
64896 static atomic_t masters_in_kgdb;
64897 static atomic_t slaves_in_kgdb;
64898 -static atomic_t kgdb_break_tasklet_var;
64899 +static atomic_unchecked_t kgdb_break_tasklet_var;
64900 atomic_t kgdb_setting_breakpoint;
64901
64902 struct task_struct *kgdb_usethread;
64903 @@ -132,7 +132,7 @@ int kgdb_single_step;
64904 static pid_t kgdb_sstep_pid;
64905
64906 /* to keep track of the CPU which is doing the single stepping*/
64907 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64908 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64909
64910 /*
64911 * If you are debugging a problem where roundup (the collection of
64912 @@ -540,7 +540,7 @@ return_normal:
64913 * kernel will only try for the value of sstep_tries before
64914 * giving up and continuing on.
64915 */
64916 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
64917 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
64918 (kgdb_info[cpu].task &&
64919 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
64920 atomic_set(&kgdb_active, -1);
64921 @@ -634,8 +634,8 @@ cpu_master_loop:
64922 }
64923
64924 kgdb_restore:
64925 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
64926 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
64927 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
64928 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
64929 if (kgdb_info[sstep_cpu].task)
64930 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
64931 else
64932 @@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
64933 static void kgdb_tasklet_bpt(unsigned long ing)
64934 {
64935 kgdb_breakpoint();
64936 - atomic_set(&kgdb_break_tasklet_var, 0);
64937 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
64938 }
64939
64940 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
64941
64942 void kgdb_schedule_breakpoint(void)
64943 {
64944 - if (atomic_read(&kgdb_break_tasklet_var) ||
64945 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
64946 atomic_read(&kgdb_active) != -1 ||
64947 atomic_read(&kgdb_setting_breakpoint))
64948 return;
64949 - atomic_inc(&kgdb_break_tasklet_var);
64950 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
64951 tasklet_schedule(&kgdb_tasklet_breakpoint);
64952 }
64953 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
64954 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
64955 index 67b847d..93834dd 100644
64956 --- a/kernel/debug/kdb/kdb_main.c
64957 +++ b/kernel/debug/kdb/kdb_main.c
64958 @@ -1983,7 +1983,7 @@ static int kdb_lsmod(int argc, const char **argv)
64959 list_for_each_entry(mod, kdb_modules, list) {
64960
64961 kdb_printf("%-20s%8u 0x%p ", mod->name,
64962 - mod->core_size, (void *)mod);
64963 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
64964 #ifdef CONFIG_MODULE_UNLOAD
64965 kdb_printf("%4ld ", module_refcount(mod));
64966 #endif
64967 @@ -1993,7 +1993,7 @@ static int kdb_lsmod(int argc, const char **argv)
64968 kdb_printf(" (Loading)");
64969 else
64970 kdb_printf(" (Live)");
64971 - kdb_printf(" 0x%p", mod->module_core);
64972 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64973
64974 #ifdef CONFIG_MODULE_UNLOAD
64975 {
64976 diff --git a/kernel/events/core.c b/kernel/events/core.c
64977 index fd126f8..70b755b 100644
64978 --- a/kernel/events/core.c
64979 +++ b/kernel/events/core.c
64980 @@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
64981 return 0;
64982 }
64983
64984 -static atomic64_t perf_event_id;
64985 +static atomic64_unchecked_t perf_event_id;
64986
64987 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
64988 enum event_type_t event_type);
64989 @@ -2659,7 +2659,7 @@ static void __perf_event_read(void *info)
64990
64991 static inline u64 perf_event_count(struct perf_event *event)
64992 {
64993 - return local64_read(&event->count) + atomic64_read(&event->child_count);
64994 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
64995 }
64996
64997 static u64 perf_event_read(struct perf_event *event)
64998 @@ -2983,9 +2983,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
64999 mutex_lock(&event->child_mutex);
65000 total += perf_event_read(event);
65001 *enabled += event->total_time_enabled +
65002 - atomic64_read(&event->child_total_time_enabled);
65003 + atomic64_read_unchecked(&event->child_total_time_enabled);
65004 *running += event->total_time_running +
65005 - atomic64_read(&event->child_total_time_running);
65006 + atomic64_read_unchecked(&event->child_total_time_running);
65007
65008 list_for_each_entry(child, &event->child_list, child_list) {
65009 total += perf_event_read(child);
65010 @@ -3393,10 +3393,10 @@ void perf_event_update_userpage(struct perf_event *event)
65011 userpg->offset -= local64_read(&event->hw.prev_count);
65012
65013 userpg->time_enabled = enabled +
65014 - atomic64_read(&event->child_total_time_enabled);
65015 + atomic64_read_unchecked(&event->child_total_time_enabled);
65016
65017 userpg->time_running = running +
65018 - atomic64_read(&event->child_total_time_running);
65019 + atomic64_read_unchecked(&event->child_total_time_running);
65020
65021 arch_perf_update_userpage(userpg, now);
65022
65023 @@ -3829,11 +3829,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65024 values[n++] = perf_event_count(event);
65025 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65026 values[n++] = enabled +
65027 - atomic64_read(&event->child_total_time_enabled);
65028 + atomic64_read_unchecked(&event->child_total_time_enabled);
65029 }
65030 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65031 values[n++] = running +
65032 - atomic64_read(&event->child_total_time_running);
65033 + atomic64_read_unchecked(&event->child_total_time_running);
65034 }
65035 if (read_format & PERF_FORMAT_ID)
65036 values[n++] = primary_event_id(event);
65037 @@ -4511,12 +4511,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65038 * need to add enough zero bytes after the string to handle
65039 * the 64bit alignment we do later.
65040 */
65041 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65042 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
65043 if (!buf) {
65044 name = strncpy(tmp, "//enomem", sizeof(tmp));
65045 goto got_name;
65046 }
65047 - name = d_path(&file->f_path, buf, PATH_MAX);
65048 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65049 if (IS_ERR(name)) {
65050 name = strncpy(tmp, "//toolong", sizeof(tmp));
65051 goto got_name;
65052 @@ -5929,7 +5929,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65053 event->parent = parent_event;
65054
65055 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65056 - event->id = atomic64_inc_return(&perf_event_id);
65057 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
65058
65059 event->state = PERF_EVENT_STATE_INACTIVE;
65060
65061 @@ -6491,10 +6491,10 @@ static void sync_child_event(struct perf_event *child_event,
65062 /*
65063 * Add back the child's count to the parent's count:
65064 */
65065 - atomic64_add(child_val, &parent_event->child_count);
65066 - atomic64_add(child_event->total_time_enabled,
65067 + atomic64_add_unchecked(child_val, &parent_event->child_count);
65068 + atomic64_add_unchecked(child_event->total_time_enabled,
65069 &parent_event->child_total_time_enabled);
65070 - atomic64_add(child_event->total_time_running,
65071 + atomic64_add_unchecked(child_event->total_time_running,
65072 &parent_event->child_total_time_running);
65073
65074 /*
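
The kgdb and perf hunks above convert pure bookkeeping counters from atomic_t to atomic_unchecked_t. The reason is the PaX REFCOUNT hardening elsewhere in this patch: ordinary atomic_t operations are instrumented to refuse overflow (so a wrapped reference count cannot turn into a use-after-free), and counters that may legitimately grow without bound therefore opt out via the _unchecked variant. The snippet below only illustrates that distinction in plain C; it is not the PaX implementation, which performs the check inside the architecture-specific atomic primitives.

#include <limits.h>
#include <stdio.h>

/* models atomic_inc() under PAX_REFCOUNT: refuses to wrap */
static int checked_inc(unsigned int *v)
{
    if (*v == UINT_MAX) {
        fprintf(stderr, "refcount overflow caught, value left saturated\n");
        return -1;              /* the real feature reports and stops the offender */
    }
    (*v)++;
    return 0;
}

/* models atomic_inc_unchecked(): wrapping is acceptable for a statistic */
static void unchecked_inc(unsigned int *v)
{
    (*v)++;                     /* well defined for unsigned: wraps to 0 */
}

int main(void)
{
    unsigned int refcount = UINT_MAX, event_id = UINT_MAX;

    checked_inc(&refcount);     /* stays at UINT_MAX, error reported */
    unchecked_inc(&event_id);   /* wraps to 0, harmless for an id/statistic */
    printf("refcount=%u event_id=%u\n", refcount, event_id);
    return 0;
}
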
65075 diff --git a/kernel/exit.c b/kernel/exit.c
65076 index d8bd3b42..26bd8dc 100644
65077 --- a/kernel/exit.c
65078 +++ b/kernel/exit.c
65079 @@ -59,6 +59,10 @@
65080 #include <asm/pgtable.h>
65081 #include <asm/mmu_context.h>
65082
65083 +#ifdef CONFIG_GRKERNSEC
65084 +extern rwlock_t grsec_exec_file_lock;
65085 +#endif
65086 +
65087 static void exit_mm(struct task_struct * tsk);
65088
65089 static void __unhash_process(struct task_struct *p, bool group_dead)
65090 @@ -170,6 +174,10 @@ void release_task(struct task_struct * p)
65091 struct task_struct *leader;
65092 int zap_leader;
65093 repeat:
65094 +#ifdef CONFIG_NET
65095 + gr_del_task_from_ip_table(p);
65096 +#endif
65097 +
65098 /* don't need to get the RCU readlock here - the process is dead and
65099 * can't be modifying its own credentials. But shut RCU-lockdep up */
65100 rcu_read_lock();
65101 @@ -382,7 +390,7 @@ int allow_signal(int sig)
65102 * know it'll be handled, so that they don't get converted to
65103 * SIGKILL or just silently dropped.
65104 */
65105 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65106 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65107 recalc_sigpending();
65108 spin_unlock_irq(&current->sighand->siglock);
65109 return 0;
65110 @@ -418,6 +426,17 @@ void daemonize(const char *name, ...)
65111 vsnprintf(current->comm, sizeof(current->comm), name, args);
65112 va_end(args);
65113
65114 +#ifdef CONFIG_GRKERNSEC
65115 + write_lock(&grsec_exec_file_lock);
65116 + if (current->exec_file) {
65117 + fput(current->exec_file);
65118 + current->exec_file = NULL;
65119 + }
65120 + write_unlock(&grsec_exec_file_lock);
65121 +#endif
65122 +
65123 + gr_set_kernel_label(current);
65124 +
65125 /*
65126 * If we were started as result of loading a module, close all of the
65127 * user space pages. We don't need them, and if we didn't close them
65128 @@ -900,6 +919,8 @@ void do_exit(long code)
65129 struct task_struct *tsk = current;
65130 int group_dead;
65131
65132 + set_fs(USER_DS);
65133 +
65134 profile_task_exit(tsk);
65135
65136 WARN_ON(blk_needs_flush_plug(tsk));
65137 @@ -916,7 +937,6 @@ void do_exit(long code)
65138 * mm_release()->clear_child_tid() from writing to a user-controlled
65139 * kernel address.
65140 */
65141 - set_fs(USER_DS);
65142
65143 ptrace_event(PTRACE_EVENT_EXIT, code);
65144
65145 @@ -977,6 +997,9 @@ void do_exit(long code)
65146 tsk->exit_code = code;
65147 taskstats_exit(tsk, group_dead);
65148
65149 + gr_acl_handle_psacct(tsk, code);
65150 + gr_acl_handle_exit();
65151 +
65152 exit_mm(tsk);
65153
65154 if (group_dead)
65155 @@ -1093,7 +1116,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65156 * Take down every thread in the group. This is called by fatal signals
65157 * as well as by sys_exit_group (below).
65158 */
65159 -void
65160 +__noreturn void
65161 do_group_exit(int exit_code)
65162 {
65163 struct signal_struct *sig = current->signal;
65164 diff --git a/kernel/fork.c b/kernel/fork.c
65165 index 687a15d..efb4692 100644
65166 --- a/kernel/fork.c
65167 +++ b/kernel/fork.c
65168 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65169 *stackend = STACK_END_MAGIC; /* for overflow detection */
65170
65171 #ifdef CONFIG_CC_STACKPROTECTOR
65172 - tsk->stack_canary = get_random_int();
65173 + tsk->stack_canary = pax_get_random_long();
65174 #endif
65175
65176 /*
65177 @@ -310,13 +310,78 @@ out:
65178 }
65179
65180 #ifdef CONFIG_MMU
65181 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
65182 +{
65183 + struct vm_area_struct *tmp;
65184 + unsigned long charge;
65185 + struct mempolicy *pol;
65186 + struct file *file;
65187 +
65188 + charge = 0;
65189 + if (mpnt->vm_flags & VM_ACCOUNT) {
65190 + unsigned long len;
65191 + len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65192 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65193 + goto fail_nomem;
65194 + charge = len;
65195 + }
65196 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65197 + if (!tmp)
65198 + goto fail_nomem;
65199 + *tmp = *mpnt;
65200 + tmp->vm_mm = mm;
65201 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
65202 + pol = mpol_dup(vma_policy(mpnt));
65203 + if (IS_ERR(pol))
65204 + goto fail_nomem_policy;
65205 + vma_set_policy(tmp, pol);
65206 + if (anon_vma_fork(tmp, mpnt))
65207 + goto fail_nomem_anon_vma_fork;
65208 + tmp->vm_flags &= ~VM_LOCKED;
65209 + tmp->vm_next = tmp->vm_prev = NULL;
65210 + tmp->vm_mirror = NULL;
65211 + file = tmp->vm_file;
65212 + if (file) {
65213 + struct inode *inode = file->f_path.dentry->d_inode;
65214 + struct address_space *mapping = file->f_mapping;
65215 +
65216 + get_file(file);
65217 + if (tmp->vm_flags & VM_DENYWRITE)
65218 + atomic_dec(&inode->i_writecount);
65219 + mutex_lock(&mapping->i_mmap_mutex);
65220 + if (tmp->vm_flags & VM_SHARED)
65221 + mapping->i_mmap_writable++;
65222 + flush_dcache_mmap_lock(mapping);
65223 + /* insert tmp into the share list, just after mpnt */
65224 + vma_prio_tree_add(tmp, mpnt);
65225 + flush_dcache_mmap_unlock(mapping);
65226 + mutex_unlock(&mapping->i_mmap_mutex);
65227 + }
65228 +
65229 + /*
65230 + * Clear hugetlb-related page reserves for children. This only
65231 + * affects MAP_PRIVATE mappings. Faults generated by the child
65232 + * are not guaranteed to succeed, even if read-only
65233 + */
65234 + if (is_vm_hugetlb_page(tmp))
65235 + reset_vma_resv_huge_pages(tmp);
65236 +
65237 + return tmp;
65238 +
65239 +fail_nomem_anon_vma_fork:
65240 + mpol_put(pol);
65241 +fail_nomem_policy:
65242 + kmem_cache_free(vm_area_cachep, tmp);
65243 +fail_nomem:
65244 + vm_unacct_memory(charge);
65245 + return NULL;
65246 +}
65247 +
65248 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65249 {
65250 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65251 struct rb_node **rb_link, *rb_parent;
65252 int retval;
65253 - unsigned long charge;
65254 - struct mempolicy *pol;
65255
65256 down_write(&oldmm->mmap_sem);
65257 flush_cache_dup_mm(oldmm);
65258 @@ -328,8 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65259 mm->locked_vm = 0;
65260 mm->mmap = NULL;
65261 mm->mmap_cache = NULL;
65262 - mm->free_area_cache = oldmm->mmap_base;
65263 - mm->cached_hole_size = ~0UL;
65264 + mm->free_area_cache = oldmm->free_area_cache;
65265 + mm->cached_hole_size = oldmm->cached_hole_size;
65266 mm->map_count = 0;
65267 cpumask_clear(mm_cpumask(mm));
65268 mm->mm_rb = RB_ROOT;
65269 @@ -345,8 +410,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65270
65271 prev = NULL;
65272 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65273 - struct file *file;
65274 -
65275 if (mpnt->vm_flags & VM_DONTCOPY) {
65276 long pages = vma_pages(mpnt);
65277 mm->total_vm -= pages;
65278 @@ -354,53 +417,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65279 -pages);
65280 continue;
65281 }
65282 - charge = 0;
65283 - if (mpnt->vm_flags & VM_ACCOUNT) {
65284 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65285 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65286 - goto fail_nomem;
65287 - charge = len;
65288 + tmp = dup_vma(mm, oldmm, mpnt);
65289 + if (!tmp) {
65290 + retval = -ENOMEM;
65291 + goto out;
65292 }
65293 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65294 - if (!tmp)
65295 - goto fail_nomem;
65296 - *tmp = *mpnt;
65297 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
65298 - pol = mpol_dup(vma_policy(mpnt));
65299 - retval = PTR_ERR(pol);
65300 - if (IS_ERR(pol))
65301 - goto fail_nomem_policy;
65302 - vma_set_policy(tmp, pol);
65303 - tmp->vm_mm = mm;
65304 - if (anon_vma_fork(tmp, mpnt))
65305 - goto fail_nomem_anon_vma_fork;
65306 - tmp->vm_flags &= ~VM_LOCKED;
65307 - tmp->vm_next = tmp->vm_prev = NULL;
65308 - file = tmp->vm_file;
65309 - if (file) {
65310 - struct inode *inode = file->f_path.dentry->d_inode;
65311 - struct address_space *mapping = file->f_mapping;
65312 -
65313 - get_file(file);
65314 - if (tmp->vm_flags & VM_DENYWRITE)
65315 - atomic_dec(&inode->i_writecount);
65316 - mutex_lock(&mapping->i_mmap_mutex);
65317 - if (tmp->vm_flags & VM_SHARED)
65318 - mapping->i_mmap_writable++;
65319 - flush_dcache_mmap_lock(mapping);
65320 - /* insert tmp into the share list, just after mpnt */
65321 - vma_prio_tree_add(tmp, mpnt);
65322 - flush_dcache_mmap_unlock(mapping);
65323 - mutex_unlock(&mapping->i_mmap_mutex);
65324 - }
65325 -
65326 - /*
65327 - * Clear hugetlb-related page reserves for children. This only
65328 - * affects MAP_PRIVATE mappings. Faults generated by the child
65329 - * are not guaranteed to succeed, even if read-only
65330 - */
65331 - if (is_vm_hugetlb_page(tmp))
65332 - reset_vma_resv_huge_pages(tmp);
65333
65334 /*
65335 * Link in the new vma and copy the page table entries.
65336 @@ -423,6 +444,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65337 if (retval)
65338 goto out;
65339 }
65340 +
65341 +#ifdef CONFIG_PAX_SEGMEXEC
65342 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65343 + struct vm_area_struct *mpnt_m;
65344 +
65345 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65346 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65347 +
65348 + if (!mpnt->vm_mirror)
65349 + continue;
65350 +
65351 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65352 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65353 + mpnt->vm_mirror = mpnt_m;
65354 + } else {
65355 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65356 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65357 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65358 + mpnt->vm_mirror->vm_mirror = mpnt;
65359 + }
65360 + }
65361 + BUG_ON(mpnt_m);
65362 + }
65363 +#endif
65364 +
65365 /* a new mm has just been created */
65366 arch_dup_mmap(oldmm, mm);
65367 retval = 0;
65368 @@ -431,14 +477,6 @@ out:
65369 flush_tlb_mm(oldmm);
65370 up_write(&oldmm->mmap_sem);
65371 return retval;
65372 -fail_nomem_anon_vma_fork:
65373 - mpol_put(pol);
65374 -fail_nomem_policy:
65375 - kmem_cache_free(vm_area_cachep, tmp);
65376 -fail_nomem:
65377 - retval = -ENOMEM;
65378 - vm_unacct_memory(charge);
65379 - goto out;
65380 }
65381
65382 static inline int mm_alloc_pgd(struct mm_struct *mm)
65383 @@ -675,8 +713,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
65384 return ERR_PTR(err);
65385
65386 mm = get_task_mm(task);
65387 - if (mm && mm != current->mm &&
65388 - !ptrace_may_access(task, mode)) {
65389 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
65390 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
65391 mmput(mm);
65392 mm = ERR_PTR(-EACCES);
65393 }
65394 @@ -898,13 +936,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65395 spin_unlock(&fs->lock);
65396 return -EAGAIN;
65397 }
65398 - fs->users++;
65399 + atomic_inc(&fs->users);
65400 spin_unlock(&fs->lock);
65401 return 0;
65402 }
65403 tsk->fs = copy_fs_struct(fs);
65404 if (!tsk->fs)
65405 return -ENOMEM;
65406 + gr_set_chroot_entries(tsk, &tsk->fs->root);
65407 return 0;
65408 }
65409
65410 @@ -1171,6 +1210,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65411 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65412 #endif
65413 retval = -EAGAIN;
65414 +
65415 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65416 +
65417 if (atomic_read(&p->real_cred->user->processes) >=
65418 task_rlimit(p, RLIMIT_NPROC)) {
65419 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65420 @@ -1327,6 +1369,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65421 if (clone_flags & CLONE_THREAD)
65422 p->tgid = current->tgid;
65423
65424 + gr_copy_label(p);
65425 +
65426 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65427 /*
65428 * Clear TID on mm_release()?
65429 @@ -1501,6 +1545,8 @@ bad_fork_cleanup_count:
65430 bad_fork_free:
65431 free_task(p);
65432 fork_out:
65433 + gr_log_forkfail(retval);
65434 +
65435 return ERR_PTR(retval);
65436 }
65437
65438 @@ -1601,6 +1647,8 @@ long do_fork(unsigned long clone_flags,
65439 if (clone_flags & CLONE_PARENT_SETTID)
65440 put_user(nr, parent_tidptr);
65441
65442 + gr_handle_brute_check();
65443 +
65444 if (clone_flags & CLONE_VFORK) {
65445 p->vfork_done = &vfork;
65446 init_completion(&vfork);
65447 @@ -1699,7 +1747,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65448 return 0;
65449
65450 /* don't need lock here; in the worst case we'll do useless copy */
65451 - if (fs->users == 1)
65452 + if (atomic_read(&fs->users) == 1)
65453 return 0;
65454
65455 *new_fsp = copy_fs_struct(fs);
65456 @@ -1788,7 +1836,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65457 fs = current->fs;
65458 spin_lock(&fs->lock);
65459 current->fs = new_fs;
65460 - if (--fs->users)
65461 + gr_set_chroot_entries(current, &current->fs->root);
65462 + if (atomic_dec_return(&fs->users))
65463 new_fs = NULL;
65464 else
65465 new_fs = fs;
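
Among the fork.c changes above, fs->users moves from a plain integer guarded by fs->lock to an atomic counter (atomic_inc, atomic_read, atomic_dec_return). The sketch below is only a C11 userspace rendering of why "if (--fs->users)" and "if (atomic_dec_return(&fs->users))" keep the same last-reference meaning; the struct and helper names are invented.

#include <stdatomic.h>
#include <stdio.h>

struct fs_struct_sketch {
    atomic_int users;
};

static void get_fs_users(struct fs_struct_sketch *fs)
{
    atomic_fetch_add(&fs->users, 1);     /* was: fs->users++ under fs->lock */
}

/* returns 1 when the caller dropped the last reference, i.e. the case where
 * "if (atomic_dec_return(&fs->users))" is false in the hunk above */
static int put_fs_users(struct fs_struct_sketch *fs)
{
    return atomic_fetch_sub(&fs->users, 1) == 1;
}

int main(void)
{
    struct fs_struct_sketch fs = { .users = 2 };

    printf("first put is last user:  %d\n", put_fs_users(&fs));   /* 0 */
    printf("second put is last user: %d\n", put_fs_users(&fs));   /* 1 */
    get_fs_users(&fs);
    printf("users now: %d\n", atomic_load(&fs.users));            /* 1 */
    return 0;
}
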
65466 diff --git a/kernel/futex.c b/kernel/futex.c
65467 index e2b0fb9..db818ac 100644
65468 --- a/kernel/futex.c
65469 +++ b/kernel/futex.c
65470 @@ -54,6 +54,7 @@
65471 #include <linux/mount.h>
65472 #include <linux/pagemap.h>
65473 #include <linux/syscalls.h>
65474 +#include <linux/ptrace.h>
65475 #include <linux/signal.h>
65476 #include <linux/export.h>
65477 #include <linux/magic.h>
65478 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65479 struct page *page, *page_head;
65480 int err, ro = 0;
65481
65482 +#ifdef CONFIG_PAX_SEGMEXEC
65483 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65484 + return -EFAULT;
65485 +#endif
65486 +
65487 /*
65488 * The futex address must be "naturally" aligned.
65489 */
65490 @@ -2711,6 +2717,7 @@ static int __init futex_init(void)
65491 {
65492 u32 curval;
65493 int i;
65494 + mm_segment_t oldfs;
65495
65496 /*
65497 * This will fail and we want it. Some arch implementations do
65498 @@ -2722,8 +2729,11 @@ static int __init futex_init(void)
65499 * implementation, the non-functional ones will return
65500 * -ENOSYS.
65501 */
65502 + oldfs = get_fs();
65503 + set_fs(USER_DS);
65504 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
65505 futex_cmpxchg_enabled = 1;
65506 + set_fs(oldfs);
65507
65508 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
65509 plist_head_init(&futex_queues[i].chain);
65510 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
65511 index 9b22d03..6295b62 100644
65512 --- a/kernel/gcov/base.c
65513 +++ b/kernel/gcov/base.c
65514 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
65515 }
65516
65517 #ifdef CONFIG_MODULES
65518 -static inline int within(void *addr, void *start, unsigned long size)
65519 -{
65520 - return ((addr >= start) && (addr < start + size));
65521 -}
65522 -
65523 /* Update list and generate events when modules are unloaded. */
65524 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65525 void *data)
65526 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65527 prev = NULL;
65528 /* Remove entries located in module from linked list. */
65529 for (info = gcov_info_head; info; info = info->next) {
65530 - if (within(info, mod->module_core, mod->core_size)) {
65531 + if (within_module_core_rw((unsigned long)info, mod)) {
65532 if (prev)
65533 prev->next = info->next;
65534 else
65535 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
65536 index ae34bf5..4e2f3d0 100644
65537 --- a/kernel/hrtimer.c
65538 +++ b/kernel/hrtimer.c
65539 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
65540 local_irq_restore(flags);
65541 }
65542
65543 -static void run_hrtimer_softirq(struct softirq_action *h)
65544 +static void run_hrtimer_softirq(void)
65545 {
65546 hrtimer_peek_ahead_timers();
65547 }
65548 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
65549 index 4304919..bbc53fa 100644
65550 --- a/kernel/jump_label.c
65551 +++ b/kernel/jump_label.c
65552 @@ -50,7 +50,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
65553
65554 size = (((unsigned long)stop - (unsigned long)start)
65555 / sizeof(struct jump_entry));
65556 + pax_open_kernel();
65557 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
65558 + pax_close_kernel();
65559 }
65560
65561 static void jump_label_update(struct static_key *key, int enable);
65562 @@ -356,10 +358,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
65563 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
65564 struct jump_entry *iter;
65565
65566 + pax_open_kernel();
65567 for (iter = iter_start; iter < iter_stop; iter++) {
65568 if (within_module_init(iter->code, mod))
65569 iter->code = 0;
65570 }
65571 + pax_close_kernel();
65572 }
65573
65574 static int
65575 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
65576 index 079f1d3..a407562 100644
65577 --- a/kernel/kallsyms.c
65578 +++ b/kernel/kallsyms.c
65579 @@ -11,6 +11,9 @@
65580 * Changed the compression method from stem compression to "table lookup"
65581 * compression (see scripts/kallsyms.c for a more complete description)
65582 */
65583 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65584 +#define __INCLUDED_BY_HIDESYM 1
65585 +#endif
65586 #include <linux/kallsyms.h>
65587 #include <linux/module.h>
65588 #include <linux/init.h>
65589 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
65590
65591 static inline int is_kernel_inittext(unsigned long addr)
65592 {
65593 + if (system_state != SYSTEM_BOOTING)
65594 + return 0;
65595 +
65596 if (addr >= (unsigned long)_sinittext
65597 && addr <= (unsigned long)_einittext)
65598 return 1;
65599 return 0;
65600 }
65601
65602 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65603 +#ifdef CONFIG_MODULES
65604 +static inline int is_module_text(unsigned long addr)
65605 +{
65606 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
65607 + return 1;
65608 +
65609 + addr = ktla_ktva(addr);
65610 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
65611 +}
65612 +#else
65613 +static inline int is_module_text(unsigned long addr)
65614 +{
65615 + return 0;
65616 +}
65617 +#endif
65618 +#endif
65619 +
65620 static inline int is_kernel_text(unsigned long addr)
65621 {
65622 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
65623 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
65624
65625 static inline int is_kernel(unsigned long addr)
65626 {
65627 +
65628 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65629 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
65630 + return 1;
65631 +
65632 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
65633 +#else
65634 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
65635 +#endif
65636 +
65637 return 1;
65638 return in_gate_area_no_mm(addr);
65639 }
65640
65641 static int is_ksym_addr(unsigned long addr)
65642 {
65643 +
65644 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65645 + if (is_module_text(addr))
65646 + return 0;
65647 +#endif
65648 +
65649 if (all_var)
65650 return is_kernel(addr);
65651
65652 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
65653
65654 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
65655 {
65656 - iter->name[0] = '\0';
65657 iter->nameoff = get_symbol_offset(new_pos);
65658 iter->pos = new_pos;
65659 }
65660 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
65661 {
65662 struct kallsym_iter *iter = m->private;
65663
65664 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65665 + if (current_uid())
65666 + return 0;
65667 +#endif
65668 +
65669 /* Some debugging symbols have no name. Ignore them. */
65670 if (!iter->name[0])
65671 return 0;
65672 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
65673 struct kallsym_iter *iter;
65674 int ret;
65675
65676 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
65677 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
65678 if (!iter)
65679 return -ENOMEM;
65680 reset_iter(iter, 0);
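
The most visible effect of the kallsyms.c hunk is in s_show(): with GRKERNSEC_HIDESYM enabled, a non-root reader of /proc/kallsyms simply gets no records, denying unprivileged users a map of kernel addresses. A minimal userspace sketch of just that gating (names and the example address are invented, this is not the kernel code):

#include <stdio.h>

struct sym { unsigned long addr; const char *name; };

/* models the s_show() early return added under GRKERNSEC_HIDESYM */
static int show_symbol(const struct sym *s, unsigned int uid)
{
    if (uid != 0)                 /* the hunk: if (current_uid()) return 0; */
        return 0;                 /* emit nothing, but report success */
    printf("%016lx t %s\n", s->addr, s->name);
    return 0;
}

int main(void)
{
    struct sym s = { 0xc1000000UL, "example_symbol" };

    puts("as root:");
    show_symbol(&s, 0);
    puts("as uid 1000 (output intentionally empty):");
    show_symbol(&s, 1000);
    return 0;
}
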
65681 diff --git a/kernel/kexec.c b/kernel/kexec.c
65682 index 4e2e472..cd0c7ae 100644
65683 --- a/kernel/kexec.c
65684 +++ b/kernel/kexec.c
65685 @@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
65686 unsigned long flags)
65687 {
65688 struct compat_kexec_segment in;
65689 - struct kexec_segment out, __user *ksegments;
65690 + struct kexec_segment out;
65691 + struct kexec_segment __user *ksegments;
65692 unsigned long i, result;
65693
65694 /* Don't allow clients that don't understand the native
65695 diff --git a/kernel/kmod.c b/kernel/kmod.c
65696 index 05698a7..a4c1e3a 100644
65697 --- a/kernel/kmod.c
65698 +++ b/kernel/kmod.c
65699 @@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
65700 kfree(info->argv);
65701 }
65702
65703 -static int call_modprobe(char *module_name, int wait)
65704 +static int call_modprobe(char *module_name, char *module_param, int wait)
65705 {
65706 static char *envp[] = {
65707 "HOME=/",
65708 @@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
65709 NULL
65710 };
65711
65712 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
65713 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
65714 if (!argv)
65715 goto out;
65716
65717 @@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
65718 argv[1] = "-q";
65719 argv[2] = "--";
65720 argv[3] = module_name; /* check free_modprobe_argv() */
65721 - argv[4] = NULL;
65722 + argv[4] = module_param;
65723 + argv[5] = NULL;
65724
65725 return call_usermodehelper_fns(modprobe_path, argv, envp,
65726 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
65727 @@ -112,9 +113,8 @@ out:
65728 * If module auto-loading support is disabled then this function
65729 * becomes a no-operation.
65730 */
65731 -int __request_module(bool wait, const char *fmt, ...)
65732 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
65733 {
65734 - va_list args;
65735 char module_name[MODULE_NAME_LEN];
65736 unsigned int max_modprobes;
65737 int ret;
65738 @@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
65739 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
65740 static int kmod_loop_msg;
65741
65742 - va_start(args, fmt);
65743 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
65744 - va_end(args);
65745 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
65746 if (ret >= MODULE_NAME_LEN)
65747 return -ENAMETOOLONG;
65748
65749 @@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
65750 if (ret)
65751 return ret;
65752
65753 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65754 + if (!current_uid()) {
65755 + /* hack to workaround consolekit/udisks stupidity */
65756 + read_lock(&tasklist_lock);
65757 + if (!strcmp(current->comm, "mount") &&
65758 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
65759 + read_unlock(&tasklist_lock);
65760 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
65761 + return -EPERM;
65762 + }
65763 + read_unlock(&tasklist_lock);
65764 + }
65765 +#endif
65766 +
65767 /* If modprobe needs a service that is in a module, we get a recursive
65768 * loop. Limit the number of running kmod threads to max_threads/2 or
65769 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
65770 @@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
65771
65772 trace_module_request(module_name, wait, _RET_IP_);
65773
65774 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
65775 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
65776
65777 atomic_dec(&kmod_concurrent);
65778 return ret;
65779 }
65780 +
65781 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
65782 +{
65783 + va_list args;
65784 + int ret;
65785 +
65786 + va_start(args, fmt);
65787 + ret = ____request_module(wait, module_param, fmt, args);
65788 + va_end(args);
65789 +
65790 + return ret;
65791 +}
65792 +
65793 +int __request_module(bool wait, const char *fmt, ...)
65794 +{
65795 + va_list args;
65796 + int ret;
65797 +
65798 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65799 + if (current_uid()) {
65800 + char module_param[MODULE_NAME_LEN];
65801 +
65802 + memset(module_param, 0, sizeof(module_param));
65803 +
65804 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
65805 +
65806 + va_start(args, fmt);
65807 + ret = ____request_module(wait, module_param, fmt, args);
65808 + va_end(args);
65809 +
65810 + return ret;
65811 + }
65812 +#endif
65813 +
65814 + va_start(args, fmt);
65815 + ret = ____request_module(wait, NULL, fmt, args);
65816 + va_end(args);
65817 +
65818 + return ret;
65819 +}
65820 +
65821 EXPORT_SYMBOL(__request_module);
65822 #endif /* CONFIG_MODULES */
65823
65824 @@ -267,7 +320,7 @@ static int wait_for_helper(void *data)
65825 *
65826 * Thus the __user pointer cast is valid here.
65827 */
65828 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
65829 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
65830
65831 /*
65832 * If ret is 0, either ____call_usermodehelper failed and the
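
The kmod.c changes split __request_module() so an extra module parameter can be threaded through to modprobe, and under GRKERNSEC_MODHARDEN a request triggered by a non-root user is tagged with a grsec_modharden_normal<uid>_ argument (alongside the mount/udisks special case). The snippet below only sketches how that extra argv slot is populated; it is ordinary userspace C, not the kernel's call_usermodehelper path, and the helper name is made up.

#include <stdio.h>

#define MODULE_NAME_LEN 64

/* builds the 6-slot argv the hunk above hands to modprobe; slot 4 is the
 * optional MODHARDEN tag, slot 5 the terminating NULL */
static void build_modprobe_argv(const char *module_name, unsigned int uid,
                                const char *argv[6], char param[MODULE_NAME_LEN])
{
    argv[0] = "/sbin/modprobe";
    argv[1] = "-q";
    argv[2] = "--";
    argv[3] = module_name;
    argv[4] = NULL;
    argv[5] = NULL;

    if (uid != 0) {                              /* non-root requester gets tagged */
        snprintf(param, MODULE_NAME_LEN - 1, "grsec_modharden_normal%u_", uid);
        argv[4] = param;
    }
}

int main(void)
{
    const char *argv[6];
    char param[MODULE_NAME_LEN] = "";

    build_modprobe_argv("net-pf-10", 1000, argv, param);
    for (int i = 0; argv[i] != NULL; i++)
        printf("argv[%d] = %s\n", i, argv[i]);
    return 0;
}
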
65833 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
65834 index c62b854..cb67968 100644
65835 --- a/kernel/kprobes.c
65836 +++ b/kernel/kprobes.c
65837 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
65838 * kernel image and loaded module images reside. This is required
65839 * so x86_64 can correctly handle the %rip-relative fixups.
65840 */
65841 - kip->insns = module_alloc(PAGE_SIZE);
65842 + kip->insns = module_alloc_exec(PAGE_SIZE);
65843 if (!kip->insns) {
65844 kfree(kip);
65845 return NULL;
65846 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
65847 */
65848 if (!list_is_singular(&kip->list)) {
65849 list_del(&kip->list);
65850 - module_free(NULL, kip->insns);
65851 + module_free_exec(NULL, kip->insns);
65852 kfree(kip);
65853 }
65854 return 1;
65855 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
65856 {
65857 int i, err = 0;
65858 unsigned long offset = 0, size = 0;
65859 - char *modname, namebuf[128];
65860 + char *modname, namebuf[KSYM_NAME_LEN];
65861 const char *symbol_name;
65862 void *addr;
65863 struct kprobe_blackpoint *kb;
65864 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
65865 const char *sym = NULL;
65866 unsigned int i = *(loff_t *) v;
65867 unsigned long offset = 0;
65868 - char *modname, namebuf[128];
65869 + char *modname, namebuf[KSYM_NAME_LEN];
65870
65871 head = &kprobe_table[i];
65872 preempt_disable();
65873 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
65874 index 4e316e1..5501eef 100644
65875 --- a/kernel/ksysfs.c
65876 +++ b/kernel/ksysfs.c
65877 @@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
65878 {
65879 if (count+1 > UEVENT_HELPER_PATH_LEN)
65880 return -ENOENT;
65881 + if (!capable(CAP_SYS_ADMIN))
65882 + return -EPERM;
65883 memcpy(uevent_helper, buf, count);
65884 uevent_helper[count] = '\0';
65885 if (count && uevent_helper[count-1] == '\n')
65886 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
65887 index ea9ee45..67ebc8f 100644
65888 --- a/kernel/lockdep.c
65889 +++ b/kernel/lockdep.c
65890 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
65891 end = (unsigned long) &_end,
65892 addr = (unsigned long) obj;
65893
65894 +#ifdef CONFIG_PAX_KERNEXEC
65895 + start = ktla_ktva(start);
65896 +#endif
65897 +
65898 /*
65899 * static variable?
65900 */
65901 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
65902 if (!static_obj(lock->key)) {
65903 debug_locks_off();
65904 printk("INFO: trying to register non-static key.\n");
65905 + printk("lock:%pS key:%pS.\n", lock, lock->key);
65906 printk("the code is fine but needs lockdep annotation.\n");
65907 printk("turning off the locking correctness validator.\n");
65908 dump_stack();
65909 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
65910 if (!class)
65911 return 0;
65912 }
65913 - atomic_inc((atomic_t *)&class->ops);
65914 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
65915 if (very_verbose(class)) {
65916 printk("\nacquire class [%p] %s", class->key, class->name);
65917 if (class->name_version > 1)
65918 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
65919 index 91c32a0..b2c71c5 100644
65920 --- a/kernel/lockdep_proc.c
65921 +++ b/kernel/lockdep_proc.c
65922 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
65923
65924 static void print_name(struct seq_file *m, struct lock_class *class)
65925 {
65926 - char str[128];
65927 + char str[KSYM_NAME_LEN];
65928 const char *name = class->name;
65929
65930 if (!name) {
65931 diff --git a/kernel/module.c b/kernel/module.c
65932 index 78ac6ec..e87db0e 100644
65933 --- a/kernel/module.c
65934 +++ b/kernel/module.c
65935 @@ -58,6 +58,7 @@
65936 #include <linux/jump_label.h>
65937 #include <linux/pfn.h>
65938 #include <linux/bsearch.h>
65939 +#include <linux/grsecurity.h>
65940
65941 #define CREATE_TRACE_POINTS
65942 #include <trace/events/module.h>
65943 @@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
65944
65945 /* Bounds of module allocation, for speeding __module_address.
65946 * Protected by module_mutex. */
65947 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
65948 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
65949 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
65950
65951 int register_module_notifier(struct notifier_block * nb)
65952 {
65953 @@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65954 return true;
65955
65956 list_for_each_entry_rcu(mod, &modules, list) {
65957 - struct symsearch arr[] = {
65958 + struct symsearch modarr[] = {
65959 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
65960 NOT_GPL_ONLY, false },
65961 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
65962 @@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65963 #endif
65964 };
65965
65966 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
65967 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
65968 return true;
65969 }
65970 return false;
65971 @@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
65972 static int percpu_modalloc(struct module *mod,
65973 unsigned long size, unsigned long align)
65974 {
65975 - if (align > PAGE_SIZE) {
65976 + if (align-1 >= PAGE_SIZE) {
65977 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
65978 mod->name, align, PAGE_SIZE);
65979 align = PAGE_SIZE;
65980 @@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
65981 static ssize_t show_coresize(struct module_attribute *mattr,
65982 struct module_kobject *mk, char *buffer)
65983 {
65984 - return sprintf(buffer, "%u\n", mk->mod->core_size);
65985 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
65986 }
65987
65988 static struct module_attribute modinfo_coresize =
65989 @@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
65990 static ssize_t show_initsize(struct module_attribute *mattr,
65991 struct module_kobject *mk, char *buffer)
65992 {
65993 - return sprintf(buffer, "%u\n", mk->mod->init_size);
65994 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
65995 }
65996
65997 static struct module_attribute modinfo_initsize =
65998 @@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
65999 */
66000 #ifdef CONFIG_SYSFS
66001
66002 -#ifdef CONFIG_KALLSYMS
66003 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66004 static inline bool sect_empty(const Elf_Shdr *sect)
66005 {
66006 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66007 @@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
66008
66009 static void unset_module_core_ro_nx(struct module *mod)
66010 {
66011 - set_page_attributes(mod->module_core + mod->core_text_size,
66012 - mod->module_core + mod->core_size,
66013 + set_page_attributes(mod->module_core_rw,
66014 + mod->module_core_rw + mod->core_size_rw,
66015 set_memory_x);
66016 - set_page_attributes(mod->module_core,
66017 - mod->module_core + mod->core_ro_size,
66018 + set_page_attributes(mod->module_core_rx,
66019 + mod->module_core_rx + mod->core_size_rx,
66020 set_memory_rw);
66021 }
66022
66023 static void unset_module_init_ro_nx(struct module *mod)
66024 {
66025 - set_page_attributes(mod->module_init + mod->init_text_size,
66026 - mod->module_init + mod->init_size,
66027 + set_page_attributes(mod->module_init_rw,
66028 + mod->module_init_rw + mod->init_size_rw,
66029 set_memory_x);
66030 - set_page_attributes(mod->module_init,
66031 - mod->module_init + mod->init_ro_size,
66032 + set_page_attributes(mod->module_init_rx,
66033 + mod->module_init_rx + mod->init_size_rx,
66034 set_memory_rw);
66035 }
66036
66037 @@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
66038
66039 mutex_lock(&module_mutex);
66040 list_for_each_entry_rcu(mod, &modules, list) {
66041 - if ((mod->module_core) && (mod->core_text_size)) {
66042 - set_page_attributes(mod->module_core,
66043 - mod->module_core + mod->core_text_size,
66044 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66045 + set_page_attributes(mod->module_core_rx,
66046 + mod->module_core_rx + mod->core_size_rx,
66047 set_memory_rw);
66048 }
66049 - if ((mod->module_init) && (mod->init_text_size)) {
66050 - set_page_attributes(mod->module_init,
66051 - mod->module_init + mod->init_text_size,
66052 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66053 + set_page_attributes(mod->module_init_rx,
66054 + mod->module_init_rx + mod->init_size_rx,
66055 set_memory_rw);
66056 }
66057 }
66058 @@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
66059
66060 mutex_lock(&module_mutex);
66061 list_for_each_entry_rcu(mod, &modules, list) {
66062 - if ((mod->module_core) && (mod->core_text_size)) {
66063 - set_page_attributes(mod->module_core,
66064 - mod->module_core + mod->core_text_size,
66065 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66066 + set_page_attributes(mod->module_core_rx,
66067 + mod->module_core_rx + mod->core_size_rx,
66068 set_memory_ro);
66069 }
66070 - if ((mod->module_init) && (mod->init_text_size)) {
66071 - set_page_attributes(mod->module_init,
66072 - mod->module_init + mod->init_text_size,
66073 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66074 + set_page_attributes(mod->module_init_rx,
66075 + mod->module_init_rx + mod->init_size_rx,
66076 set_memory_ro);
66077 }
66078 }
66079 @@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
66080
66081 /* This may be NULL, but that's OK */
66082 unset_module_init_ro_nx(mod);
66083 - module_free(mod, mod->module_init);
66084 + module_free(mod, mod->module_init_rw);
66085 + module_free_exec(mod, mod->module_init_rx);
66086 kfree(mod->args);
66087 percpu_modfree(mod);
66088
66089 /* Free lock-classes: */
66090 - lockdep_free_key_range(mod->module_core, mod->core_size);
66091 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66092 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66093
66094 /* Finally, free the core (containing the module structure) */
66095 unset_module_core_ro_nx(mod);
66096 - module_free(mod, mod->module_core);
66097 + module_free_exec(mod, mod->module_core_rx);
66098 + module_free(mod, mod->module_core_rw);
66099
66100 #ifdef CONFIG_MPU
66101 update_protections(current->mm);
66102 @@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66103 int ret = 0;
66104 const struct kernel_symbol *ksym;
66105
66106 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66107 + int is_fs_load = 0;
66108 + int register_filesystem_found = 0;
66109 + char *p;
66110 +
66111 + p = strstr(mod->args, "grsec_modharden_fs");
66112 + if (p) {
66113 + char *endptr = p + strlen("grsec_modharden_fs");
66114 + /* copy \0 as well */
66115 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66116 + is_fs_load = 1;
66117 + }
66118 +#endif
66119 +
66120 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66121 const char *name = info->strtab + sym[i].st_name;
66122
66123 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66124 + /* it's a real shame this will never get ripped and copied
66125 + upstream! ;(
66126 + */
66127 + if (is_fs_load && !strcmp(name, "register_filesystem"))
66128 + register_filesystem_found = 1;
66129 +#endif
66130 +
66131 switch (sym[i].st_shndx) {
66132 case SHN_COMMON:
66133 /* We compiled with -fno-common. These are not
66134 @@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66135 ksym = resolve_symbol_wait(mod, info, name);
66136 /* Ok if resolved. */
66137 if (ksym && !IS_ERR(ksym)) {
66138 + pax_open_kernel();
66139 sym[i].st_value = ksym->value;
66140 + pax_close_kernel();
66141 break;
66142 }
66143
66144 @@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66145 secbase = (unsigned long)mod_percpu(mod);
66146 else
66147 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66148 + pax_open_kernel();
66149 sym[i].st_value += secbase;
66150 + pax_close_kernel();
66151 break;
66152 }
66153 }
66154
66155 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66156 + if (is_fs_load && !register_filesystem_found) {
66157 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66158 + ret = -EPERM;
66159 + }
66160 +#endif
66161 +
66162 return ret;
66163 }
66164
66165 @@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66166 || s->sh_entsize != ~0UL
66167 || strstarts(sname, ".init"))
66168 continue;
66169 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66170 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66171 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66172 + else
66173 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66174 pr_debug("\t%s\n", sname);
66175 }
66176 - switch (m) {
66177 - case 0: /* executable */
66178 - mod->core_size = debug_align(mod->core_size);
66179 - mod->core_text_size = mod->core_size;
66180 - break;
66181 - case 1: /* RO: text and ro-data */
66182 - mod->core_size = debug_align(mod->core_size);
66183 - mod->core_ro_size = mod->core_size;
66184 - break;
66185 - case 3: /* whole core */
66186 - mod->core_size = debug_align(mod->core_size);
66187 - break;
66188 - }
66189 }
66190
66191 pr_debug("Init section allocation order:\n");
66192 @@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
66193 || s->sh_entsize != ~0UL
66194 || !strstarts(sname, ".init"))
66195 continue;
66196 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66197 - | INIT_OFFSET_MASK);
66198 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66199 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66200 + else
66201 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66202 + s->sh_entsize |= INIT_OFFSET_MASK;
66203 pr_debug("\t%s\n", sname);
66204 }
66205 - switch (m) {
66206 - case 0: /* executable */
66207 - mod->init_size = debug_align(mod->init_size);
66208 - mod->init_text_size = mod->init_size;
66209 - break;
66210 - case 1: /* RO: text and ro-data */
66211 - mod->init_size = debug_align(mod->init_size);
66212 - mod->init_ro_size = mod->init_size;
66213 - break;
66214 - case 3: /* whole init */
66215 - mod->init_size = debug_align(mod->init_size);
66216 - break;
66217 - }
66218 }
66219 }
66220
66221 @@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66222
66223 /* Put symbol section at end of init part of module. */
66224 symsect->sh_flags |= SHF_ALLOC;
66225 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66226 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
66227 info->index.sym) | INIT_OFFSET_MASK;
66228 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
66229
66230 @@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66231 }
66232
66233 /* Append room for core symbols at end of core part. */
66234 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66235 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66236 - mod->core_size += strtab_size;
66237 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66238 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66239 + mod->core_size_rx += strtab_size;
66240
66241 /* Put string table section at end of init part of module. */
66242 strsect->sh_flags |= SHF_ALLOC;
66243 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66244 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66245 info->index.str) | INIT_OFFSET_MASK;
66246 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
66247 }
66248 @@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66249 /* Make sure we get permanent strtab: don't use info->strtab. */
66250 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
66251
66252 + pax_open_kernel();
66253 +
66254 /* Set types up while we still have access to sections. */
66255 for (i = 0; i < mod->num_symtab; i++)
66256 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
66257
66258 - mod->core_symtab = dst = mod->module_core + info->symoffs;
66259 - mod->core_strtab = s = mod->module_core + info->stroffs;
66260 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66261 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66262 src = mod->symtab;
66263 *dst = *src;
66264 *s++ = 0;
66265 @@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66266 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
66267 }
66268 mod->core_num_syms = ndst;
66269 +
66270 + pax_close_kernel();
66271 }
66272 #else
66273 static inline void layout_symtab(struct module *mod, struct load_info *info)
66274 @@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
66275 return size == 0 ? NULL : vmalloc_exec(size);
66276 }
66277
66278 -static void *module_alloc_update_bounds(unsigned long size)
66279 +static void *module_alloc_update_bounds_rw(unsigned long size)
66280 {
66281 void *ret = module_alloc(size);
66282
66283 if (ret) {
66284 mutex_lock(&module_mutex);
66285 /* Update module bounds. */
66286 - if ((unsigned long)ret < module_addr_min)
66287 - module_addr_min = (unsigned long)ret;
66288 - if ((unsigned long)ret + size > module_addr_max)
66289 - module_addr_max = (unsigned long)ret + size;
66290 + if ((unsigned long)ret < module_addr_min_rw)
66291 + module_addr_min_rw = (unsigned long)ret;
66292 + if ((unsigned long)ret + size > module_addr_max_rw)
66293 + module_addr_max_rw = (unsigned long)ret + size;
66294 + mutex_unlock(&module_mutex);
66295 + }
66296 + return ret;
66297 +}
66298 +
66299 +static void *module_alloc_update_bounds_rx(unsigned long size)
66300 +{
66301 + void *ret = module_alloc_exec(size);
66302 +
66303 + if (ret) {
66304 + mutex_lock(&module_mutex);
66305 + /* Update module bounds. */
66306 + if ((unsigned long)ret < module_addr_min_rx)
66307 + module_addr_min_rx = (unsigned long)ret;
66308 + if ((unsigned long)ret + size > module_addr_max_rx)
66309 + module_addr_max_rx = (unsigned long)ret + size;
66310 mutex_unlock(&module_mutex);
66311 }
66312 return ret;
66313 @@ -2543,8 +2581,14 @@ static struct module *setup_load_info(struct load_info *info)
66314 static int check_modinfo(struct module *mod, struct load_info *info)
66315 {
66316 const char *modmagic = get_modinfo(info, "vermagic");
66317 + const char *license = get_modinfo(info, "license");
66318 int err;
66319
66320 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66321 + if (!license || !license_is_gpl_compatible(license))
66322 + return -ENOEXEC;
66323 +#endif
66324 +
66325 /* This is allowed: modprobe --force will invalidate it. */
66326 if (!modmagic) {
66327 err = try_to_force_load(mod, "bad vermagic");
66328 @@ -2567,7 +2611,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66329 }
66330
66331 /* Set up license info based on the info section */
66332 - set_license(mod, get_modinfo(info, "license"));
66333 + set_license(mod, license);
66334
66335 return 0;
66336 }
66337 @@ -2661,7 +2705,7 @@ static int move_module(struct module *mod, struct load_info *info)
66338 void *ptr;
66339
66340 /* Do the allocs. */
66341 - ptr = module_alloc_update_bounds(mod->core_size);
66342 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66343 /*
66344 * The pointer to this block is stored in the module structure
66345 * which is inside the block. Just mark it as not being a
66346 @@ -2671,23 +2715,50 @@ static int move_module(struct module *mod, struct load_info *info)
66347 if (!ptr)
66348 return -ENOMEM;
66349
66350 - memset(ptr, 0, mod->core_size);
66351 - mod->module_core = ptr;
66352 + memset(ptr, 0, mod->core_size_rw);
66353 + mod->module_core_rw = ptr;
66354
66355 - ptr = module_alloc_update_bounds(mod->init_size);
66356 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66357 /*
66358 * The pointer to this block is stored in the module structure
66359 * which is inside the block. This block doesn't need to be
66360 * scanned as it contains data and code that will be freed
66361 * after the module is initialized.
66362 */
66363 - kmemleak_ignore(ptr);
66364 - if (!ptr && mod->init_size) {
66365 - module_free(mod, mod->module_core);
66366 + kmemleak_not_leak(ptr);
66367 + if (!ptr && mod->init_size_rw) {
66368 + module_free(mod, mod->module_core_rw);
66369 return -ENOMEM;
66370 }
66371 - memset(ptr, 0, mod->init_size);
66372 - mod->module_init = ptr;
66373 + memset(ptr, 0, mod->init_size_rw);
66374 + mod->module_init_rw = ptr;
66375 +
66376 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66377 + kmemleak_not_leak(ptr);
66378 + if (!ptr) {
66379 + module_free(mod, mod->module_init_rw);
66380 + module_free(mod, mod->module_core_rw);
66381 + return -ENOMEM;
66382 + }
66383 +
66384 + pax_open_kernel();
66385 + memset(ptr, 0, mod->core_size_rx);
66386 + pax_close_kernel();
66387 + mod->module_core_rx = ptr;
66388 +
66389 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66390 + kmemleak_not_leak(ptr);
66391 + if (!ptr && mod->init_size_rx) {
66392 + module_free_exec(mod, mod->module_core_rx);
66393 + module_free(mod, mod->module_init_rw);
66394 + module_free(mod, mod->module_core_rw);
66395 + return -ENOMEM;
66396 + }
66397 +
66398 + pax_open_kernel();
66399 + memset(ptr, 0, mod->init_size_rx);
66400 + pax_close_kernel();
66401 + mod->module_init_rx = ptr;
66402
66403 /* Transfer each section which specifies SHF_ALLOC */
66404 pr_debug("final section addresses:\n");
66405 @@ -2698,16 +2769,45 @@ static int move_module(struct module *mod, struct load_info *info)
66406 if (!(shdr->sh_flags & SHF_ALLOC))
66407 continue;
66408
66409 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
66410 - dest = mod->module_init
66411 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66412 - else
66413 - dest = mod->module_core + shdr->sh_entsize;
66414 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66415 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66416 + dest = mod->module_init_rw
66417 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66418 + else
66419 + dest = mod->module_init_rx
66420 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66421 + } else {
66422 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66423 + dest = mod->module_core_rw + shdr->sh_entsize;
66424 + else
66425 + dest = mod->module_core_rx + shdr->sh_entsize;
66426 + }
66427 +
66428 + if (shdr->sh_type != SHT_NOBITS) {
66429 +
66430 +#ifdef CONFIG_PAX_KERNEXEC
66431 +#ifdef CONFIG_X86_64
66432 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66433 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66434 +#endif
66435 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66436 + pax_open_kernel();
66437 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66438 + pax_close_kernel();
66439 + } else
66440 +#endif
66441
66442 - if (shdr->sh_type != SHT_NOBITS)
66443 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66444 + }
66445 /* Update sh_addr to point to copy in image. */
66446 - shdr->sh_addr = (unsigned long)dest;
66447 +
66448 +#ifdef CONFIG_PAX_KERNEXEC
66449 + if (shdr->sh_flags & SHF_EXECINSTR)
66450 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
66451 + else
66452 +#endif
66453 +
66454 + shdr->sh_addr = (unsigned long)dest;
66455 pr_debug("\t0x%lx %s\n",
66456 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
66457 }
66458 @@ -2758,12 +2858,12 @@ static void flush_module_icache(const struct module *mod)
66459 * Do it before processing of module parameters, so the module
66460 * can provide parameter accessor functions of its own.
66461 */
66462 - if (mod->module_init)
66463 - flush_icache_range((unsigned long)mod->module_init,
66464 - (unsigned long)mod->module_init
66465 - + mod->init_size);
66466 - flush_icache_range((unsigned long)mod->module_core,
66467 - (unsigned long)mod->module_core + mod->core_size);
66468 + if (mod->module_init_rx)
66469 + flush_icache_range((unsigned long)mod->module_init_rx,
66470 + (unsigned long)mod->module_init_rx
66471 + + mod->init_size_rx);
66472 + flush_icache_range((unsigned long)mod->module_core_rx,
66473 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
66474
66475 set_fs(old_fs);
66476 }
66477 @@ -2833,8 +2933,10 @@ out:
66478 static void module_deallocate(struct module *mod, struct load_info *info)
66479 {
66480 percpu_modfree(mod);
66481 - module_free(mod, mod->module_init);
66482 - module_free(mod, mod->module_core);
66483 + module_free_exec(mod, mod->module_init_rx);
66484 + module_free_exec(mod, mod->module_core_rx);
66485 + module_free(mod, mod->module_init_rw);
66486 + module_free(mod, mod->module_core_rw);
66487 }
66488
66489 int __weak module_finalize(const Elf_Ehdr *hdr,
66490 @@ -2898,9 +3000,38 @@ static struct module *load_module(void __user *umod,
66491 if (err)
66492 goto free_unload;
66493
66494 + /* Now copy in args */
66495 + mod->args = strndup_user(uargs, ~0UL >> 1);
66496 + if (IS_ERR(mod->args)) {
66497 + err = PTR_ERR(mod->args);
66498 + goto free_unload;
66499 + }
66500 +
66501 /* Set up MODINFO_ATTR fields */
66502 setup_modinfo(mod, &info);
66503
66504 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66505 + {
66506 + char *p, *p2;
66507 +
66508 + if (strstr(mod->args, "grsec_modharden_netdev")) {
66509 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
66510 + err = -EPERM;
66511 + goto free_modinfo;
66512 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66513 + p += strlen("grsec_modharden_normal");
66514 + p2 = strstr(p, "_");
66515 + if (p2) {
66516 + *p2 = '\0';
66517 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66518 + *p2 = '_';
66519 + }
66520 + err = -EPERM;
66521 + goto free_modinfo;
66522 + }
66523 + }
66524 +#endif
66525 +
66526 /* Fix up syms, so that st_value is a pointer to location. */
66527 err = simplify_symbols(mod, &info);
66528 if (err < 0)
66529 @@ -2916,13 +3047,6 @@ static struct module *load_module(void __user *umod,
66530
66531 flush_module_icache(mod);
66532
66533 - /* Now copy in args */
66534 - mod->args = strndup_user(uargs, ~0UL >> 1);
66535 - if (IS_ERR(mod->args)) {
66536 - err = PTR_ERR(mod->args);
66537 - goto free_arch_cleanup;
66538 - }
66539 -
66540 /* Mark state as coming so strong_try_module_get() ignores us. */
66541 mod->state = MODULE_STATE_COMING;
66542
66543 @@ -2980,11 +3104,10 @@ static struct module *load_module(void __user *umod,
66544 unlock:
66545 mutex_unlock(&module_mutex);
66546 synchronize_sched();
66547 - kfree(mod->args);
66548 - free_arch_cleanup:
66549 module_arch_cleanup(mod);
66550 free_modinfo:
66551 free_modinfo(mod);
66552 + kfree(mod->args);
66553 free_unload:
66554 module_unload_free(mod);
66555 free_module:
66556 @@ -3025,16 +3148,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66557 MODULE_STATE_COMING, mod);
66558
66559 /* Set RO and NX regions for core */
66560 - set_section_ro_nx(mod->module_core,
66561 - mod->core_text_size,
66562 - mod->core_ro_size,
66563 - mod->core_size);
66564 + set_section_ro_nx(mod->module_core_rx,
66565 + mod->core_size_rx,
66566 + mod->core_size_rx,
66567 + mod->core_size_rx);
66568
66569 /* Set RO and NX regions for init */
66570 - set_section_ro_nx(mod->module_init,
66571 - mod->init_text_size,
66572 - mod->init_ro_size,
66573 - mod->init_size);
66574 + set_section_ro_nx(mod->module_init_rx,
66575 + mod->init_size_rx,
66576 + mod->init_size_rx,
66577 + mod->init_size_rx);
66578
66579 do_mod_ctors(mod);
66580 /* Start the module */
66581 @@ -3080,11 +3203,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66582 mod->strtab = mod->core_strtab;
66583 #endif
66584 unset_module_init_ro_nx(mod);
66585 - module_free(mod, mod->module_init);
66586 - mod->module_init = NULL;
66587 - mod->init_size = 0;
66588 - mod->init_ro_size = 0;
66589 - mod->init_text_size = 0;
66590 + module_free(mod, mod->module_init_rw);
66591 + module_free_exec(mod, mod->module_init_rx);
66592 + mod->module_init_rw = NULL;
66593 + mod->module_init_rx = NULL;
66594 + mod->init_size_rw = 0;
66595 + mod->init_size_rx = 0;
66596 mutex_unlock(&module_mutex);
66597
66598 return 0;
66599 @@ -3115,10 +3239,16 @@ static const char *get_ksymbol(struct module *mod,
66600 unsigned long nextval;
66601
66602 /* At worse, next value is at end of module */
66603 - if (within_module_init(addr, mod))
66604 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
66605 + if (within_module_init_rx(addr, mod))
66606 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
66607 + else if (within_module_init_rw(addr, mod))
66608 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
66609 + else if (within_module_core_rx(addr, mod))
66610 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
66611 + else if (within_module_core_rw(addr, mod))
66612 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
66613 else
66614 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
66615 + return NULL;
66616
66617 /* Scan for closest preceding symbol, and next symbol. (ELF
66618 starts real symbols at 1). */
66619 @@ -3353,7 +3483,7 @@ static int m_show(struct seq_file *m, void *p)
66620 char buf[8];
66621
66622 seq_printf(m, "%s %u",
66623 - mod->name, mod->init_size + mod->core_size);
66624 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
66625 print_unload_info(m, mod);
66626
66627 /* Informative for users. */
66628 @@ -3362,7 +3492,7 @@ static int m_show(struct seq_file *m, void *p)
66629 mod->state == MODULE_STATE_COMING ? "Loading":
66630 "Live");
66631 /* Used by oprofile and other similar tools. */
66632 - seq_printf(m, " 0x%pK", mod->module_core);
66633 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
66634
66635 /* Taints info */
66636 if (mod->taints)
66637 @@ -3398,7 +3528,17 @@ static const struct file_operations proc_modules_operations = {
66638
66639 static int __init proc_modules_init(void)
66640 {
66641 +#ifndef CONFIG_GRKERNSEC_HIDESYM
66642 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66643 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66644 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66645 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
66646 +#else
66647 proc_create("modules", 0, NULL, &proc_modules_operations);
66648 +#endif
66649 +#else
66650 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66651 +#endif
66652 return 0;
66653 }
66654 module_init(proc_modules_init);
66655 @@ -3457,12 +3597,12 @@ struct module *__module_address(unsigned long addr)
66656 {
66657 struct module *mod;
66658
66659 - if (addr < module_addr_min || addr > module_addr_max)
66660 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
66661 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
66662 return NULL;
66663
66664 list_for_each_entry_rcu(mod, &modules, list)
66665 - if (within_module_core(addr, mod)
66666 - || within_module_init(addr, mod))
66667 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
66668 return mod;
66669 return NULL;
66670 }
66671 @@ -3496,11 +3636,20 @@ bool is_module_text_address(unsigned long addr)
66672 */
66673 struct module *__module_text_address(unsigned long addr)
66674 {
66675 - struct module *mod = __module_address(addr);
66676 + struct module *mod;
66677 +
66678 +#ifdef CONFIG_X86_32
66679 + addr = ktla_ktva(addr);
66680 +#endif
66681 +
66682 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
66683 + return NULL;
66684 +
66685 + mod = __module_address(addr);
66686 +
66687 if (mod) {
66688 /* Make sure it's within the text section. */
66689 - if (!within(addr, mod->module_init, mod->init_text_size)
66690 - && !within(addr, mod->module_core, mod->core_text_size))
66691 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
66692 mod = NULL;
66693 }
66694 return mod;
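The module.c hunks above split each module's memory into a writable (RW) region for data and a read-only/executable (RX) region for text and rodata, bracketing every write into the RX half with pax_open_kernel()/pax_close_kernel(). The placement decision itself is just a flag test on each ELF section header. The stand-alone sketch below mirrors that test with the standard SHF_* macros from <elf.h>; the region names are made up for the demo and the init-vs-core distinction is left out.

#include <elf.h>
#include <stdio.h>

/* Writable sections land in the RW mapping, everything else that is
 * loaded lands in the RX mapping (text and rodata). Non-allocated
 * sections are skipped by move_module() before this decision. */
static const char *pick_region(unsigned long sh_flags)
{
	if (!(sh_flags & SHF_ALLOC))
		return "(not loaded)";
	if (sh_flags & SHF_WRITE)
		return "module_core_rw";	/* data, bss */
	return "module_core_rx";		/* text, rodata */
}

int main(void)
{
	printf(".text   -> %s\n", pick_region(SHF_ALLOC | SHF_EXECINSTR));
	printf(".rodata -> %s\n", pick_region(SHF_ALLOC));
	printf(".data   -> %s\n", pick_region(SHF_ALLOC | SHF_WRITE));
	printf(".debug  -> %s\n", pick_region(0));
	return 0;
}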
66695 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
66696 index 7e3443f..b2a1e6b 100644
66697 --- a/kernel/mutex-debug.c
66698 +++ b/kernel/mutex-debug.c
66699 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
66700 }
66701
66702 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66703 - struct thread_info *ti)
66704 + struct task_struct *task)
66705 {
66706 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
66707
66708 /* Mark the current thread as blocked on the lock: */
66709 - ti->task->blocked_on = waiter;
66710 + task->blocked_on = waiter;
66711 }
66712
66713 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66714 - struct thread_info *ti)
66715 + struct task_struct *task)
66716 {
66717 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
66718 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
66719 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
66720 - ti->task->blocked_on = NULL;
66721 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
66722 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
66723 + task->blocked_on = NULL;
66724
66725 list_del_init(&waiter->list);
66726 waiter->task = NULL;
66727 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
66728 index 0799fd3..d06ae3b 100644
66729 --- a/kernel/mutex-debug.h
66730 +++ b/kernel/mutex-debug.h
66731 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
66732 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
66733 extern void debug_mutex_add_waiter(struct mutex *lock,
66734 struct mutex_waiter *waiter,
66735 - struct thread_info *ti);
66736 + struct task_struct *task);
66737 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66738 - struct thread_info *ti);
66739 + struct task_struct *task);
66740 extern void debug_mutex_unlock(struct mutex *lock);
66741 extern void debug_mutex_init(struct mutex *lock, const char *name,
66742 struct lock_class_key *key);
66743 diff --git a/kernel/mutex.c b/kernel/mutex.c
66744 index a307cc9..27fd2e9 100644
66745 --- a/kernel/mutex.c
66746 +++ b/kernel/mutex.c
66747 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66748 spin_lock_mutex(&lock->wait_lock, flags);
66749
66750 debug_mutex_lock_common(lock, &waiter);
66751 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
66752 + debug_mutex_add_waiter(lock, &waiter, task);
66753
66754 /* add waiting tasks to the end of the waitqueue (FIFO): */
66755 list_add_tail(&waiter.list, &lock->wait_list);
66756 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66757 * TASK_UNINTERRUPTIBLE case.)
66758 */
66759 if (unlikely(signal_pending_state(state, task))) {
66760 - mutex_remove_waiter(lock, &waiter,
66761 - task_thread_info(task));
66762 + mutex_remove_waiter(lock, &waiter, task);
66763 mutex_release(&lock->dep_map, 1, ip);
66764 spin_unlock_mutex(&lock->wait_lock, flags);
66765
66766 @@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66767 done:
66768 lock_acquired(&lock->dep_map, ip);
66769 /* got the lock - rejoice! */
66770 - mutex_remove_waiter(lock, &waiter, current_thread_info());
66771 + mutex_remove_waiter(lock, &waiter, task);
66772 mutex_set_owner(lock);
66773
66774 /* set it to 0 if there are no waiters left: */
66775 diff --git a/kernel/panic.c b/kernel/panic.c
66776 index 8ed89a1..e83856a 100644
66777 --- a/kernel/panic.c
66778 +++ b/kernel/panic.c
66779 @@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
66780 const char *board;
66781
66782 printk(KERN_WARNING "------------[ cut here ]------------\n");
66783 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
66784 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
66785 board = dmi_get_system_info(DMI_PRODUCT_NAME);
66786 if (board)
66787 printk(KERN_WARNING "Hardware name: %s\n", board);
66788 @@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
66789 */
66790 void __stack_chk_fail(void)
66791 {
66792 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
66793 + dump_stack();
66794 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
66795 __builtin_return_address(0));
66796 }
66797 EXPORT_SYMBOL(__stack_chk_fail);
66798 diff --git a/kernel/pid.c b/kernel/pid.c
66799 index 9f08dfa..6765c40 100644
66800 --- a/kernel/pid.c
66801 +++ b/kernel/pid.c
66802 @@ -33,6 +33,7 @@
66803 #include <linux/rculist.h>
66804 #include <linux/bootmem.h>
66805 #include <linux/hash.h>
66806 +#include <linux/security.h>
66807 #include <linux/pid_namespace.h>
66808 #include <linux/init_task.h>
66809 #include <linux/syscalls.h>
66810 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
66811
66812 int pid_max = PID_MAX_DEFAULT;
66813
66814 -#define RESERVED_PIDS 300
66815 +#define RESERVED_PIDS 500
66816
66817 int pid_max_min = RESERVED_PIDS + 1;
66818 int pid_max_max = PID_MAX_LIMIT;
66819 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
66820 */
66821 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
66822 {
66823 + struct task_struct *task;
66824 +
66825 rcu_lockdep_assert(rcu_read_lock_held(),
66826 "find_task_by_pid_ns() needs rcu_read_lock()"
66827 " protection");
66828 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66829 +
66830 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66831 +
66832 + if (gr_pid_is_chrooted(task))
66833 + return NULL;
66834 +
66835 + return task;
66836 }
66837
66838 struct task_struct *find_task_by_vpid(pid_t vnr)
66839 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
66840 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
66841 }
66842
66843 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
66844 +{
66845 + rcu_lockdep_assert(rcu_read_lock_held(),
66846 + "find_task_by_pid_ns() needs rcu_read_lock()"
66847 + " protection");
66848 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
66849 +}
66850 +
66851 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
66852 {
66853 struct pid *pid;
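With the pid.c change, find_task_by_pid_ns() hides tasks from chrooted callers via gr_pid_is_chrooted(), and the new find_task_by_vpid_unrestricted() keeps the old unfiltered behaviour for the few call sites (such as the tgkill path in kernel/signal.c further down) that still need it. The locking contract is unchanged: the caller must hold rcu_read_lock() across the lookup and any use of the returned task. A minimal, hypothetical caller might look like the kernel-style sketch below; it is an illustration, not code from the patch.

#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/pid.h>

/* Returns 1 if a task with this virtual PID is visible to the caller.
 * With the patch applied, a chrooted caller will not see tasks outside
 * its chroot, because find_task_by_vpid() filters them out. */
static int demo_pid_visible(pid_t nr)
{
	struct task_struct *task;
	int visible;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	visible = (task != NULL);
	rcu_read_unlock();

	return visible;
}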
66854 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
66855 index 125cb67..a4d1c30 100644
66856 --- a/kernel/posix-cpu-timers.c
66857 +++ b/kernel/posix-cpu-timers.c
66858 @@ -6,6 +6,7 @@
66859 #include <linux/posix-timers.h>
66860 #include <linux/errno.h>
66861 #include <linux/math64.h>
66862 +#include <linux/security.h>
66863 #include <asm/uaccess.h>
66864 #include <linux/kernel_stat.h>
66865 #include <trace/events/timer.h>
66866 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
66867
66868 static __init int init_posix_cpu_timers(void)
66869 {
66870 - struct k_clock process = {
66871 + static struct k_clock process = {
66872 .clock_getres = process_cpu_clock_getres,
66873 .clock_get = process_cpu_clock_get,
66874 .timer_create = process_cpu_timer_create,
66875 .nsleep = process_cpu_nsleep,
66876 .nsleep_restart = process_cpu_nsleep_restart,
66877 };
66878 - struct k_clock thread = {
66879 + static struct k_clock thread = {
66880 .clock_getres = thread_cpu_clock_getres,
66881 .clock_get = thread_cpu_clock_get,
66882 .timer_create = thread_cpu_timer_create,
66883 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
66884 index 69185ae..cc2847a 100644
66885 --- a/kernel/posix-timers.c
66886 +++ b/kernel/posix-timers.c
66887 @@ -43,6 +43,7 @@
66888 #include <linux/idr.h>
66889 #include <linux/posix-clock.h>
66890 #include <linux/posix-timers.h>
66891 +#include <linux/grsecurity.h>
66892 #include <linux/syscalls.h>
66893 #include <linux/wait.h>
66894 #include <linux/workqueue.h>
66895 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
66896 * which we beg off on and pass to do_sys_settimeofday().
66897 */
66898
66899 -static struct k_clock posix_clocks[MAX_CLOCKS];
66900 +static struct k_clock *posix_clocks[MAX_CLOCKS];
66901
66902 /*
66903 * These ones are defined below.
66904 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
66905 */
66906 static __init int init_posix_timers(void)
66907 {
66908 - struct k_clock clock_realtime = {
66909 + static struct k_clock clock_realtime = {
66910 .clock_getres = hrtimer_get_res,
66911 .clock_get = posix_clock_realtime_get,
66912 .clock_set = posix_clock_realtime_set,
66913 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
66914 .timer_get = common_timer_get,
66915 .timer_del = common_timer_del,
66916 };
66917 - struct k_clock clock_monotonic = {
66918 + static struct k_clock clock_monotonic = {
66919 .clock_getres = hrtimer_get_res,
66920 .clock_get = posix_ktime_get_ts,
66921 .nsleep = common_nsleep,
66922 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
66923 .timer_get = common_timer_get,
66924 .timer_del = common_timer_del,
66925 };
66926 - struct k_clock clock_monotonic_raw = {
66927 + static struct k_clock clock_monotonic_raw = {
66928 .clock_getres = hrtimer_get_res,
66929 .clock_get = posix_get_monotonic_raw,
66930 };
66931 - struct k_clock clock_realtime_coarse = {
66932 + static struct k_clock clock_realtime_coarse = {
66933 .clock_getres = posix_get_coarse_res,
66934 .clock_get = posix_get_realtime_coarse,
66935 };
66936 - struct k_clock clock_monotonic_coarse = {
66937 + static struct k_clock clock_monotonic_coarse = {
66938 .clock_getres = posix_get_coarse_res,
66939 .clock_get = posix_get_monotonic_coarse,
66940 };
66941 - struct k_clock clock_boottime = {
66942 + static struct k_clock clock_boottime = {
66943 .clock_getres = hrtimer_get_res,
66944 .clock_get = posix_get_boottime,
66945 .nsleep = common_nsleep,
66946 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
66947 return;
66948 }
66949
66950 - posix_clocks[clock_id] = *new_clock;
66951 + posix_clocks[clock_id] = new_clock;
66952 }
66953 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
66954
66955 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
66956 return (id & CLOCKFD_MASK) == CLOCKFD ?
66957 &clock_posix_dynamic : &clock_posix_cpu;
66958
66959 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
66960 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
66961 return NULL;
66962 - return &posix_clocks[id];
66963 + return posix_clocks[id];
66964 }
66965
66966 static int common_timer_create(struct k_itimer *new_timer)
66967 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
66968 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
66969 return -EFAULT;
66970
66971 + /* Only the CLOCK_REALTIME clock can be set; all other clocks
66972 + have their clock_set fptr set to a nosettime dummy function.
66973 + CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
66974 + call common_clock_set, which calls do_sys_settimeofday, which
66975 + we hook.
66976 + */
66977 +
66978 return kc->clock_set(which_clock, &new_tp);
66979 }
66980
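The posix-timers.c change turns the by-value array of struct k_clock into an array of pointers, so posix_timers_register_clock() now stores the caller's pointer instead of copying the structure; that is why every k_clock initializer in init_posix_timers() (and in init_posix_cpu_timers() above) had to gain static storage duration. The small userspace analogue below shows the dangling-pointer hazard the static keyword avoids; all names are illustrative.

#include <stdio.h>

struct k_clock_demo {
	const char *name;
};

#define MAX_CLOCKS_DEMO 4
static struct k_clock_demo *registry[MAX_CLOCKS_DEMO];

static void register_clock(int id, struct k_clock_demo *clk)
{
	registry[id] = clk;		/* the pointer is kept, not the contents */
}

static void init_clocks(void)
{
	static struct k_clock_demo realtime = { .name = "realtime" };

	register_clock(0, &realtime);	/* safe: static storage outlives this function */
	/* a plain "struct k_clock_demo monotonic = ..." registered here
	 * would leave registry[] pointing at a dead stack frame */
}

int main(void)
{
	init_clocks();
	printf("clock 0: %s\n", registry[0]->name);
	return 0;
}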
66981 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
66982 index d523593..68197a4 100644
66983 --- a/kernel/power/poweroff.c
66984 +++ b/kernel/power/poweroff.c
66985 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
66986 .enable_mask = SYSRQ_ENABLE_BOOT,
66987 };
66988
66989 -static int pm_sysrq_init(void)
66990 +static int __init pm_sysrq_init(void)
66991 {
66992 register_sysrq_key('o', &sysrq_poweroff_op);
66993 return 0;
66994 diff --git a/kernel/power/process.c b/kernel/power/process.c
66995 index 19db29f..33b52b6 100644
66996 --- a/kernel/power/process.c
66997 +++ b/kernel/power/process.c
66998 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
66999 u64 elapsed_csecs64;
67000 unsigned int elapsed_csecs;
67001 bool wakeup = false;
67002 + bool timedout = false;
67003
67004 do_gettimeofday(&start);
67005
67006 @@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
67007
67008 while (true) {
67009 todo = 0;
67010 + if (time_after(jiffies, end_time))
67011 + timedout = true;
67012 read_lock(&tasklist_lock);
67013 do_each_thread(g, p) {
67014 if (p == current || !freeze_task(p))
67015 @@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
67016 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
67017 * transition can't race with task state testing here.
67018 */
67019 - if (!task_is_stopped_or_traced(p) &&
67020 - !freezer_should_skip(p))
67021 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67022 todo++;
67023 + if (timedout) {
67024 + printk(KERN_ERR "Task refusing to freeze:\n");
67025 + sched_show_task(p);
67026 + }
67027 + }
67028 } while_each_thread(g, p);
67029 read_unlock(&tasklist_lock);
67030
67031 @@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
67032 todo += wq_busy;
67033 }
67034
67035 - if (!todo || time_after(jiffies, end_time))
67036 + if (!todo || timedout)
67037 break;
67038
67039 if (pm_wakeup_pending()) {
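The try_to_freeze_tasks() change samples the deadline once per pass into a timedout flag and only breaks out after completing the pass in which the deadline expired, so every task that refuses to freeze is reported through sched_show_task() before the freezer gives up. The plain C sketch below reproduces just that loop shape with made-up names; it is only called with an already-expired deadline, so it terminates after a single reporting pass.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* One pass over a pretend task list; task 2 always refuses to freeze. */
static bool freeze_all_demo(time_t deadline)
{
	int todo;

	while (true) {
		bool timedout = time(NULL) > deadline;	/* sampled once per pass */

		todo = 0;
		for (int task = 0; task < 3; task++) {
			if (task == 2) {
				todo++;
				if (timedout)
					printf("task %d refusing to freeze\n", task);
			}
		}
		if (!todo || timedout)	/* finish the reporting pass, then stop */
			break;
	}
	return todo == 0;
}

int main(void)
{
	printf("frozen: %s\n", freeze_all_demo(time(NULL) - 1) ? "yes" : "no");
	return 0;
}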
67040 diff --git a/kernel/printk.c b/kernel/printk.c
67041 index b663c2c..1d6ba7a 100644
67042 --- a/kernel/printk.c
67043 +++ b/kernel/printk.c
67044 @@ -316,6 +316,11 @@ static int check_syslog_permissions(int type, bool from_file)
67045 if (from_file && type != SYSLOG_ACTION_OPEN)
67046 return 0;
67047
67048 +#ifdef CONFIG_GRKERNSEC_DMESG
67049 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67050 + return -EPERM;
67051 +#endif
67052 +
67053 if (syslog_action_restricted(type)) {
67054 if (capable(CAP_SYSLOG))
67055 return 0;
67056 diff --git a/kernel/profile.c b/kernel/profile.c
67057 index 76b8e77..a2930e8 100644
67058 --- a/kernel/profile.c
67059 +++ b/kernel/profile.c
67060 @@ -39,7 +39,7 @@ struct profile_hit {
67061 /* Oprofile timer tick hook */
67062 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67063
67064 -static atomic_t *prof_buffer;
67065 +static atomic_unchecked_t *prof_buffer;
67066 static unsigned long prof_len, prof_shift;
67067
67068 int prof_on __read_mostly;
67069 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67070 hits[i].pc = 0;
67071 continue;
67072 }
67073 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67074 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67075 hits[i].hits = hits[i].pc = 0;
67076 }
67077 }
67078 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67079 * Add the current hit(s) and flush the write-queue out
67080 * to the global buffer:
67081 */
67082 - atomic_add(nr_hits, &prof_buffer[pc]);
67083 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67084 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67085 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67086 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67087 hits[i].pc = hits[i].hits = 0;
67088 }
67089 out:
67090 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67091 {
67092 unsigned long pc;
67093 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67094 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67095 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67096 }
67097 #endif /* !CONFIG_SMP */
67098
67099 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67100 return -EFAULT;
67101 buf++; p++; count--; read++;
67102 }
67103 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67104 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67105 if (copy_to_user(buf, (void *)pnt, count))
67106 return -EFAULT;
67107 read += count;
67108 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67109 }
67110 #endif
67111 profile_discard_flip_buffers();
67112 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67113 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67114 return count;
67115 }
67116
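The atomic_t to atomic_unchecked_t conversions here (and in the rcutorture, rcutree, rtmutex-tester and autogroup hunks further down) follow the PaX REFCOUNT convention: plain atomic_t is treated as a reference count and gains overflow protection, while pure statistics counters that may legitimately wrap, such as the prof_buffer hit counts, are switched to the *_unchecked variants that keep ordinary wrapping arithmetic. The userspace sketch below only illustrates that semantic split; it is not the kernel implementation.

#include <limits.h>
#include <stdio.h>

/* "checked" counter: refuses to wrap, the way a hardened refcount
 * saturates or traps instead of overflowing into a use-after-free. */
static int checked_inc(int *v)
{
	if (*v == INT_MAX)
		return -1;	/* overflow refused */
	(*v)++;
	return 0;
}

/* "unchecked" counter: plain modular arithmetic, fine for statistics. */
static void unchecked_inc(unsigned int *v)
{
	(*v)++;
}

int main(void)
{
	int refcount = INT_MAX;
	unsigned int hits = UINT_MAX;

	printf("checked_inc at INT_MAX -> %d\n", checked_inc(&refcount));
	unchecked_inc(&hits);
	printf("unchecked counter wrapped to %u\n", hits);
	return 0;
}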
67117 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67118 index ee8d49b..bd3d790 100644
67119 --- a/kernel/ptrace.c
67120 +++ b/kernel/ptrace.c
67121 @@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67122
67123 if (seize)
67124 flags |= PT_SEIZED;
67125 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
67126 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
67127 flags |= PT_PTRACE_CAP;
67128 task->ptrace = flags;
67129
67130 @@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67131 break;
67132 return -EIO;
67133 }
67134 - if (copy_to_user(dst, buf, retval))
67135 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67136 return -EFAULT;
67137 copied += retval;
67138 src += retval;
67139 @@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
67140 bool seized = child->ptrace & PT_SEIZED;
67141 int ret = -EIO;
67142 siginfo_t siginfo, *si;
67143 - void __user *datavp = (void __user *) data;
67144 + void __user *datavp = (__force void __user *) data;
67145 unsigned long __user *datalp = datavp;
67146 unsigned long flags;
67147
67148 @@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67149 goto out;
67150 }
67151
67152 + if (gr_handle_ptrace(child, request)) {
67153 + ret = -EPERM;
67154 + goto out_put_task_struct;
67155 + }
67156 +
67157 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67158 ret = ptrace_attach(child, request, addr, data);
67159 /*
67160 * Some architectures need to do book-keeping after
67161 * a ptrace attach.
67162 */
67163 - if (!ret)
67164 + if (!ret) {
67165 arch_ptrace_attach(child);
67166 + gr_audit_ptrace(child);
67167 + }
67168 goto out_put_task_struct;
67169 }
67170
67171 @@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
67172 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67173 if (copied != sizeof(tmp))
67174 return -EIO;
67175 - return put_user(tmp, (unsigned long __user *)data);
67176 + return put_user(tmp, (__force unsigned long __user *)data);
67177 }
67178
67179 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
67180 @@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67181 goto out;
67182 }
67183
67184 + if (gr_handle_ptrace(child, request)) {
67185 + ret = -EPERM;
67186 + goto out_put_task_struct;
67187 + }
67188 +
67189 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67190 ret = ptrace_attach(child, request, addr, data);
67191 /*
67192 * Some architectures need to do book-keeping after
67193 * a ptrace attach.
67194 */
67195 - if (!ret)
67196 + if (!ret) {
67197 arch_ptrace_attach(child);
67198 + gr_audit_ptrace(child);
67199 + }
67200 goto out_put_task_struct;
67201 }
67202
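Besides the gr_handle_ptrace()/gr_audit_ptrace() hooks, the ptrace_readdata() hunk adds a retval > sizeof(buf) check before copy_to_user(), so a length returned by access_process_vm() can never drive a copy past the on-stack buffer. The kernel-style sketch below (hypothetical names, not from the patch) shows the same pattern of validating an externally produced length against the real buffer size before copying to user space.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Copy 'produced' bytes of 'buf' (which holds at most 'buf_size' bytes)
 * out to user space, refusing lengths that cannot be correct. */
static long demo_copy_out(void __user *dst, const char *buf,
			  size_t buf_size, long produced)
{
	if (produced < 0 || (size_t)produced > buf_size)
		return -EINVAL;		/* impossible length: reject */
	if (copy_to_user(dst, buf, produced))
		return -EFAULT;
	return produced;
}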
67203 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
67204 index 37a5444..eec170a 100644
67205 --- a/kernel/rcutiny.c
67206 +++ b/kernel/rcutiny.c
67207 @@ -46,7 +46,7 @@
67208 struct rcu_ctrlblk;
67209 static void invoke_rcu_callbacks(void);
67210 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
67211 -static void rcu_process_callbacks(struct softirq_action *unused);
67212 +static void rcu_process_callbacks(void);
67213 static void __call_rcu(struct rcu_head *head,
67214 void (*func)(struct rcu_head *rcu),
67215 struct rcu_ctrlblk *rcp);
67216 @@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
67217 rcu_is_callbacks_kthread()));
67218 }
67219
67220 -static void rcu_process_callbacks(struct softirq_action *unused)
67221 +static void rcu_process_callbacks(void)
67222 {
67223 __rcu_process_callbacks(&rcu_sched_ctrlblk);
67224 __rcu_process_callbacks(&rcu_bh_ctrlblk);
67225 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
67226 index 22ecea0..3789898 100644
67227 --- a/kernel/rcutiny_plugin.h
67228 +++ b/kernel/rcutiny_plugin.h
67229 @@ -955,7 +955,7 @@ static int rcu_kthread(void *arg)
67230 have_rcu_kthread_work = morework;
67231 local_irq_restore(flags);
67232 if (work)
67233 - rcu_process_callbacks(NULL);
67234 + rcu_process_callbacks();
67235 schedule_timeout_interruptible(1); /* Leave CPU for others. */
67236 }
67237
67238 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67239 index a89b381..efdcad8 100644
67240 --- a/kernel/rcutorture.c
67241 +++ b/kernel/rcutorture.c
67242 @@ -158,12 +158,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
67243 { 0 };
67244 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67245 { 0 };
67246 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67247 -static atomic_t n_rcu_torture_alloc;
67248 -static atomic_t n_rcu_torture_alloc_fail;
67249 -static atomic_t n_rcu_torture_free;
67250 -static atomic_t n_rcu_torture_mberror;
67251 -static atomic_t n_rcu_torture_error;
67252 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67253 +static atomic_unchecked_t n_rcu_torture_alloc;
67254 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
67255 +static atomic_unchecked_t n_rcu_torture_free;
67256 +static atomic_unchecked_t n_rcu_torture_mberror;
67257 +static atomic_unchecked_t n_rcu_torture_error;
67258 static long n_rcu_torture_boost_ktrerror;
67259 static long n_rcu_torture_boost_rterror;
67260 static long n_rcu_torture_boost_failure;
67261 @@ -253,11 +253,11 @@ rcu_torture_alloc(void)
67262
67263 spin_lock_bh(&rcu_torture_lock);
67264 if (list_empty(&rcu_torture_freelist)) {
67265 - atomic_inc(&n_rcu_torture_alloc_fail);
67266 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67267 spin_unlock_bh(&rcu_torture_lock);
67268 return NULL;
67269 }
67270 - atomic_inc(&n_rcu_torture_alloc);
67271 + atomic_inc_unchecked(&n_rcu_torture_alloc);
67272 p = rcu_torture_freelist.next;
67273 list_del_init(p);
67274 spin_unlock_bh(&rcu_torture_lock);
67275 @@ -270,7 +270,7 @@ rcu_torture_alloc(void)
67276 static void
67277 rcu_torture_free(struct rcu_torture *p)
67278 {
67279 - atomic_inc(&n_rcu_torture_free);
67280 + atomic_inc_unchecked(&n_rcu_torture_free);
67281 spin_lock_bh(&rcu_torture_lock);
67282 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67283 spin_unlock_bh(&rcu_torture_lock);
67284 @@ -390,7 +390,7 @@ rcu_torture_cb(struct rcu_head *p)
67285 i = rp->rtort_pipe_count;
67286 if (i > RCU_TORTURE_PIPE_LEN)
67287 i = RCU_TORTURE_PIPE_LEN;
67288 - atomic_inc(&rcu_torture_wcount[i]);
67289 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67290 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67291 rp->rtort_mbtest = 0;
67292 rcu_torture_free(rp);
67293 @@ -437,7 +437,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67294 i = rp->rtort_pipe_count;
67295 if (i > RCU_TORTURE_PIPE_LEN)
67296 i = RCU_TORTURE_PIPE_LEN;
67297 - atomic_inc(&rcu_torture_wcount[i]);
67298 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67299 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67300 rp->rtort_mbtest = 0;
67301 list_del(&rp->rtort_free);
67302 @@ -926,7 +926,7 @@ rcu_torture_writer(void *arg)
67303 i = old_rp->rtort_pipe_count;
67304 if (i > RCU_TORTURE_PIPE_LEN)
67305 i = RCU_TORTURE_PIPE_LEN;
67306 - atomic_inc(&rcu_torture_wcount[i]);
67307 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67308 old_rp->rtort_pipe_count++;
67309 cur_ops->deferred_free(old_rp);
67310 }
67311 @@ -1007,7 +1007,7 @@ static void rcu_torture_timer(unsigned long unused)
67312 }
67313 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67314 if (p->rtort_mbtest == 0)
67315 - atomic_inc(&n_rcu_torture_mberror);
67316 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67317 spin_lock(&rand_lock);
67318 cur_ops->read_delay(&rand);
67319 n_rcu_torture_timers++;
67320 @@ -1071,7 +1071,7 @@ rcu_torture_reader(void *arg)
67321 }
67322 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67323 if (p->rtort_mbtest == 0)
67324 - atomic_inc(&n_rcu_torture_mberror);
67325 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67326 cur_ops->read_delay(&rand);
67327 preempt_disable();
67328 pipe_count = p->rtort_pipe_count;
67329 @@ -1133,10 +1133,10 @@ rcu_torture_printk(char *page)
67330 rcu_torture_current,
67331 rcu_torture_current_version,
67332 list_empty(&rcu_torture_freelist),
67333 - atomic_read(&n_rcu_torture_alloc),
67334 - atomic_read(&n_rcu_torture_alloc_fail),
67335 - atomic_read(&n_rcu_torture_free),
67336 - atomic_read(&n_rcu_torture_mberror),
67337 + atomic_read_unchecked(&n_rcu_torture_alloc),
67338 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67339 + atomic_read_unchecked(&n_rcu_torture_free),
67340 + atomic_read_unchecked(&n_rcu_torture_mberror),
67341 n_rcu_torture_boost_ktrerror,
67342 n_rcu_torture_boost_rterror,
67343 n_rcu_torture_boost_failure,
67344 @@ -1146,7 +1146,7 @@ rcu_torture_printk(char *page)
67345 n_online_attempts,
67346 n_offline_successes,
67347 n_offline_attempts);
67348 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67349 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67350 n_rcu_torture_boost_ktrerror != 0 ||
67351 n_rcu_torture_boost_rterror != 0 ||
67352 n_rcu_torture_boost_failure != 0)
67353 @@ -1154,7 +1154,7 @@ rcu_torture_printk(char *page)
67354 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67355 if (i > 1) {
67356 cnt += sprintf(&page[cnt], "!!! ");
67357 - atomic_inc(&n_rcu_torture_error);
67358 + atomic_inc_unchecked(&n_rcu_torture_error);
67359 WARN_ON_ONCE(1);
67360 }
67361 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67362 @@ -1168,7 +1168,7 @@ rcu_torture_printk(char *page)
67363 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67364 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67365 cnt += sprintf(&page[cnt], " %d",
67366 - atomic_read(&rcu_torture_wcount[i]));
67367 + atomic_read_unchecked(&rcu_torture_wcount[i]));
67368 }
67369 cnt += sprintf(&page[cnt], "\n");
67370 if (cur_ops->stats)
67371 @@ -1676,7 +1676,7 @@ rcu_torture_cleanup(void)
67372
67373 if (cur_ops->cleanup)
67374 cur_ops->cleanup();
67375 - if (atomic_read(&n_rcu_torture_error))
67376 + if (atomic_read_unchecked(&n_rcu_torture_error))
67377 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67378 else if (n_online_successes != n_online_attempts ||
67379 n_offline_successes != n_offline_attempts)
67380 @@ -1744,17 +1744,17 @@ rcu_torture_init(void)
67381
67382 rcu_torture_current = NULL;
67383 rcu_torture_current_version = 0;
67384 - atomic_set(&n_rcu_torture_alloc, 0);
67385 - atomic_set(&n_rcu_torture_alloc_fail, 0);
67386 - atomic_set(&n_rcu_torture_free, 0);
67387 - atomic_set(&n_rcu_torture_mberror, 0);
67388 - atomic_set(&n_rcu_torture_error, 0);
67389 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67390 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67391 + atomic_set_unchecked(&n_rcu_torture_free, 0);
67392 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67393 + atomic_set_unchecked(&n_rcu_torture_error, 0);
67394 n_rcu_torture_boost_ktrerror = 0;
67395 n_rcu_torture_boost_rterror = 0;
67396 n_rcu_torture_boost_failure = 0;
67397 n_rcu_torture_boosts = 0;
67398 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67399 - atomic_set(&rcu_torture_wcount[i], 0);
67400 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67401 for_each_possible_cpu(cpu) {
67402 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67403 per_cpu(rcu_torture_count, cpu)[i] = 0;
67404 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67405 index d0c5baf..109b2e7 100644
67406 --- a/kernel/rcutree.c
67407 +++ b/kernel/rcutree.c
67408 @@ -357,9 +357,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
67409 rcu_prepare_for_idle(smp_processor_id());
67410 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67411 smp_mb__before_atomic_inc(); /* See above. */
67412 - atomic_inc(&rdtp->dynticks);
67413 + atomic_inc_unchecked(&rdtp->dynticks);
67414 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67415 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67416 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67417
67418 /*
67419 * The idle task is not permitted to enter the idle loop while
67420 @@ -448,10 +448,10 @@ void rcu_irq_exit(void)
67421 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
67422 {
67423 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67424 - atomic_inc(&rdtp->dynticks);
67425 + atomic_inc_unchecked(&rdtp->dynticks);
67426 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67427 smp_mb__after_atomic_inc(); /* See above. */
67428 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67429 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67430 rcu_cleanup_after_idle(smp_processor_id());
67431 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
67432 if (!is_idle_task(current)) {
67433 @@ -545,14 +545,14 @@ void rcu_nmi_enter(void)
67434 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67435
67436 if (rdtp->dynticks_nmi_nesting == 0 &&
67437 - (atomic_read(&rdtp->dynticks) & 0x1))
67438 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67439 return;
67440 rdtp->dynticks_nmi_nesting++;
67441 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67442 - atomic_inc(&rdtp->dynticks);
67443 + atomic_inc_unchecked(&rdtp->dynticks);
67444 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67445 smp_mb__after_atomic_inc(); /* See above. */
67446 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67447 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67448 }
67449
67450 /**
67451 @@ -571,9 +571,9 @@ void rcu_nmi_exit(void)
67452 return;
67453 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67454 smp_mb__before_atomic_inc(); /* See above. */
67455 - atomic_inc(&rdtp->dynticks);
67456 + atomic_inc_unchecked(&rdtp->dynticks);
67457 smp_mb__after_atomic_inc(); /* Force delay to next write. */
67458 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67459 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67460 }
67461
67462 #ifdef CONFIG_PROVE_RCU
67463 @@ -589,7 +589,7 @@ int rcu_is_cpu_idle(void)
67464 int ret;
67465
67466 preempt_disable();
67467 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67468 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
67469 preempt_enable();
67470 return ret;
67471 }
67472 @@ -659,7 +659,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
67473 */
67474 static int dyntick_save_progress_counter(struct rcu_data *rdp)
67475 {
67476 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
67477 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67478 return (rdp->dynticks_snap & 0x1) == 0;
67479 }
67480
67481 @@ -674,7 +674,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
67482 unsigned int curr;
67483 unsigned int snap;
67484
67485 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
67486 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67487 snap = (unsigned int)rdp->dynticks_snap;
67488
67489 /*
67490 @@ -704,10 +704,10 @@ static int jiffies_till_stall_check(void)
67491 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
67492 */
67493 if (till_stall_check < 3) {
67494 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
67495 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
67496 till_stall_check = 3;
67497 } else if (till_stall_check > 300) {
67498 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
67499 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
67500 till_stall_check = 300;
67501 }
67502 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
67503 @@ -1766,7 +1766,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
67504 /*
67505 * Do RCU core processing for the current CPU.
67506 */
67507 -static void rcu_process_callbacks(struct softirq_action *unused)
67508 +static void rcu_process_callbacks(void)
67509 {
67510 trace_rcu_utilization("Start RCU core");
67511 __rcu_process_callbacks(&rcu_sched_state,
67512 @@ -1949,8 +1949,8 @@ void synchronize_rcu_bh(void)
67513 }
67514 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
67515
67516 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
67517 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
67518 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
67519 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
67520
67521 static int synchronize_sched_expedited_cpu_stop(void *data)
67522 {
67523 @@ -2011,7 +2011,7 @@ void synchronize_sched_expedited(void)
67524 int firstsnap, s, snap, trycount = 0;
67525
67526 /* Note that atomic_inc_return() implies full memory barrier. */
67527 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
67528 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
67529 get_online_cpus();
67530 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
67531
67532 @@ -2033,7 +2033,7 @@ void synchronize_sched_expedited(void)
67533 }
67534
67535 /* Check to see if someone else did our work for us. */
67536 - s = atomic_read(&sync_sched_expedited_done);
67537 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67538 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
67539 smp_mb(); /* ensure test happens before caller kfree */
67540 return;
67541 @@ -2048,7 +2048,7 @@ void synchronize_sched_expedited(void)
67542 * grace period works for us.
67543 */
67544 get_online_cpus();
67545 - snap = atomic_read(&sync_sched_expedited_started);
67546 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
67547 smp_mb(); /* ensure read is before try_stop_cpus(). */
67548 }
67549
67550 @@ -2059,12 +2059,12 @@ void synchronize_sched_expedited(void)
67551 * than we did beat us to the punch.
67552 */
67553 do {
67554 - s = atomic_read(&sync_sched_expedited_done);
67555 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67556 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
67557 smp_mb(); /* ensure test happens before caller kfree */
67558 break;
67559 }
67560 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
67561 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
67562
67563 put_online_cpus();
67564 }
67565 @@ -2262,7 +2262,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
67566 rdp->qlen = 0;
67567 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
67568 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
67569 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
67570 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
67571 rdp->cpu = cpu;
67572 rdp->rsp = rsp;
67573 raw_spin_unlock_irqrestore(&rnp->lock, flags);
67574 @@ -2290,8 +2290,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
67575 rdp->n_force_qs_snap = rsp->n_force_qs;
67576 rdp->blimit = blimit;
67577 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
67578 - atomic_set(&rdp->dynticks->dynticks,
67579 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
67580 + atomic_set_unchecked(&rdp->dynticks->dynticks,
67581 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
67582 rcu_prepare_for_idle_init(cpu);
67583 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
67584
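The rcutree.c and rcutree.h hunks keep the existing dynticks protocol and only switch the counter to atomic_unchecked_t: the counter is bumped on every idle entry and exit, so an even value means the CPU is in dyntick-idle and an odd value means it is not, which is exactly what the & 0x1 tests above verify. The stand-alone sketch below restates that even/odd convention with ordinary integers.

#include <stdio.h>

static unsigned int dynticks = 1;	/* odd: the CPU starts out non-idle */

static void idle_enter(void) { dynticks++; }	/* becomes even */
static void idle_exit(void)  { dynticks++; }	/* becomes odd  */
static int  cpu_is_idle(void) { return (dynticks & 0x1) == 0; }

int main(void)
{
	printf("running:  idle=%d (dynticks=%u)\n", cpu_is_idle(), dynticks);
	idle_enter();
	printf("sleeping: idle=%d (dynticks=%u)\n", cpu_is_idle(), dynticks);
	idle_exit();
	printf("running:  idle=%d (dynticks=%u)\n", cpu_is_idle(), dynticks);
	return 0;
}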
67585 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
67586 index cdd1be0..5b2efb4 100644
67587 --- a/kernel/rcutree.h
67588 +++ b/kernel/rcutree.h
67589 @@ -87,7 +87,7 @@ struct rcu_dynticks {
67590 long long dynticks_nesting; /* Track irq/process nesting level. */
67591 /* Process level is worth LLONG_MAX/2. */
67592 int dynticks_nmi_nesting; /* Track NMI nesting level. */
67593 - atomic_t dynticks; /* Even value for idle, else odd. */
67594 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
67595 };
67596
67597 /* RCU's kthread states for tracing. */
67598 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
67599 index c023464..7f57225 100644
67600 --- a/kernel/rcutree_plugin.h
67601 +++ b/kernel/rcutree_plugin.h
67602 @@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
67603
67604 /* Clean up and exit. */
67605 smp_mb(); /* ensure expedited GP seen before counter increment. */
67606 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
67607 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
67608 unlock_mb_ret:
67609 mutex_unlock(&sync_rcu_preempt_exp_mutex);
67610 mb_ret:
67611 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
67612 index ed459ed..a03c3fa 100644
67613 --- a/kernel/rcutree_trace.c
67614 +++ b/kernel/rcutree_trace.c
67615 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
67616 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67617 rdp->qs_pending);
67618 seq_printf(m, " dt=%d/%llx/%d df=%lu",
67619 - atomic_read(&rdp->dynticks->dynticks),
67620 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67621 rdp->dynticks->dynticks_nesting,
67622 rdp->dynticks->dynticks_nmi_nesting,
67623 rdp->dynticks_fqs);
67624 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
67625 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
67626 rdp->qs_pending);
67627 seq_printf(m, ",%d,%llx,%d,%lu",
67628 - atomic_read(&rdp->dynticks->dynticks),
67629 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67630 rdp->dynticks->dynticks_nesting,
67631 rdp->dynticks->dynticks_nmi_nesting,
67632 rdp->dynticks_fqs);
67633 diff --git a/kernel/resource.c b/kernel/resource.c
67634 index 7e8ea66..1efd11f 100644
67635 --- a/kernel/resource.c
67636 +++ b/kernel/resource.c
67637 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
67638
67639 static int __init ioresources_init(void)
67640 {
67641 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67642 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67643 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
67644 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
67645 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67646 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
67647 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
67648 +#endif
67649 +#else
67650 proc_create("ioports", 0, NULL, &proc_ioports_operations);
67651 proc_create("iomem", 0, NULL, &proc_iomem_operations);
67652 +#endif
67653 return 0;
67654 }
67655 __initcall(ioresources_init);
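The resource.c hunk applies the same procfs-restriction idiom already used for /proc/modules above: under CONFIG_GRKERNSEC_PROC_USER the entries are created 0400 (root only), under CONFIG_GRKERNSEC_PROC_USERGROUP 0440, and with CONFIG_GRKERNSEC_PROC_ADD unset they stay world-readable as before. The hypothetical module below shows a plain proc_create() call with a restricted mode against a 3.4-era kernel; it is a sketch of the idiom, not code from the patch.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "visible to root only\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	/* S_IRUSR == 0400: only root can read the file. */
	if (!proc_create("restricted_demo", S_IRUSR, NULL, &demo_fops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("restricted_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");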
67656 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
67657 index 98ec494..4241d6d 100644
67658 --- a/kernel/rtmutex-tester.c
67659 +++ b/kernel/rtmutex-tester.c
67660 @@ -20,7 +20,7 @@
67661 #define MAX_RT_TEST_MUTEXES 8
67662
67663 static spinlock_t rttest_lock;
67664 -static atomic_t rttest_event;
67665 +static atomic_unchecked_t rttest_event;
67666
67667 struct test_thread_data {
67668 int opcode;
67669 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67670
67671 case RTTEST_LOCKCONT:
67672 td->mutexes[td->opdata] = 1;
67673 - td->event = atomic_add_return(1, &rttest_event);
67674 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67675 return 0;
67676
67677 case RTTEST_RESET:
67678 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67679 return 0;
67680
67681 case RTTEST_RESETEVENT:
67682 - atomic_set(&rttest_event, 0);
67683 + atomic_set_unchecked(&rttest_event, 0);
67684 return 0;
67685
67686 default:
67687 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67688 return ret;
67689
67690 td->mutexes[id] = 1;
67691 - td->event = atomic_add_return(1, &rttest_event);
67692 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67693 rt_mutex_lock(&mutexes[id]);
67694 - td->event = atomic_add_return(1, &rttest_event);
67695 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67696 td->mutexes[id] = 4;
67697 return 0;
67698
67699 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67700 return ret;
67701
67702 td->mutexes[id] = 1;
67703 - td->event = atomic_add_return(1, &rttest_event);
67704 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67705 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
67706 - td->event = atomic_add_return(1, &rttest_event);
67707 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67708 td->mutexes[id] = ret ? 0 : 4;
67709 return ret ? -EINTR : 0;
67710
67711 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67712 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
67713 return ret;
67714
67715 - td->event = atomic_add_return(1, &rttest_event);
67716 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67717 rt_mutex_unlock(&mutexes[id]);
67718 - td->event = atomic_add_return(1, &rttest_event);
67719 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67720 td->mutexes[id] = 0;
67721 return 0;
67722
67723 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67724 break;
67725
67726 td->mutexes[dat] = 2;
67727 - td->event = atomic_add_return(1, &rttest_event);
67728 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67729 break;
67730
67731 default:
67732 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67733 return;
67734
67735 td->mutexes[dat] = 3;
67736 - td->event = atomic_add_return(1, &rttest_event);
67737 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67738 break;
67739
67740 case RTTEST_LOCKNOWAIT:
67741 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67742 return;
67743
67744 td->mutexes[dat] = 1;
67745 - td->event = atomic_add_return(1, &rttest_event);
67746 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67747 return;
67748
67749 default:
67750 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
67751 index 0984a21..939f183 100644
67752 --- a/kernel/sched/auto_group.c
67753 +++ b/kernel/sched/auto_group.c
67754 @@ -11,7 +11,7 @@
67755
67756 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
67757 static struct autogroup autogroup_default;
67758 -static atomic_t autogroup_seq_nr;
67759 +static atomic_unchecked_t autogroup_seq_nr;
67760
67761 void __init autogroup_init(struct task_struct *init_task)
67762 {
67763 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
67764
67765 kref_init(&ag->kref);
67766 init_rwsem(&ag->lock);
67767 - ag->id = atomic_inc_return(&autogroup_seq_nr);
67768 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
67769 ag->tg = tg;
67770 #ifdef CONFIG_RT_GROUP_SCHED
67771 /*
67772 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
67773 index e5212ae..2fcf98d 100644
67774 --- a/kernel/sched/core.c
67775 +++ b/kernel/sched/core.c
67776 @@ -3907,6 +3907,8 @@ int can_nice(const struct task_struct *p, const int nice)
67777 /* convert nice value [19,-20] to rlimit style value [1,40] */
67778 int nice_rlim = 20 - nice;
67779
67780 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
67781 +
67782 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
67783 capable(CAP_SYS_NICE));
67784 }
67785 @@ -3940,7 +3942,8 @@ SYSCALL_DEFINE1(nice, int, increment)
67786 if (nice > 19)
67787 nice = 19;
67788
67789 - if (increment < 0 && !can_nice(current, nice))
67790 + if (increment < 0 && (!can_nice(current, nice) ||
67791 + gr_handle_chroot_nice()))
67792 return -EPERM;
67793
67794 retval = security_task_setnice(current, nice);
67795 @@ -4097,6 +4100,7 @@ recheck:
67796 unsigned long rlim_rtprio =
67797 task_rlimit(p, RLIMIT_RTPRIO);
67798
67799 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
67800 /* can't set/change the rt policy */
67801 if (policy != p->policy && !rlim_rtprio)
67802 return -EPERM;
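can_nice() converts a nice value in [19, -20] to an rlimit-style value in [1, 40] via nice_rlim = 20 - nice and allows the change only if RLIMIT_NICE covers it (or the caller has CAP_SYS_NICE); the patch merely adds gr_learn_resource() reporting and a chroot check on top of that logic. The small userspace program below reproduces the same conversion and limit test with getrlimit(), purely as an illustration.

#include <stdio.h>
#include <sys/resource.h>

/* Same conversion as the kernel: nice 19 -> 1, nice -20 -> 40. */
static int nice_allowed(int nice)
{
	struct rlimit rl;
	int nice_rlim = 20 - nice;

	if (getrlimit(RLIMIT_NICE, &rl))
		return 0;
	/* CAP_SYS_NICE would also permit it; ignored in this demo. */
	return (rlim_t)nice_rlim <= rl.rlim_cur;
}

int main(void)
{
	int samples[] = { 19, 0, -10, -20 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("nice %3d -> rlimit value %2d, allowed: %d\n",
		       samples[i], 20 - samples[i], nice_allowed(samples[i]));
	return 0;
}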
67803 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
67804 index e955364..eacd2a4 100644
67805 --- a/kernel/sched/fair.c
67806 +++ b/kernel/sched/fair.c
67807 @@ -5107,7 +5107,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
67808 * run_rebalance_domains is triggered when needed from the scheduler tick.
67809 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
67810 */
67811 -static void run_rebalance_domains(struct softirq_action *h)
67812 +static void run_rebalance_domains(void)
67813 {
67814 int this_cpu = smp_processor_id();
67815 struct rq *this_rq = cpu_rq(this_cpu);
67816 diff --git a/kernel/signal.c b/kernel/signal.c
67817 index 17afcaf..4500b05 100644
67818 --- a/kernel/signal.c
67819 +++ b/kernel/signal.c
67820 @@ -47,12 +47,12 @@ static struct kmem_cache *sigqueue_cachep;
67821
67822 int print_fatal_signals __read_mostly;
67823
67824 -static void __user *sig_handler(struct task_struct *t, int sig)
67825 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
67826 {
67827 return t->sighand->action[sig - 1].sa.sa_handler;
67828 }
67829
67830 -static int sig_handler_ignored(void __user *handler, int sig)
67831 +static int sig_handler_ignored(__sighandler_t handler, int sig)
67832 {
67833 /* Is it explicitly or implicitly ignored? */
67834 return handler == SIG_IGN ||
67835 @@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
67836
67837 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
67838 {
67839 - void __user *handler;
67840 + __sighandler_t handler;
67841
67842 handler = sig_handler(t, sig);
67843
67844 @@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
67845 atomic_inc(&user->sigpending);
67846 rcu_read_unlock();
67847
67848 + if (!override_rlimit)
67849 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
67850 +
67851 if (override_rlimit ||
67852 atomic_read(&user->sigpending) <=
67853 task_rlimit(t, RLIMIT_SIGPENDING)) {
67854 @@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
67855
67856 int unhandled_signal(struct task_struct *tsk, int sig)
67857 {
67858 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
67859 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
67860 if (is_global_init(tsk))
67861 return 1;
67862 if (handler != SIG_IGN && handler != SIG_DFL)
67863 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
67864 }
67865 }
67866
67867 + /* allow glibc communication via tgkill to other threads in our
67868 + thread group */
67869 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
67870 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
67871 + && gr_handle_signal(t, sig))
67872 + return -EPERM;
67873 +
67874 return security_task_kill(t, info, sig, 0);
67875 }
67876
67877 @@ -1204,7 +1214,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67878 return send_signal(sig, info, p, 1);
67879 }
67880
67881 -static int
67882 +int
67883 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67884 {
67885 return send_signal(sig, info, t, 0);
67886 @@ -1241,6 +1251,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67887 unsigned long int flags;
67888 int ret, blocked, ignored;
67889 struct k_sigaction *action;
67890 + int is_unhandled = 0;
67891
67892 spin_lock_irqsave(&t->sighand->siglock, flags);
67893 action = &t->sighand->action[sig-1];
67894 @@ -1255,9 +1266,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67895 }
67896 if (action->sa.sa_handler == SIG_DFL)
67897 t->signal->flags &= ~SIGNAL_UNKILLABLE;
67898 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
67899 + is_unhandled = 1;
67900 ret = specific_send_sig_info(sig, info, t);
67901 spin_unlock_irqrestore(&t->sighand->siglock, flags);
67902
67903 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
67904 + normal operation */
67905 + if (is_unhandled) {
67906 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
67907 + gr_handle_crash(t, sig);
67908 + }
67909 +
67910 return ret;
67911 }
67912
67913 @@ -1324,8 +1344,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67914 ret = check_kill_permission(sig, info, p);
67915 rcu_read_unlock();
67916
67917 - if (!ret && sig)
67918 + if (!ret && sig) {
67919 ret = do_send_sig_info(sig, info, p, true);
67920 + if (!ret)
67921 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
67922 + }
67923
67924 return ret;
67925 }
67926 @@ -2840,7 +2863,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
67927 int error = -ESRCH;
67928
67929 rcu_read_lock();
67930 - p = find_task_by_vpid(pid);
67931 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67932 + /* allow glibc communication via tgkill to other threads in our
67933 + thread group */
67934 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
67935 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
67936 + p = find_task_by_vpid_unrestricted(pid);
67937 + else
67938 +#endif
67939 + p = find_task_by_vpid(pid);
67940 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
67941 error = check_kill_permission(sig, info, p);
67942 /*
67943 diff --git a/kernel/smp.c b/kernel/smp.c
67944 index 2f8b10e..a41bc14 100644
67945 --- a/kernel/smp.c
67946 +++ b/kernel/smp.c
67947 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
67948 }
67949 EXPORT_SYMBOL(smp_call_function);
67950
67951 -void ipi_call_lock(void)
67952 +void ipi_call_lock(void) __acquires(call_function.lock)
67953 {
67954 raw_spin_lock(&call_function.lock);
67955 }
67956
67957 -void ipi_call_unlock(void)
67958 +void ipi_call_unlock(void) __releases(call_function.lock)
67959 {
67960 raw_spin_unlock(&call_function.lock);
67961 }
67962
67963 -void ipi_call_lock_irq(void)
67964 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
67965 {
67966 raw_spin_lock_irq(&call_function.lock);
67967 }
67968
67969 -void ipi_call_unlock_irq(void)
67970 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
67971 {
67972 raw_spin_unlock_irq(&call_function.lock);
67973 }
67974 diff --git a/kernel/softirq.c b/kernel/softirq.c
67975 index 671f959..91c51cb 100644
67976 --- a/kernel/softirq.c
67977 +++ b/kernel/softirq.c
67978 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
67979
67980 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
67981
67982 -char *softirq_to_name[NR_SOFTIRQS] = {
67983 +const char * const softirq_to_name[NR_SOFTIRQS] = {
67984 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
67985 "TASKLET", "SCHED", "HRTIMER", "RCU"
67986 };
67987 @@ -235,7 +235,7 @@ restart:
67988 kstat_incr_softirqs_this_cpu(vec_nr);
67989
67990 trace_softirq_entry(vec_nr);
67991 - h->action(h);
67992 + h->action();
67993 trace_softirq_exit(vec_nr);
67994 if (unlikely(prev_count != preempt_count())) {
67995 printk(KERN_ERR "huh, entered softirq %u %s %p"
67996 @@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
67997 or_softirq_pending(1UL << nr);
67998 }
67999
68000 -void open_softirq(int nr, void (*action)(struct softirq_action *))
68001 +void open_softirq(int nr, void (*action)(void))
68002 {
68003 - softirq_vec[nr].action = action;
68004 + pax_open_kernel();
68005 + *(void **)&softirq_vec[nr].action = action;
68006 + pax_close_kernel();
68007 }
68008
68009 /*
68010 @@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68011
68012 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68013
68014 -static void tasklet_action(struct softirq_action *a)
68015 +static void tasklet_action(void)
68016 {
68017 struct tasklet_struct *list;
68018
68019 @@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
68020 }
68021 }
68022
68023 -static void tasklet_hi_action(struct softirq_action *a)
68024 +static void tasklet_hi_action(void)
68025 {
68026 struct tasklet_struct *list;
68027
68028 diff --git a/kernel/sys.c b/kernel/sys.c
68029 index e7006eb..8fb7c51 100644
68030 --- a/kernel/sys.c
68031 +++ b/kernel/sys.c
68032 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68033 error = -EACCES;
68034 goto out;
68035 }
68036 +
68037 + if (gr_handle_chroot_setpriority(p, niceval)) {
68038 + error = -EACCES;
68039 + goto out;
68040 + }
68041 +
68042 no_nice = security_task_setnice(p, niceval);
68043 if (no_nice) {
68044 error = no_nice;
68045 @@ -581,6 +587,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68046 goto error;
68047 }
68048
68049 + if (gr_check_group_change(new->gid, new->egid, -1))
68050 + goto error;
68051 +
68052 if (rgid != (gid_t) -1 ||
68053 (egid != (gid_t) -1 && egid != old->gid))
68054 new->sgid = new->egid;
68055 @@ -610,6 +619,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68056 old = current_cred();
68057
68058 retval = -EPERM;
68059 +
68060 + if (gr_check_group_change(gid, gid, gid))
68061 + goto error;
68062 +
68063 if (nsown_capable(CAP_SETGID))
68064 new->gid = new->egid = new->sgid = new->fsgid = gid;
68065 else if (gid == old->gid || gid == old->sgid)
68066 @@ -627,7 +640,7 @@ error:
68067 /*
68068 * change the user struct in a credentials set to match the new UID
68069 */
68070 -static int set_user(struct cred *new)
68071 +int set_user(struct cred *new)
68072 {
68073 struct user_struct *new_user;
68074
68075 @@ -697,6 +710,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68076 goto error;
68077 }
68078
68079 + if (gr_check_user_change(new->uid, new->euid, -1))
68080 + goto error;
68081 +
68082 if (new->uid != old->uid) {
68083 retval = set_user(new);
68084 if (retval < 0)
68085 @@ -741,6 +757,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68086 old = current_cred();
68087
68088 retval = -EPERM;
68089 +
68090 + if (gr_check_crash_uid(uid))
68091 + goto error;
68092 + if (gr_check_user_change(uid, uid, uid))
68093 + goto error;
68094 +
68095 if (nsown_capable(CAP_SETUID)) {
68096 new->suid = new->uid = uid;
68097 if (uid != old->uid) {
68098 @@ -795,6 +817,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68099 goto error;
68100 }
68101
68102 + if (gr_check_user_change(ruid, euid, -1))
68103 + goto error;
68104 +
68105 if (ruid != (uid_t) -1) {
68106 new->uid = ruid;
68107 if (ruid != old->uid) {
68108 @@ -859,6 +884,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68109 goto error;
68110 }
68111
68112 + if (gr_check_group_change(rgid, egid, -1))
68113 + goto error;
68114 +
68115 if (rgid != (gid_t) -1)
68116 new->gid = rgid;
68117 if (egid != (gid_t) -1)
68118 @@ -905,6 +933,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68119 old = current_cred();
68120 old_fsuid = old->fsuid;
68121
68122 + if (gr_check_user_change(-1, -1, uid))
68123 + goto error;
68124 +
68125 if (uid == old->uid || uid == old->euid ||
68126 uid == old->suid || uid == old->fsuid ||
68127 nsown_capable(CAP_SETUID)) {
68128 @@ -915,6 +946,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68129 }
68130 }
68131
68132 +error:
68133 abort_creds(new);
68134 return old_fsuid;
68135
68136 @@ -941,12 +973,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68137 if (gid == old->gid || gid == old->egid ||
68138 gid == old->sgid || gid == old->fsgid ||
68139 nsown_capable(CAP_SETGID)) {
68140 + if (gr_check_group_change(-1, -1, gid))
68141 + goto error;
68142 +
68143 if (gid != old_fsgid) {
68144 new->fsgid = gid;
68145 goto change_okay;
68146 }
68147 }
68148
68149 +error:
68150 abort_creds(new);
68151 return old_fsgid;
68152
68153 @@ -1198,7 +1234,10 @@ static int override_release(char __user *release, int len)
68154 }
68155 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68156 snprintf(buf, len, "2.6.%u%s", v, rest);
68157 - ret = copy_to_user(release, buf, len);
68158 + if (len > sizeof(buf))
68159 + ret = -EFAULT;
68160 + else
68161 + ret = copy_to_user(release, buf, len);
68162 }
68163 return ret;
68164 }
68165 @@ -1252,19 +1291,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
68166 return -EFAULT;
68167
68168 down_read(&uts_sem);
68169 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
68170 + error = __copy_to_user(name->sysname, &utsname()->sysname,
68171 __OLD_UTS_LEN);
68172 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68173 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68174 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
68175 __OLD_UTS_LEN);
68176 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68177 - error |= __copy_to_user(&name->release, &utsname()->release,
68178 + error |= __copy_to_user(name->release, &utsname()->release,
68179 __OLD_UTS_LEN);
68180 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68181 - error |= __copy_to_user(&name->version, &utsname()->version,
68182 + error |= __copy_to_user(name->version, &utsname()->version,
68183 __OLD_UTS_LEN);
68184 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68185 - error |= __copy_to_user(&name->machine, &utsname()->machine,
68186 + error |= __copy_to_user(name->machine, &utsname()->machine,
68187 __OLD_UTS_LEN);
68188 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68189 up_read(&uts_sem);
68190 @@ -1847,7 +1886,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
68191 error = get_dumpable(me->mm);
68192 break;
68193 case PR_SET_DUMPABLE:
68194 - if (arg2 < 0 || arg2 > 1) {
68195 + if (arg2 > 1) {
68196 error = -EINVAL;
68197 break;
68198 }
68199 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68200 index 4ab1187..0b75ced 100644
68201 --- a/kernel/sysctl.c
68202 +++ b/kernel/sysctl.c
68203 @@ -91,7 +91,6 @@
68204
68205
68206 #if defined(CONFIG_SYSCTL)
68207 -
68208 /* External variables not in a header file. */
68209 extern int sysctl_overcommit_memory;
68210 extern int sysctl_overcommit_ratio;
68211 @@ -169,10 +168,8 @@ static int proc_taint(struct ctl_table *table, int write,
68212 void __user *buffer, size_t *lenp, loff_t *ppos);
68213 #endif
68214
68215 -#ifdef CONFIG_PRINTK
68216 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68217 void __user *buffer, size_t *lenp, loff_t *ppos);
68218 -#endif
68219
68220 #ifdef CONFIG_MAGIC_SYSRQ
68221 /* Note: sysrq code uses it's own private copy */
68222 @@ -196,6 +193,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
68223
68224 #endif
68225
68226 +extern struct ctl_table grsecurity_table[];
68227 +
68228 static struct ctl_table kern_table[];
68229 static struct ctl_table vm_table[];
68230 static struct ctl_table fs_table[];
68231 @@ -210,6 +209,20 @@ extern struct ctl_table epoll_table[];
68232 int sysctl_legacy_va_layout;
68233 #endif
68234
68235 +#ifdef CONFIG_PAX_SOFTMODE
68236 +static ctl_table pax_table[] = {
68237 + {
68238 + .procname = "softmode",
68239 + .data = &pax_softmode,
68240 + .maxlen = sizeof(unsigned int),
68241 + .mode = 0600,
68242 + .proc_handler = &proc_dointvec,
68243 + },
68244 +
68245 + { }
68246 +};
68247 +#endif
68248 +
68249 /* The default sysctl tables: */
68250
68251 static struct ctl_table sysctl_base_table[] = {
68252 @@ -256,6 +269,22 @@ static int max_extfrag_threshold = 1000;
68253 #endif
68254
68255 static struct ctl_table kern_table[] = {
68256 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68257 + {
68258 + .procname = "grsecurity",
68259 + .mode = 0500,
68260 + .child = grsecurity_table,
68261 + },
68262 +#endif
68263 +
68264 +#ifdef CONFIG_PAX_SOFTMODE
68265 + {
68266 + .procname = "pax",
68267 + .mode = 0500,
68268 + .child = pax_table,
68269 + },
68270 +#endif
68271 +
68272 {
68273 .procname = "sched_child_runs_first",
68274 .data = &sysctl_sched_child_runs_first,
68275 @@ -540,7 +569,7 @@ static struct ctl_table kern_table[] = {
68276 .data = &modprobe_path,
68277 .maxlen = KMOD_PATH_LEN,
68278 .mode = 0644,
68279 - .proc_handler = proc_dostring,
68280 + .proc_handler = proc_dostring_modpriv,
68281 },
68282 {
68283 .procname = "modules_disabled",
68284 @@ -707,16 +736,20 @@ static struct ctl_table kern_table[] = {
68285 .extra1 = &zero,
68286 .extra2 = &one,
68287 },
68288 +#endif
68289 {
68290 .procname = "kptr_restrict",
68291 .data = &kptr_restrict,
68292 .maxlen = sizeof(int),
68293 .mode = 0644,
68294 .proc_handler = proc_dointvec_minmax_sysadmin,
68295 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68296 + .extra1 = &two,
68297 +#else
68298 .extra1 = &zero,
68299 +#endif
68300 .extra2 = &two,
68301 },
68302 -#endif
68303 {
68304 .procname = "ngroups_max",
68305 .data = &ngroups_max,
68306 @@ -1215,6 +1248,13 @@ static struct ctl_table vm_table[] = {
68307 .proc_handler = proc_dointvec_minmax,
68308 .extra1 = &zero,
68309 },
68310 + {
68311 + .procname = "heap_stack_gap",
68312 + .data = &sysctl_heap_stack_gap,
68313 + .maxlen = sizeof(sysctl_heap_stack_gap),
68314 + .mode = 0644,
68315 + .proc_handler = proc_doulongvec_minmax,
68316 + },
68317 #else
68318 {
68319 .procname = "nr_trim_pages",
68320 @@ -1645,6 +1685,16 @@ int proc_dostring(struct ctl_table *table, int write,
68321 buffer, lenp, ppos);
68322 }
68323
68324 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68325 + void __user *buffer, size_t *lenp, loff_t *ppos)
68326 +{
68327 + if (write && !capable(CAP_SYS_MODULE))
68328 + return -EPERM;
68329 +
68330 + return _proc_do_string(table->data, table->maxlen, write,
68331 + buffer, lenp, ppos);
68332 +}
68333 +
68334 static size_t proc_skip_spaces(char **buf)
68335 {
68336 size_t ret;
68337 @@ -1750,6 +1800,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68338 len = strlen(tmp);
68339 if (len > *size)
68340 len = *size;
68341 + if (len > sizeof(tmp))
68342 + len = sizeof(tmp);
68343 if (copy_to_user(*buf, tmp, len))
68344 return -EFAULT;
68345 *size -= len;
68346 @@ -1942,7 +1994,6 @@ static int proc_taint(struct ctl_table *table, int write,
68347 return err;
68348 }
68349
68350 -#ifdef CONFIG_PRINTK
68351 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68352 void __user *buffer, size_t *lenp, loff_t *ppos)
68353 {
68354 @@ -1951,7 +2002,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68355
68356 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
68357 }
68358 -#endif
68359
68360 struct do_proc_dointvec_minmax_conv_param {
68361 int *min;
68362 @@ -2066,8 +2116,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68363 *i = val;
68364 } else {
68365 val = convdiv * (*i) / convmul;
68366 - if (!first)
68367 + if (!first) {
68368 err = proc_put_char(&buffer, &left, '\t');
68369 + if (err)
68370 + break;
68371 + }
68372 err = proc_put_long(&buffer, &left, val, false);
68373 if (err)
68374 break;
68375 @@ -2459,6 +2512,12 @@ int proc_dostring(struct ctl_table *table, int write,
68376 return -ENOSYS;
68377 }
68378
68379 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68380 + void __user *buffer, size_t *lenp, loff_t *ppos)
68381 +{
68382 + return -ENOSYS;
68383 +}
68384 +
68385 int proc_dointvec(struct ctl_table *table, int write,
68386 void __user *buffer, size_t *lenp, loff_t *ppos)
68387 {
68388 @@ -2515,5 +2574,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68389 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68390 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68391 EXPORT_SYMBOL(proc_dostring);
68392 +EXPORT_SYMBOL(proc_dostring_modpriv);
68393 EXPORT_SYMBOL(proc_doulongvec_minmax);
68394 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68395 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68396 index a650694..aaeeb20 100644
68397 --- a/kernel/sysctl_binary.c
68398 +++ b/kernel/sysctl_binary.c
68399 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68400 int i;
68401
68402 set_fs(KERNEL_DS);
68403 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68404 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68405 set_fs(old_fs);
68406 if (result < 0)
68407 goto out_kfree;
68408 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68409 }
68410
68411 set_fs(KERNEL_DS);
68412 - result = vfs_write(file, buffer, str - buffer, &pos);
68413 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68414 set_fs(old_fs);
68415 if (result < 0)
68416 goto out_kfree;
68417 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68418 int i;
68419
68420 set_fs(KERNEL_DS);
68421 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68422 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68423 set_fs(old_fs);
68424 if (result < 0)
68425 goto out_kfree;
68426 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68427 }
68428
68429 set_fs(KERNEL_DS);
68430 - result = vfs_write(file, buffer, str - buffer, &pos);
68431 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68432 set_fs(old_fs);
68433 if (result < 0)
68434 goto out_kfree;
68435 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68436 int i;
68437
68438 set_fs(KERNEL_DS);
68439 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68440 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68441 set_fs(old_fs);
68442 if (result < 0)
68443 goto out;
68444 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68445 __le16 dnaddr;
68446
68447 set_fs(KERNEL_DS);
68448 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68449 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68450 set_fs(old_fs);
68451 if (result < 0)
68452 goto out;
68453 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68454 le16_to_cpu(dnaddr) & 0x3ff);
68455
68456 set_fs(KERNEL_DS);
68457 - result = vfs_write(file, buf, len, &pos);
68458 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
68459 set_fs(old_fs);
68460 if (result < 0)
68461 goto out;
68462 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
68463 index e660464..c8b9e67 100644
68464 --- a/kernel/taskstats.c
68465 +++ b/kernel/taskstats.c
68466 @@ -27,9 +27,12 @@
68467 #include <linux/cgroup.h>
68468 #include <linux/fs.h>
68469 #include <linux/file.h>
68470 +#include <linux/grsecurity.h>
68471 #include <net/genetlink.h>
68472 #include <linux/atomic.h>
68473
68474 +extern int gr_is_taskstats_denied(int pid);
68475 +
68476 /*
68477 * Maximum length of a cpumask that can be specified in
68478 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
68479 @@ -556,6 +559,9 @@ err:
68480
68481 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
68482 {
68483 + if (gr_is_taskstats_denied(current->pid))
68484 + return -EACCES;
68485 +
68486 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
68487 return cmd_attr_register_cpumask(info);
68488 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
68489 diff --git a/kernel/time.c b/kernel/time.c
68490 index ba744cf..267b7c5 100644
68491 --- a/kernel/time.c
68492 +++ b/kernel/time.c
68493 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
68494 return error;
68495
68496 if (tz) {
68497 + /* we log in do_settimeofday called below, so don't log twice
68498 + */
68499 + if (!tv)
68500 + gr_log_timechange();
68501 +
68502 sys_tz = *tz;
68503 update_vsyscall_tz();
68504 if (firsttime) {
68505 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
68506 index 8a538c5..def79d4 100644
68507 --- a/kernel/time/alarmtimer.c
68508 +++ b/kernel/time/alarmtimer.c
68509 @@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
68510 struct platform_device *pdev;
68511 int error = 0;
68512 int i;
68513 - struct k_clock alarm_clock = {
68514 + static struct k_clock alarm_clock = {
68515 .clock_getres = alarm_clock_getres,
68516 .clock_get = alarm_clock_get,
68517 .timer_create = alarm_timer_create,
68518 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
68519 index f113755..ec24223 100644
68520 --- a/kernel/time/tick-broadcast.c
68521 +++ b/kernel/time/tick-broadcast.c
68522 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
68523 * then clear the broadcast bit.
68524 */
68525 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
68526 - int cpu = smp_processor_id();
68527 + cpu = smp_processor_id();
68528
68529 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
68530 tick_broadcast_clear_oneshot(cpu);
68531 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
68532 index d66b213..6947686 100644
68533 --- a/kernel/time/timekeeping.c
68534 +++ b/kernel/time/timekeeping.c
68535 @@ -14,6 +14,7 @@
68536 #include <linux/init.h>
68537 #include <linux/mm.h>
68538 #include <linux/sched.h>
68539 +#include <linux/grsecurity.h>
68540 #include <linux/syscore_ops.h>
68541 #include <linux/clocksource.h>
68542 #include <linux/jiffies.h>
68543 @@ -373,6 +374,8 @@ int do_settimeofday(const struct timespec *tv)
68544 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
68545 return -EINVAL;
68546
68547 + gr_log_timechange();
68548 +
68549 write_seqlock_irqsave(&timekeeper.lock, flags);
68550
68551 timekeeping_forward_now();
68552 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
68553 index 3258455..f35227d 100644
68554 --- a/kernel/time/timer_list.c
68555 +++ b/kernel/time/timer_list.c
68556 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
68557
68558 static void print_name_offset(struct seq_file *m, void *sym)
68559 {
68560 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68561 + SEQ_printf(m, "<%p>", NULL);
68562 +#else
68563 char symname[KSYM_NAME_LEN];
68564
68565 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
68566 SEQ_printf(m, "<%pK>", sym);
68567 else
68568 SEQ_printf(m, "%s", symname);
68569 +#endif
68570 }
68571
68572 static void
68573 @@ -112,7 +116,11 @@ next_one:
68574 static void
68575 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
68576 {
68577 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68578 + SEQ_printf(m, " .base: %p\n", NULL);
68579 +#else
68580 SEQ_printf(m, " .base: %pK\n", base);
68581 +#endif
68582 SEQ_printf(m, " .index: %d\n",
68583 base->index);
68584 SEQ_printf(m, " .resolution: %Lu nsecs\n",
68585 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
68586 {
68587 struct proc_dir_entry *pe;
68588
68589 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68590 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
68591 +#else
68592 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
68593 +#endif
68594 if (!pe)
68595 return -ENOMEM;
68596 return 0;
68597 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
68598 index 0b537f2..9e71eca 100644
68599 --- a/kernel/time/timer_stats.c
68600 +++ b/kernel/time/timer_stats.c
68601 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
68602 static unsigned long nr_entries;
68603 static struct entry entries[MAX_ENTRIES];
68604
68605 -static atomic_t overflow_count;
68606 +static atomic_unchecked_t overflow_count;
68607
68608 /*
68609 * The entries are in a hash-table, for fast lookup:
68610 @@ -140,7 +140,7 @@ static void reset_entries(void)
68611 nr_entries = 0;
68612 memset(entries, 0, sizeof(entries));
68613 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
68614 - atomic_set(&overflow_count, 0);
68615 + atomic_set_unchecked(&overflow_count, 0);
68616 }
68617
68618 static struct entry *alloc_entry(void)
68619 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68620 if (likely(entry))
68621 entry->count++;
68622 else
68623 - atomic_inc(&overflow_count);
68624 + atomic_inc_unchecked(&overflow_count);
68625
68626 out_unlock:
68627 raw_spin_unlock_irqrestore(lock, flags);
68628 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68629
68630 static void print_name_offset(struct seq_file *m, unsigned long addr)
68631 {
68632 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68633 + seq_printf(m, "<%p>", NULL);
68634 +#else
68635 char symname[KSYM_NAME_LEN];
68636
68637 if (lookup_symbol_name(addr, symname) < 0)
68638 seq_printf(m, "<%p>", (void *)addr);
68639 else
68640 seq_printf(m, "%s", symname);
68641 +#endif
68642 }
68643
68644 static int tstats_show(struct seq_file *m, void *v)
68645 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
68646
68647 seq_puts(m, "Timer Stats Version: v0.2\n");
68648 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
68649 - if (atomic_read(&overflow_count))
68650 + if (atomic_read_unchecked(&overflow_count))
68651 seq_printf(m, "Overflow: %d entries\n",
68652 - atomic_read(&overflow_count));
68653 + atomic_read_unchecked(&overflow_count));
68654
68655 for (i = 0; i < nr_entries; i++) {
68656 entry = entries + i;
68657 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
68658 {
68659 struct proc_dir_entry *pe;
68660
68661 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68662 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
68663 +#else
68664 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
68665 +#endif
68666 if (!pe)
68667 return -ENOMEM;
68668 return 0;
68669 diff --git a/kernel/timer.c b/kernel/timer.c
68670 index a297ffc..5e16b0b 100644
68671 --- a/kernel/timer.c
68672 +++ b/kernel/timer.c
68673 @@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
68674 /*
68675 * This function runs timers and the timer-tq in bottom half context.
68676 */
68677 -static void run_timer_softirq(struct softirq_action *h)
68678 +static void run_timer_softirq(void)
68679 {
68680 struct tvec_base *base = __this_cpu_read(tvec_bases);
68681
68682 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
68683 index c0bd030..62a1927 100644
68684 --- a/kernel/trace/blktrace.c
68685 +++ b/kernel/trace/blktrace.c
68686 @@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
68687 struct blk_trace *bt = filp->private_data;
68688 char buf[16];
68689
68690 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
68691 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
68692
68693 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
68694 }
68695 @@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
68696 return 1;
68697
68698 bt = buf->chan->private_data;
68699 - atomic_inc(&bt->dropped);
68700 + atomic_inc_unchecked(&bt->dropped);
68701 return 0;
68702 }
68703
68704 @@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
68705
68706 bt->dir = dir;
68707 bt->dev = dev;
68708 - atomic_set(&bt->dropped, 0);
68709 + atomic_set_unchecked(&bt->dropped, 0);
68710
68711 ret = -EIO;
68712 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
68713 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
68714 index 0fa92f6..89950b2 100644
68715 --- a/kernel/trace/ftrace.c
68716 +++ b/kernel/trace/ftrace.c
68717 @@ -1800,12 +1800,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
68718 if (unlikely(ftrace_disabled))
68719 return 0;
68720
68721 + ret = ftrace_arch_code_modify_prepare();
68722 + FTRACE_WARN_ON(ret);
68723 + if (ret)
68724 + return 0;
68725 +
68726 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
68727 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
68728 if (ret) {
68729 ftrace_bug(ret, ip);
68730 - return 0;
68731 }
68732 - return 1;
68733 + return ret ? 0 : 1;
68734 }
68735
68736 /*
68737 @@ -2917,7 +2922,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
68738
68739 int
68740 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
68741 - void *data)
68742 + void *data)
68743 {
68744 struct ftrace_func_probe *entry;
68745 struct ftrace_page *pg;
68746 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
68747 index 2a22255..cdcdd06 100644
68748 --- a/kernel/trace/trace.c
68749 +++ b/kernel/trace/trace.c
68750 @@ -4312,10 +4312,9 @@ static const struct file_operations tracing_dyn_info_fops = {
68751 };
68752 #endif
68753
68754 -static struct dentry *d_tracer;
68755 -
68756 struct dentry *tracing_init_dentry(void)
68757 {
68758 + static struct dentry *d_tracer;
68759 static int once;
68760
68761 if (d_tracer)
68762 @@ -4335,10 +4334,9 @@ struct dentry *tracing_init_dentry(void)
68763 return d_tracer;
68764 }
68765
68766 -static struct dentry *d_percpu;
68767 -
68768 struct dentry *tracing_dentry_percpu(void)
68769 {
68770 + static struct dentry *d_percpu;
68771 static int once;
68772 struct dentry *d_tracer;
68773
68774 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
68775 index 29111da..d190fe2 100644
68776 --- a/kernel/trace/trace_events.c
68777 +++ b/kernel/trace/trace_events.c
68778 @@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
68779 struct ftrace_module_file_ops {
68780 struct list_head list;
68781 struct module *mod;
68782 - struct file_operations id;
68783 - struct file_operations enable;
68784 - struct file_operations format;
68785 - struct file_operations filter;
68786 };
68787
68788 static struct ftrace_module_file_ops *
68789 @@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
68790
68791 file_ops->mod = mod;
68792
68793 - file_ops->id = ftrace_event_id_fops;
68794 - file_ops->id.owner = mod;
68795 -
68796 - file_ops->enable = ftrace_enable_fops;
68797 - file_ops->enable.owner = mod;
68798 -
68799 - file_ops->filter = ftrace_event_filter_fops;
68800 - file_ops->filter.owner = mod;
68801 -
68802 - file_ops->format = ftrace_event_format_fops;
68803 - file_ops->format.owner = mod;
68804 + pax_open_kernel();
68805 + *(void **)&mod->trace_id.owner = mod;
68806 + *(void **)&mod->trace_enable.owner = mod;
68807 + *(void **)&mod->trace_filter.owner = mod;
68808 + *(void **)&mod->trace_format.owner = mod;
68809 + pax_close_kernel();
68810
68811 list_add(&file_ops->list, &ftrace_module_file_list);
68812
68813 @@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
68814
68815 for_each_event(call, start, end) {
68816 __trace_add_event_call(*call, mod,
68817 - &file_ops->id, &file_ops->enable,
68818 - &file_ops->filter, &file_ops->format);
68819 + &mod->trace_id, &mod->trace_enable,
68820 + &mod->trace_filter, &mod->trace_format);
68821 }
68822 }
68823
68824 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
68825 index 580a05e..9b31acb 100644
68826 --- a/kernel/trace/trace_kprobe.c
68827 +++ b/kernel/trace/trace_kprobe.c
68828 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68829 long ret;
68830 int maxlen = get_rloc_len(*(u32 *)dest);
68831 u8 *dst = get_rloc_data(dest);
68832 - u8 *src = addr;
68833 + const u8 __user *src = (const u8 __force_user *)addr;
68834 mm_segment_t old_fs = get_fs();
68835 if (!maxlen)
68836 return;
68837 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68838 pagefault_disable();
68839 do
68840 ret = __copy_from_user_inatomic(dst++, src++, 1);
68841 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
68842 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
68843 dst[-1] = '\0';
68844 pagefault_enable();
68845 set_fs(old_fs);
68846 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68847 ((u8 *)get_rloc_data(dest))[0] = '\0';
68848 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
68849 } else
68850 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
68851 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
68852 get_rloc_offs(*(u32 *)dest));
68853 }
68854 /* Return the length of string -- including null terminal byte */
68855 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
68856 set_fs(KERNEL_DS);
68857 pagefault_disable();
68858 do {
68859 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
68860 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
68861 len++;
68862 } while (c && ret == 0 && len < MAX_STRING_SIZE);
68863 pagefault_enable();
68864 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
68865 index fd3c8aa..5f324a6 100644
68866 --- a/kernel/trace/trace_mmiotrace.c
68867 +++ b/kernel/trace/trace_mmiotrace.c
68868 @@ -24,7 +24,7 @@ struct header_iter {
68869 static struct trace_array *mmio_trace_array;
68870 static bool overrun_detected;
68871 static unsigned long prev_overruns;
68872 -static atomic_t dropped_count;
68873 +static atomic_unchecked_t dropped_count;
68874
68875 static void mmio_reset_data(struct trace_array *tr)
68876 {
68877 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
68878
68879 static unsigned long count_overruns(struct trace_iterator *iter)
68880 {
68881 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
68882 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
68883 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
68884
68885 if (over > prev_overruns)
68886 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
68887 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
68888 sizeof(*entry), 0, pc);
68889 if (!event) {
68890 - atomic_inc(&dropped_count);
68891 + atomic_inc_unchecked(&dropped_count);
68892 return;
68893 }
68894 entry = ring_buffer_event_data(event);
68895 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
68896 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
68897 sizeof(*entry), 0, pc);
68898 if (!event) {
68899 - atomic_inc(&dropped_count);
68900 + atomic_inc_unchecked(&dropped_count);
68901 return;
68902 }
68903 entry = ring_buffer_event_data(event);
68904 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
68905 index df611a0..10d8b32 100644
68906 --- a/kernel/trace/trace_output.c
68907 +++ b/kernel/trace/trace_output.c
68908 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
68909
68910 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
68911 if (!IS_ERR(p)) {
68912 - p = mangle_path(s->buffer + s->len, p, "\n");
68913 + p = mangle_path(s->buffer + s->len, p, "\n\\");
68914 if (p) {
68915 s->len = p - s->buffer;
68916 return 1;
68917 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
68918 index d4545f4..a9010a1 100644
68919 --- a/kernel/trace/trace_stack.c
68920 +++ b/kernel/trace/trace_stack.c
68921 @@ -53,7 +53,7 @@ static inline void check_stack(void)
68922 return;
68923
68924 /* we do not handle interrupt stacks yet */
68925 - if (!object_is_on_stack(&this_size))
68926 + if (!object_starts_on_stack(&this_size))
68927 return;
68928
68929 local_irq_save(flags);
68930 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
68931 index 209b379..7f76423 100644
68932 --- a/kernel/trace/trace_workqueue.c
68933 +++ b/kernel/trace/trace_workqueue.c
68934 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
68935 int cpu;
68936 pid_t pid;
68937 /* Can be inserted from interrupt or user context, need to be atomic */
68938 - atomic_t inserted;
68939 + atomic_unchecked_t inserted;
68940 /*
68941 * Don't need to be atomic, works are serialized in a single workqueue thread
68942 * on a single CPU.
68943 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
68944 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
68945 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
68946 if (node->pid == wq_thread->pid) {
68947 - atomic_inc(&node->inserted);
68948 + atomic_inc_unchecked(&node->inserted);
68949 goto found;
68950 }
68951 }
68952 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
68953 tsk = get_pid_task(pid, PIDTYPE_PID);
68954 if (tsk) {
68955 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
68956 - atomic_read(&cws->inserted), cws->executed,
68957 + atomic_read_unchecked(&cws->inserted), cws->executed,
68958 tsk->comm);
68959 put_task_struct(tsk);
68960 }
68961 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
68962 index 6777153..8519f60 100644
68963 --- a/lib/Kconfig.debug
68964 +++ b/lib/Kconfig.debug
68965 @@ -1132,6 +1132,7 @@ config LATENCYTOP
68966 depends on DEBUG_KERNEL
68967 depends on STACKTRACE_SUPPORT
68968 depends on PROC_FS
68969 + depends on !GRKERNSEC_HIDESYM
68970 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
68971 select KALLSYMS
68972 select KALLSYMS_ALL
68973 diff --git a/lib/bitmap.c b/lib/bitmap.c
68974 index b5a8b6a..a69623c 100644
68975 --- a/lib/bitmap.c
68976 +++ b/lib/bitmap.c
68977 @@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
68978 {
68979 int c, old_c, totaldigits, ndigits, nchunks, nbits;
68980 u32 chunk;
68981 - const char __user __force *ubuf = (const char __user __force *)buf;
68982 + const char __user *ubuf = (const char __force_user *)buf;
68983
68984 bitmap_zero(maskp, nmaskbits);
68985
68986 @@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user *ubuf,
68987 {
68988 if (!access_ok(VERIFY_READ, ubuf, ulen))
68989 return -EFAULT;
68990 - return __bitmap_parse((const char __force *)ubuf,
68991 + return __bitmap_parse((const char __force_kernel *)ubuf,
68992 ulen, 1, maskp, nmaskbits);
68993
68994 }
68995 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
68996 {
68997 unsigned a, b;
68998 int c, old_c, totaldigits;
68999 - const char __user __force *ubuf = (const char __user __force *)buf;
69000 + const char __user *ubuf = (const char __force_user *)buf;
69001 int exp_digit, in_range;
69002
69003 totaldigits = c = 0;
69004 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69005 {
69006 if (!access_ok(VERIFY_READ, ubuf, ulen))
69007 return -EFAULT;
69008 - return __bitmap_parselist((const char __force *)ubuf,
69009 + return __bitmap_parselist((const char __force_kernel *)ubuf,
69010 ulen, 1, maskp, nmaskbits);
69011 }
69012 EXPORT_SYMBOL(bitmap_parselist_user);
69013 diff --git a/lib/bug.c b/lib/bug.c
69014 index a28c141..2bd3d95 100644
69015 --- a/lib/bug.c
69016 +++ b/lib/bug.c
69017 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69018 return BUG_TRAP_TYPE_NONE;
69019
69020 bug = find_bug(bugaddr);
69021 + if (!bug)
69022 + return BUG_TRAP_TYPE_NONE;
69023
69024 file = NULL;
69025 line = 0;
69026 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69027 index 0ab9ae8..f01ceca 100644
69028 --- a/lib/debugobjects.c
69029 +++ b/lib/debugobjects.c
69030 @@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69031 if (limit > 4)
69032 return;
69033
69034 - is_on_stack = object_is_on_stack(addr);
69035 + is_on_stack = object_starts_on_stack(addr);
69036 if (is_on_stack == onstack)
69037 return;
69038
69039 diff --git a/lib/devres.c b/lib/devres.c
69040 index 80b9c76..9e32279 100644
69041 --- a/lib/devres.c
69042 +++ b/lib/devres.c
69043 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69044 void devm_iounmap(struct device *dev, void __iomem *addr)
69045 {
69046 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69047 - (void *)addr));
69048 + (void __force *)addr));
69049 iounmap(addr);
69050 }
69051 EXPORT_SYMBOL(devm_iounmap);
69052 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69053 {
69054 ioport_unmap(addr);
69055 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69056 - devm_ioport_map_match, (void *)addr));
69057 + devm_ioport_map_match, (void __force *)addr));
69058 }
69059 EXPORT_SYMBOL(devm_ioport_unmap);
69060
69061 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69062 index 13ef233..5241683 100644
69063 --- a/lib/dma-debug.c
69064 +++ b/lib/dma-debug.c
69065 @@ -924,7 +924,7 @@ out:
69066
69067 static void check_for_stack(struct device *dev, void *addr)
69068 {
69069 - if (object_is_on_stack(addr))
69070 + if (object_starts_on_stack(addr))
69071 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69072 "stack [addr=%p]\n", addr);
69073 }
69074 diff --git a/lib/extable.c b/lib/extable.c
69075 index 4cac81e..63e9b8f 100644
69076 --- a/lib/extable.c
69077 +++ b/lib/extable.c
69078 @@ -13,6 +13,7 @@
69079 #include <linux/init.h>
69080 #include <linux/sort.h>
69081 #include <asm/uaccess.h>
69082 +#include <asm/pgtable.h>
69083
69084 #ifndef ARCH_HAS_SORT_EXTABLE
69085 /*
69086 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69087 void sort_extable(struct exception_table_entry *start,
69088 struct exception_table_entry *finish)
69089 {
69090 + pax_open_kernel();
69091 sort(start, finish - start, sizeof(struct exception_table_entry),
69092 cmp_ex, NULL);
69093 + pax_close_kernel();
69094 }
69095
69096 #ifdef CONFIG_MODULES
69097 diff --git a/lib/inflate.c b/lib/inflate.c
69098 index 013a761..c28f3fc 100644
69099 --- a/lib/inflate.c
69100 +++ b/lib/inflate.c
69101 @@ -269,7 +269,7 @@ static void free(void *where)
69102 malloc_ptr = free_mem_ptr;
69103 }
69104 #else
69105 -#define malloc(a) kmalloc(a, GFP_KERNEL)
69106 +#define malloc(a) kmalloc((a), GFP_KERNEL)
69107 #define free(a) kfree(a)
69108 #endif
69109
69110 diff --git a/lib/ioremap.c b/lib/ioremap.c
69111 index 0c9216c..863bd89 100644
69112 --- a/lib/ioremap.c
69113 +++ b/lib/ioremap.c
69114 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
69115 unsigned long next;
69116
69117 phys_addr -= addr;
69118 - pmd = pmd_alloc(&init_mm, pud, addr);
69119 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
69120 if (!pmd)
69121 return -ENOMEM;
69122 do {
69123 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
69124 unsigned long next;
69125
69126 phys_addr -= addr;
69127 - pud = pud_alloc(&init_mm, pgd, addr);
69128 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
69129 if (!pud)
69130 return -ENOMEM;
69131 do {
69132 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
69133 index bd2bea9..6b3c95e 100644
69134 --- a/lib/is_single_threaded.c
69135 +++ b/lib/is_single_threaded.c
69136 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
69137 struct task_struct *p, *t;
69138 bool ret;
69139
69140 + if (!mm)
69141 + return true;
69142 +
69143 if (atomic_read(&task->signal->live) != 1)
69144 return false;
69145
69146 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69147 index 86516f5..73395ca 100644
69148 --- a/lib/radix-tree.c
69149 +++ b/lib/radix-tree.c
69150 @@ -79,7 +79,7 @@ struct radix_tree_preload {
69151 int nr;
69152 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69153 };
69154 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69155 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69156
69157 static inline void *ptr_to_indirect(void *ptr)
69158 {
69159 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69160 index abbabec..362988d 100644
69161 --- a/lib/vsprintf.c
69162 +++ b/lib/vsprintf.c
69163 @@ -16,6 +16,9 @@
69164 * - scnprintf and vscnprintf
69165 */
69166
69167 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69168 +#define __INCLUDED_BY_HIDESYM 1
69169 +#endif
69170 #include <stdarg.h>
69171 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
69172 #include <linux/types.h>
69173 @@ -433,7 +436,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
69174 char sym[KSYM_SYMBOL_LEN];
69175 if (ext == 'B')
69176 sprint_backtrace(sym, value);
69177 - else if (ext != 'f' && ext != 's')
69178 + else if (ext != 'f' && ext != 's' && ext != 'a')
69179 sprint_symbol(sym, value);
69180 else
69181 kallsyms_lookup(value, NULL, NULL, NULL, sym);
69182 @@ -809,7 +812,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
69183 return number(buf, end, *(const netdev_features_t *)addr, spec);
69184 }
69185
69186 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69187 +int kptr_restrict __read_mostly = 2;
69188 +#else
69189 int kptr_restrict __read_mostly;
69190 +#endif
69191
69192 /*
69193 * Show a '%p' thing. A kernel extension is that the '%p' is followed
69194 @@ -823,6 +830,8 @@ int kptr_restrict __read_mostly;
69195 * - 'S' For symbolic direct pointers with offset
69196 * - 's' For symbolic direct pointers without offset
69197 * - 'B' For backtraced symbolic direct pointers with offset
69198 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69199 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69200 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69201 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69202 * - 'M' For a 6-byte MAC address, it prints the address in the
69203 @@ -868,12 +877,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69204 {
69205 if (!ptr && *fmt != 'K') {
69206 /*
69207 - * Print (null) with the same width as a pointer so it makes
69208 + * Print (nil) with the same width as a pointer so it makes
69209 * tabular output look nice.
69210 */
69211 if (spec.field_width == -1)
69212 spec.field_width = 2 * sizeof(void *);
69213 - return string(buf, end, "(null)", spec);
69214 + return string(buf, end, "(nil)", spec);
69215 }
69216
69217 switch (*fmt) {
69218 @@ -883,6 +892,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69219 /* Fallthrough */
69220 case 'S':
69221 case 's':
69222 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69223 + break;
69224 +#else
69225 + return symbol_string(buf, end, ptr, spec, *fmt);
69226 +#endif
69227 + case 'A':
69228 + case 'a':
69229 case 'B':
69230 return symbol_string(buf, end, ptr, spec, *fmt);
69231 case 'R':
69232 @@ -1653,11 +1669,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69233 typeof(type) value; \
69234 if (sizeof(type) == 8) { \
69235 args = PTR_ALIGN(args, sizeof(u32)); \
69236 - *(u32 *)&value = *(u32 *)args; \
69237 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69238 + *(u32 *)&value = *(const u32 *)args; \
69239 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69240 } else { \
69241 args = PTR_ALIGN(args, sizeof(type)); \
69242 - value = *(typeof(type) *)args; \
69243 + value = *(const typeof(type) *)args; \
69244 } \
69245 args += sizeof(type); \
69246 value; \
69247 @@ -1720,7 +1736,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69248 case FORMAT_TYPE_STR: {
69249 const char *str_arg = args;
69250 args += strlen(str_arg) + 1;
69251 - str = string(str, end, (char *)str_arg, spec);
69252 + str = string(str, end, str_arg, spec);
69253 break;
69254 }
69255
69256 diff --git a/localversion-grsec b/localversion-grsec
69257 new file mode 100644
69258 index 0000000..7cd6065
69259 --- /dev/null
69260 +++ b/localversion-grsec
69261 @@ -0,0 +1 @@
69262 +-grsec
69263 diff --git a/mm/Kconfig b/mm/Kconfig
69264 index e338407..4210331 100644
69265 --- a/mm/Kconfig
69266 +++ b/mm/Kconfig
69267 @@ -247,10 +247,10 @@ config KSM
69268 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69269
69270 config DEFAULT_MMAP_MIN_ADDR
69271 - int "Low address space to protect from user allocation"
69272 + int "Low address space to protect from user allocation"
69273 depends on MMU
69274 - default 4096
69275 - help
69276 + default 65536
69277 + help
69278 This is the portion of low virtual memory which should be protected
69279 from userspace allocation. Keeping a user from writing to low pages
69280 can help reduce the impact of kernel NULL pointer bugs.
69281 @@ -280,7 +280,7 @@ config MEMORY_FAILURE
69282
69283 config HWPOISON_INJECT
69284 tristate "HWPoison pages injector"
69285 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
69286 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
69287 select PROC_PAGE_MONITOR
69288
69289 config NOMMU_INITIAL_TRIM_EXCESS
69290 diff --git a/mm/filemap.c b/mm/filemap.c
69291 index 79c4b2b..596b417 100644
69292 --- a/mm/filemap.c
69293 +++ b/mm/filemap.c
69294 @@ -1762,7 +1762,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69295 struct address_space *mapping = file->f_mapping;
69296
69297 if (!mapping->a_ops->readpage)
69298 - return -ENOEXEC;
69299 + return -ENODEV;
69300 file_accessed(file);
69301 vma->vm_ops = &generic_file_vm_ops;
69302 vma->vm_flags |= VM_CAN_NONLINEAR;
69303 @@ -2168,6 +2168,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69304 *pos = i_size_read(inode);
69305
69306 if (limit != RLIM_INFINITY) {
69307 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69308 if (*pos >= limit) {
69309 send_sig(SIGXFSZ, current, 0);
69310 return -EFBIG;
69311 diff --git a/mm/fremap.c b/mm/fremap.c
69312 index 9ed4fd4..c42648d 100644
69313 --- a/mm/fremap.c
69314 +++ b/mm/fremap.c
69315 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69316 retry:
69317 vma = find_vma(mm, start);
69318
69319 +#ifdef CONFIG_PAX_SEGMEXEC
69320 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69321 + goto out;
69322 +#endif
69323 +
69324 /*
69325 * Make sure the vma is shared, that it supports prefaulting,
69326 * and that the remapped range is valid and fully within
69327 diff --git a/mm/highmem.c b/mm/highmem.c
69328 index 57d82c6..e9e0552 100644
69329 --- a/mm/highmem.c
69330 +++ b/mm/highmem.c
69331 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69332 * So no dangers, even with speculative execution.
69333 */
69334 page = pte_page(pkmap_page_table[i]);
69335 + pax_open_kernel();
69336 pte_clear(&init_mm, (unsigned long)page_address(page),
69337 &pkmap_page_table[i]);
69338 -
69339 + pax_close_kernel();
69340 set_page_address(page, NULL);
69341 need_flush = 1;
69342 }
69343 @@ -186,9 +187,11 @@ start:
69344 }
69345 }
69346 vaddr = PKMAP_ADDR(last_pkmap_nr);
69347 +
69348 + pax_open_kernel();
69349 set_pte_at(&init_mm, vaddr,
69350 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69351 -
69352 + pax_close_kernel();
69353 pkmap_count[last_pkmap_nr] = 1;
69354 set_page_address(page, (void *)vaddr);
69355
69356 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69357 index f0e5306..cb9398e 100644
69358 --- a/mm/huge_memory.c
69359 +++ b/mm/huge_memory.c
69360 @@ -733,7 +733,7 @@ out:
69361 * run pte_offset_map on the pmd, if an huge pmd could
69362 * materialize from under us from a different thread.
69363 */
69364 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69365 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69366 return VM_FAULT_OOM;
69367 /* if an huge pmd materialized from under us just retry later */
69368 if (unlikely(pmd_trans_huge(*pmd)))
69369 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69370 index ae8f708..3f36aec 100644
69371 --- a/mm/hugetlb.c
69372 +++ b/mm/hugetlb.c
69373 @@ -2157,6 +2157,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
69374 kref_get(&reservations->refs);
69375 }
69376
69377 +static void resv_map_put(struct vm_area_struct *vma)
69378 +{
69379 + struct resv_map *reservations = vma_resv_map(vma);
69380 +
69381 + if (!reservations)
69382 + return;
69383 + kref_put(&reservations->refs, resv_map_release);
69384 +}
69385 +
69386 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
69387 {
69388 struct hstate *h = hstate_vma(vma);
69389 @@ -2173,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
69390 reserve = (end - start) -
69391 region_count(&reservations->regions, start, end);
69392
69393 - kref_put(&reservations->refs, resv_map_release);
69394 + resv_map_put(vma);
69395
69396 if (reserve) {
69397 hugetlb_acct_memory(h, -reserve);
69398 @@ -2437,6 +2446,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69399 return 1;
69400 }
69401
69402 +#ifdef CONFIG_PAX_SEGMEXEC
69403 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69404 +{
69405 + struct mm_struct *mm = vma->vm_mm;
69406 + struct vm_area_struct *vma_m;
69407 + unsigned long address_m;
69408 + pte_t *ptep_m;
69409 +
69410 + vma_m = pax_find_mirror_vma(vma);
69411 + if (!vma_m)
69412 + return;
69413 +
69414 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69415 + address_m = address + SEGMEXEC_TASK_SIZE;
69416 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69417 + get_page(page_m);
69418 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
69419 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69420 +}
69421 +#endif
69422 +
69423 /*
69424 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69425 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
69426 @@ -2549,6 +2579,11 @@ retry_avoidcopy:
69427 make_huge_pte(vma, new_page, 1));
69428 page_remove_rmap(old_page);
69429 hugepage_add_new_anon_rmap(new_page, vma, address);
69430 +
69431 +#ifdef CONFIG_PAX_SEGMEXEC
69432 + pax_mirror_huge_pte(vma, address, new_page);
69433 +#endif
69434 +
69435 /* Make the old page be freed below */
69436 new_page = old_page;
69437 mmu_notifier_invalidate_range_end(mm,
69438 @@ -2703,6 +2738,10 @@ retry:
69439 && (vma->vm_flags & VM_SHARED)));
69440 set_huge_pte_at(mm, address, ptep, new_pte);
69441
69442 +#ifdef CONFIG_PAX_SEGMEXEC
69443 + pax_mirror_huge_pte(vma, address, page);
69444 +#endif
69445 +
69446 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69447 /* Optimization, do the COW without a second fault */
69448 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
69449 @@ -2732,6 +2771,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69450 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69451 struct hstate *h = hstate_vma(vma);
69452
69453 +#ifdef CONFIG_PAX_SEGMEXEC
69454 + struct vm_area_struct *vma_m;
69455 +#endif
69456 +
69457 address &= huge_page_mask(h);
69458
69459 ptep = huge_pte_offset(mm, address);
69460 @@ -2745,6 +2788,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69461 VM_FAULT_SET_HINDEX(h - hstates);
69462 }
69463
69464 +#ifdef CONFIG_PAX_SEGMEXEC
69465 + vma_m = pax_find_mirror_vma(vma);
69466 + if (vma_m) {
69467 + unsigned long address_m;
69468 +
69469 + if (vma->vm_start > vma_m->vm_start) {
69470 + address_m = address;
69471 + address -= SEGMEXEC_TASK_SIZE;
69472 + vma = vma_m;
69473 + h = hstate_vma(vma);
69474 + } else
69475 + address_m = address + SEGMEXEC_TASK_SIZE;
69476 +
69477 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
69478 + return VM_FAULT_OOM;
69479 + address_m &= HPAGE_MASK;
69480 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
69481 + }
69482 +#endif
69483 +
69484 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
69485 if (!ptep)
69486 return VM_FAULT_OOM;
69487 @@ -2990,12 +3053,16 @@ int hugetlb_reserve_pages(struct inode *inode,
69488 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
69489 }
69490
69491 - if (chg < 0)
69492 - return chg;
69493 + if (chg < 0) {
69494 + ret = chg;
69495 + goto out_err;
69496 + }
69497
69498 /* There must be enough pages in the subpool for the mapping */
69499 - if (hugepage_subpool_get_pages(spool, chg))
69500 - return -ENOSPC;
69501 + if (hugepage_subpool_get_pages(spool, chg)) {
69502 + ret = -ENOSPC;
69503 + goto out_err;
69504 + }
69505
69506 /*
69507 * Check enough hugepages are available for the reservation.
69508 @@ -3004,7 +3071,7 @@ int hugetlb_reserve_pages(struct inode *inode,
69509 ret = hugetlb_acct_memory(h, chg);
69510 if (ret < 0) {
69511 hugepage_subpool_put_pages(spool, chg);
69512 - return ret;
69513 + goto out_err;
69514 }
69515
69516 /*
69517 @@ -3021,6 +3088,10 @@ int hugetlb_reserve_pages(struct inode *inode,
69518 if (!vma || vma->vm_flags & VM_MAYSHARE)
69519 region_add(&inode->i_mapping->private_list, from, to);
69520 return 0;
69521 +out_err:
69522 + if (vma)
69523 + resv_map_put(vma);
69524 + return ret;
69525 }
69526
69527 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
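A minimal standalone sketch (plain userspace C, not kernel code) of the kref-style put pattern that the new resv_map_put() helper above wraps; the real helper takes the vma and looks the map up via vma_resv_map(), here the map is passed directly for brevity:

#include <stdio.h>
#include <stdlib.h>

struct resv_map {
	int refs;				/* stands in for struct kref */
};

static void resv_map_release(struct resv_map *map)
{
	printf("releasing map %p\n", (void *)map);
	free(map);
}

static void resv_map_put(struct resv_map *map)
{
	if (!map)				/* shared mappings carry no private map */
		return;
	if (--map->refs == 0)			/* kref_put() analogue */
		resv_map_release(map);
}

int main(void)
{
	struct resv_map *map = malloc(sizeof(*map));

	map->refs = 1;
	resv_map_put(map);			/* last reference: releases the map */
	resv_map_put(NULL);			/* harmless, like the map-less error paths */
	return 0;
}

The value of the helper shows in the out_err path of hugetlb_reserve_pages() above: every failure exit can now drop the reservation-map reference unconditionally instead of leaking it.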
69528 diff --git a/mm/internal.h b/mm/internal.h
69529 index 2189af4..f2ca332 100644
69530 --- a/mm/internal.h
69531 +++ b/mm/internal.h
69532 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
69533 * in mm/page_alloc.c
69534 */
69535 extern void __free_pages_bootmem(struct page *page, unsigned int order);
69536 +extern void free_compound_page(struct page *page);
69537 extern void prep_compound_page(struct page *page, unsigned long order);
69538 #ifdef CONFIG_MEMORY_FAILURE
69539 extern bool is_free_buddy_page(struct page *page);
69540 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
69541 index 45eb621..6ccd8ea 100644
69542 --- a/mm/kmemleak.c
69543 +++ b/mm/kmemleak.c
69544 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
69545
69546 for (i = 0; i < object->trace_len; i++) {
69547 void *ptr = (void *)object->trace[i];
69548 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
69549 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
69550 }
69551 }
69552
69553 diff --git a/mm/maccess.c b/mm/maccess.c
69554 index d53adf9..03a24bf 100644
69555 --- a/mm/maccess.c
69556 +++ b/mm/maccess.c
69557 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
69558 set_fs(KERNEL_DS);
69559 pagefault_disable();
69560 ret = __copy_from_user_inatomic(dst,
69561 - (__force const void __user *)src, size);
69562 + (const void __force_user *)src, size);
69563 pagefault_enable();
69564 set_fs(old_fs);
69565
69566 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
69567
69568 set_fs(KERNEL_DS);
69569 pagefault_disable();
69570 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
69571 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
69572 pagefault_enable();
69573 set_fs(old_fs);
69574
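The __force_user spelling introduced in the mm/maccess.c hunks above is assumed to be a PaX shorthand for the pair "__force __user"; a simplified sketch of how such sparse address-space annotations are typically wired up (modelled on include/linux/compiler.h; everything other than __user/__force is an assumption):

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user	__force __user		/* assumed PaX definition */

/* Deliberately reinterpreting a kernel pointer as a user pointer: */
static inline const void __user *kernel_ptr_as_user(const void *src)
{
	return (const void __force_user *)src;
}

Under sparse the cast is still marked intentional (__force) while keeping the user address space (__user); without the checker the macros expand to nothing.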
69575 diff --git a/mm/madvise.c b/mm/madvise.c
69576 index 1ccbba5..79e16f9 100644
69577 --- a/mm/madvise.c
69578 +++ b/mm/madvise.c
69579 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
69580 pgoff_t pgoff;
69581 unsigned long new_flags = vma->vm_flags;
69582
69583 +#ifdef CONFIG_PAX_SEGMEXEC
69584 + struct vm_area_struct *vma_m;
69585 +#endif
69586 +
69587 switch (behavior) {
69588 case MADV_NORMAL:
69589 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
69590 @@ -116,6 +120,13 @@ success:
69591 /*
69592 * vm_flags is protected by the mmap_sem held in write mode.
69593 */
69594 +
69595 +#ifdef CONFIG_PAX_SEGMEXEC
69596 + vma_m = pax_find_mirror_vma(vma);
69597 + if (vma_m)
69598 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
69599 +#endif
69600 +
69601 vma->vm_flags = new_flags;
69602
69603 out:
69604 @@ -174,6 +185,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69605 struct vm_area_struct ** prev,
69606 unsigned long start, unsigned long end)
69607 {
69608 +
69609 +#ifdef CONFIG_PAX_SEGMEXEC
69610 + struct vm_area_struct *vma_m;
69611 +#endif
69612 +
69613 *prev = vma;
69614 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
69615 return -EINVAL;
69616 @@ -186,6 +202,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69617 zap_page_range(vma, start, end - start, &details);
69618 } else
69619 zap_page_range(vma, start, end - start, NULL);
69620 +
69621 +#ifdef CONFIG_PAX_SEGMEXEC
69622 + vma_m = pax_find_mirror_vma(vma);
69623 + if (vma_m) {
69624 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
69625 + struct zap_details details = {
69626 + .nonlinear_vma = vma_m,
69627 + .last_index = ULONG_MAX,
69628 + };
69629 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
69630 + } else
69631 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
69632 + }
69633 +#endif
69634 +
69635 return 0;
69636 }
69637
69638 @@ -384,6 +415,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
69639 if (end < start)
69640 goto out;
69641
69642 +#ifdef CONFIG_PAX_SEGMEXEC
69643 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69644 + if (end > SEGMEXEC_TASK_SIZE)
69645 + goto out;
69646 + } else
69647 +#endif
69648 +
69649 + if (end > TASK_SIZE)
69650 + goto out;
69651 +
69652 error = 0;
69653 if (end == start)
69654 goto out;
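A minimal sketch (userspace C with assumed i386 PaX constants) of the SEGMEXEC mirror arithmetic used in the madvise_dontneed() hunk above and throughout this patch: the executable mirror of a mapping sits exactly SEGMEXEC_TASK_SIZE above the data view, so a zap of [start, end) is simply repeated at start + SEGMEXEC_TASK_SIZE:

#include <stdio.h>

#define TASK_SIZE		0xC0000000UL		/* assumed: 3 GiB i386 user space */
#define SEGMEXEC_TASK_SIZE	(TASK_SIZE / 2)		/* assumed: data view in the lower half */

int main(void)
{
	unsigned long start = 0x08048000UL;		/* illustrative ELF load address */
	unsigned long len = 0x1000UL;

	printf("zap data view:   %#lx-%#lx\n", start, start + len);
	printf("zap exec mirror: %#lx-%#lx\n",
	       start + SEGMEXEC_TASK_SIZE,
	       start + SEGMEXEC_TASK_SIZE + len);
	return 0;
}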
69655 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
69656 index 97cc273..6ed703f 100644
69657 --- a/mm/memory-failure.c
69658 +++ b/mm/memory-failure.c
69659 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
69660
69661 int sysctl_memory_failure_recovery __read_mostly = 1;
69662
69663 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69664 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69665
69666 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69667
69668 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
69669 pfn, t->comm, t->pid);
69670 si.si_signo = SIGBUS;
69671 si.si_errno = 0;
69672 - si.si_addr = (void *)addr;
69673 + si.si_addr = (void __user *)addr;
69674 #ifdef __ARCH_SI_TRAPNO
69675 si.si_trapno = trapno;
69676 #endif
69677 @@ -1036,7 +1036,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69678 }
69679
69680 nr_pages = 1 << compound_trans_order(hpage);
69681 - atomic_long_add(nr_pages, &mce_bad_pages);
69682 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
69683
69684 /*
69685 * We need/can do nothing about count=0 pages.
69686 @@ -1066,7 +1066,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69687 if (!PageHWPoison(hpage)
69688 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
69689 || (p != hpage && TestSetPageHWPoison(hpage))) {
69690 - atomic_long_sub(nr_pages, &mce_bad_pages);
69691 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69692 return 0;
69693 }
69694 set_page_hwpoison_huge_page(hpage);
69695 @@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
69696 }
69697 if (hwpoison_filter(p)) {
69698 if (TestClearPageHWPoison(p))
69699 - atomic_long_sub(nr_pages, &mce_bad_pages);
69700 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69701 unlock_page(hpage);
69702 put_page(hpage);
69703 return 0;
69704 @@ -1319,7 +1319,7 @@ int unpoison_memory(unsigned long pfn)
69705 return 0;
69706 }
69707 if (TestClearPageHWPoison(p))
69708 - atomic_long_sub(nr_pages, &mce_bad_pages);
69709 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69710 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
69711 return 0;
69712 }
69713 @@ -1333,7 +1333,7 @@ int unpoison_memory(unsigned long pfn)
69714 */
69715 if (TestClearPageHWPoison(page)) {
69716 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
69717 - atomic_long_sub(nr_pages, &mce_bad_pages);
69718 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69719 freeit = 1;
69720 if (PageHuge(page))
69721 clear_page_hwpoison_huge_page(page);
69722 @@ -1446,7 +1446,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
69723 }
69724 done:
69725 if (!PageHWPoison(hpage))
69726 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
69727 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
69728 set_page_hwpoison_huge_page(hpage);
69729 dequeue_hwpoisoned_huge_page(hpage);
69730 /* keep elevated page count for bad page */
69731 @@ -1577,7 +1577,7 @@ int soft_offline_page(struct page *page, int flags)
69732 return ret;
69733
69734 done:
69735 - atomic_long_add(1, &mce_bad_pages);
69736 + atomic_long_add_unchecked(1, &mce_bad_pages);
69737 SetPageHWPoison(page);
69738 /* keep elevated page count for bad page */
69739 return ret;
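The mm/memory-failure.c hunks above convert the mce_bad_pages statistics counter to the _unchecked atomic variants; under the PaX REFCOUNT convention the checked atomics trap on overflow (to stop reference-count overflows), while counters whose overflow is harmless use the unchecked forms. A conceptual single-threaded sketch of that split (plain C, not the kernel implementation, atomicity ignored for brevity):

#include <stdio.h>
#include <limits.h>

static long add_checked(long *v, long i)
{
	long result;

	if (__builtin_add_overflow(*v, i, &result)) {
		fprintf(stderr, "overflow detected, refusing to wrap\n");
		return *v;			/* the real code would trap/saturate */
	}
	return *v = result;
}

static long add_unchecked(long *v, long i)
{
	return *v += i;				/* plain addition, no policing */
}

int main(void)
{
	long bad_pages = 0;

	add_unchecked(&bad_pages, 4);		/* fine for a statistics counter */
	add_checked(&bad_pages, LONG_MAX);	/* reports the would-be overflow */
	printf("bad_pages = %ld\n", bad_pages);
	return 0;
}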
69740 diff --git a/mm/memory.c b/mm/memory.c
69741 index 6105f47..3363489 100644
69742 --- a/mm/memory.c
69743 +++ b/mm/memory.c
69744 @@ -434,8 +434,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
69745 return;
69746
69747 pmd = pmd_offset(pud, start);
69748 +
69749 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
69750 pud_clear(pud);
69751 pmd_free_tlb(tlb, pmd, start);
69752 +#endif
69753 +
69754 }
69755
69756 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69757 @@ -466,9 +470,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69758 if (end - 1 > ceiling - 1)
69759 return;
69760
69761 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
69762 pud = pud_offset(pgd, start);
69763 pgd_clear(pgd);
69764 pud_free_tlb(tlb, pud, start);
69765 +#endif
69766 +
69767 }
69768
69769 /*
69770 @@ -1597,12 +1604,6 @@ no_page_table:
69771 return page;
69772 }
69773
69774 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
69775 -{
69776 - return stack_guard_page_start(vma, addr) ||
69777 - stack_guard_page_end(vma, addr+PAGE_SIZE);
69778 -}
69779 -
69780 /**
69781 * __get_user_pages() - pin user pages in memory
69782 * @tsk: task_struct of target task
69783 @@ -1675,10 +1676,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69784 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
69785 i = 0;
69786
69787 - do {
69788 + while (nr_pages) {
69789 struct vm_area_struct *vma;
69790
69791 - vma = find_extend_vma(mm, start);
69792 + vma = find_vma(mm, start);
69793 if (!vma && in_gate_area(mm, start)) {
69794 unsigned long pg = start & PAGE_MASK;
69795 pgd_t *pgd;
69796 @@ -1726,7 +1727,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69797 goto next_page;
69798 }
69799
69800 - if (!vma ||
69801 + if (!vma || start < vma->vm_start ||
69802 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
69803 !(vm_flags & vma->vm_flags))
69804 return i ? : -EFAULT;
69805 @@ -1753,11 +1754,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69806 int ret;
69807 unsigned int fault_flags = 0;
69808
69809 - /* For mlock, just skip the stack guard page. */
69810 - if (foll_flags & FOLL_MLOCK) {
69811 - if (stack_guard_page(vma, start))
69812 - goto next_page;
69813 - }
69814 if (foll_flags & FOLL_WRITE)
69815 fault_flags |= FAULT_FLAG_WRITE;
69816 if (nonblocking)
69817 @@ -1831,7 +1827,7 @@ next_page:
69818 start += PAGE_SIZE;
69819 nr_pages--;
69820 } while (nr_pages && start < vma->vm_end);
69821 - } while (nr_pages);
69822 + }
69823 return i;
69824 }
69825 EXPORT_SYMBOL(__get_user_pages);
69826 @@ -2038,6 +2034,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
69827 page_add_file_rmap(page);
69828 set_pte_at(mm, addr, pte, mk_pte(page, prot));
69829
69830 +#ifdef CONFIG_PAX_SEGMEXEC
69831 + pax_mirror_file_pte(vma, addr, page, ptl);
69832 +#endif
69833 +
69834 retval = 0;
69835 pte_unmap_unlock(pte, ptl);
69836 return retval;
69837 @@ -2072,10 +2072,22 @@ out:
69838 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
69839 struct page *page)
69840 {
69841 +
69842 +#ifdef CONFIG_PAX_SEGMEXEC
69843 + struct vm_area_struct *vma_m;
69844 +#endif
69845 +
69846 if (addr < vma->vm_start || addr >= vma->vm_end)
69847 return -EFAULT;
69848 if (!page_count(page))
69849 return -EINVAL;
69850 +
69851 +#ifdef CONFIG_PAX_SEGMEXEC
69852 + vma_m = pax_find_mirror_vma(vma);
69853 + if (vma_m)
69854 + vma_m->vm_flags |= VM_INSERTPAGE;
69855 +#endif
69856 +
69857 vma->vm_flags |= VM_INSERTPAGE;
69858 return insert_page(vma, addr, page, vma->vm_page_prot);
69859 }
69860 @@ -2161,6 +2173,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
69861 unsigned long pfn)
69862 {
69863 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
69864 + BUG_ON(vma->vm_mirror);
69865
69866 if (addr < vma->vm_start || addr >= vma->vm_end)
69867 return -EFAULT;
69868 @@ -2368,7 +2381,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
69869
69870 BUG_ON(pud_huge(*pud));
69871
69872 - pmd = pmd_alloc(mm, pud, addr);
69873 + pmd = (mm == &init_mm) ?
69874 + pmd_alloc_kernel(mm, pud, addr) :
69875 + pmd_alloc(mm, pud, addr);
69876 if (!pmd)
69877 return -ENOMEM;
69878 do {
69879 @@ -2388,7 +2403,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
69880 unsigned long next;
69881 int err;
69882
69883 - pud = pud_alloc(mm, pgd, addr);
69884 + pud = (mm == &init_mm) ?
69885 + pud_alloc_kernel(mm, pgd, addr) :
69886 + pud_alloc(mm, pgd, addr);
69887 if (!pud)
69888 return -ENOMEM;
69889 do {
69890 @@ -2476,6 +2493,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
69891 copy_user_highpage(dst, src, va, vma);
69892 }
69893
69894 +#ifdef CONFIG_PAX_SEGMEXEC
69895 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
69896 +{
69897 + struct mm_struct *mm = vma->vm_mm;
69898 + spinlock_t *ptl;
69899 + pte_t *pte, entry;
69900 +
69901 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
69902 + entry = *pte;
69903 + if (!pte_present(entry)) {
69904 + if (!pte_none(entry)) {
69905 + BUG_ON(pte_file(entry));
69906 + free_swap_and_cache(pte_to_swp_entry(entry));
69907 + pte_clear_not_present_full(mm, address, pte, 0);
69908 + }
69909 + } else {
69910 + struct page *page;
69911 +
69912 + flush_cache_page(vma, address, pte_pfn(entry));
69913 + entry = ptep_clear_flush(vma, address, pte);
69914 + BUG_ON(pte_dirty(entry));
69915 + page = vm_normal_page(vma, address, entry);
69916 + if (page) {
69917 + update_hiwater_rss(mm);
69918 + if (PageAnon(page))
69919 + dec_mm_counter_fast(mm, MM_ANONPAGES);
69920 + else
69921 + dec_mm_counter_fast(mm, MM_FILEPAGES);
69922 + page_remove_rmap(page);
69923 + page_cache_release(page);
69924 + }
69925 + }
69926 + pte_unmap_unlock(pte, ptl);
69927 +}
69928 +
69929 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
69930 + *
69931 + * the ptl of the lower mapped page is held on entry and is not released on exit
69932 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
69933 + */
69934 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69935 +{
69936 + struct mm_struct *mm = vma->vm_mm;
69937 + unsigned long address_m;
69938 + spinlock_t *ptl_m;
69939 + struct vm_area_struct *vma_m;
69940 + pmd_t *pmd_m;
69941 + pte_t *pte_m, entry_m;
69942 +
69943 + BUG_ON(!page_m || !PageAnon(page_m));
69944 +
69945 + vma_m = pax_find_mirror_vma(vma);
69946 + if (!vma_m)
69947 + return;
69948 +
69949 + BUG_ON(!PageLocked(page_m));
69950 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69951 + address_m = address + SEGMEXEC_TASK_SIZE;
69952 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69953 + pte_m = pte_offset_map(pmd_m, address_m);
69954 + ptl_m = pte_lockptr(mm, pmd_m);
69955 + if (ptl != ptl_m) {
69956 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69957 + if (!pte_none(*pte_m))
69958 + goto out;
69959 + }
69960 +
69961 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69962 + page_cache_get(page_m);
69963 + page_add_anon_rmap(page_m, vma_m, address_m);
69964 + inc_mm_counter_fast(mm, MM_ANONPAGES);
69965 + set_pte_at(mm, address_m, pte_m, entry_m);
69966 + update_mmu_cache(vma_m, address_m, entry_m);
69967 +out:
69968 + if (ptl != ptl_m)
69969 + spin_unlock(ptl_m);
69970 + pte_unmap(pte_m);
69971 + unlock_page(page_m);
69972 +}
69973 +
69974 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69975 +{
69976 + struct mm_struct *mm = vma->vm_mm;
69977 + unsigned long address_m;
69978 + spinlock_t *ptl_m;
69979 + struct vm_area_struct *vma_m;
69980 + pmd_t *pmd_m;
69981 + pte_t *pte_m, entry_m;
69982 +
69983 + BUG_ON(!page_m || PageAnon(page_m));
69984 +
69985 + vma_m = pax_find_mirror_vma(vma);
69986 + if (!vma_m)
69987 + return;
69988 +
69989 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69990 + address_m = address + SEGMEXEC_TASK_SIZE;
69991 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69992 + pte_m = pte_offset_map(pmd_m, address_m);
69993 + ptl_m = pte_lockptr(mm, pmd_m);
69994 + if (ptl != ptl_m) {
69995 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69996 + if (!pte_none(*pte_m))
69997 + goto out;
69998 + }
69999 +
70000 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70001 + page_cache_get(page_m);
70002 + page_add_file_rmap(page_m);
70003 + inc_mm_counter_fast(mm, MM_FILEPAGES);
70004 + set_pte_at(mm, address_m, pte_m, entry_m);
70005 + update_mmu_cache(vma_m, address_m, entry_m);
70006 +out:
70007 + if (ptl != ptl_m)
70008 + spin_unlock(ptl_m);
70009 + pte_unmap(pte_m);
70010 +}
70011 +
70012 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70013 +{
70014 + struct mm_struct *mm = vma->vm_mm;
70015 + unsigned long address_m;
70016 + spinlock_t *ptl_m;
70017 + struct vm_area_struct *vma_m;
70018 + pmd_t *pmd_m;
70019 + pte_t *pte_m, entry_m;
70020 +
70021 + vma_m = pax_find_mirror_vma(vma);
70022 + if (!vma_m)
70023 + return;
70024 +
70025 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70026 + address_m = address + SEGMEXEC_TASK_SIZE;
70027 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70028 + pte_m = pte_offset_map(pmd_m, address_m);
70029 + ptl_m = pte_lockptr(mm, pmd_m);
70030 + if (ptl != ptl_m) {
70031 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70032 + if (!pte_none(*pte_m))
70033 + goto out;
70034 + }
70035 +
70036 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70037 + set_pte_at(mm, address_m, pte_m, entry_m);
70038 +out:
70039 + if (ptl != ptl_m)
70040 + spin_unlock(ptl_m);
70041 + pte_unmap(pte_m);
70042 +}
70043 +
70044 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70045 +{
70046 + struct page *page_m;
70047 + pte_t entry;
70048 +
70049 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70050 + goto out;
70051 +
70052 + entry = *pte;
70053 + page_m = vm_normal_page(vma, address, entry);
70054 + if (!page_m)
70055 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70056 + else if (PageAnon(page_m)) {
70057 + if (pax_find_mirror_vma(vma)) {
70058 + pte_unmap_unlock(pte, ptl);
70059 + lock_page(page_m);
70060 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70061 + if (pte_same(entry, *pte))
70062 + pax_mirror_anon_pte(vma, address, page_m, ptl);
70063 + else
70064 + unlock_page(page_m);
70065 + }
70066 + } else
70067 + pax_mirror_file_pte(vma, address, page_m, ptl);
70068 +
70069 +out:
70070 + pte_unmap_unlock(pte, ptl);
70071 +}
70072 +#endif
70073 +
70074 /*
70075 * This routine handles present pages, when users try to write
70076 * to a shared page. It is done by copying the page to a new address
70077 @@ -2687,6 +2884,12 @@ gotten:
70078 */
70079 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70080 if (likely(pte_same(*page_table, orig_pte))) {
70081 +
70082 +#ifdef CONFIG_PAX_SEGMEXEC
70083 + if (pax_find_mirror_vma(vma))
70084 + BUG_ON(!trylock_page(new_page));
70085 +#endif
70086 +
70087 if (old_page) {
70088 if (!PageAnon(old_page)) {
70089 dec_mm_counter_fast(mm, MM_FILEPAGES);
70090 @@ -2738,6 +2941,10 @@ gotten:
70091 page_remove_rmap(old_page);
70092 }
70093
70094 +#ifdef CONFIG_PAX_SEGMEXEC
70095 + pax_mirror_anon_pte(vma, address, new_page, ptl);
70096 +#endif
70097 +
70098 /* Free the old page.. */
70099 new_page = old_page;
70100 ret |= VM_FAULT_WRITE;
70101 @@ -3017,6 +3224,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70102 swap_free(entry);
70103 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70104 try_to_free_swap(page);
70105 +
70106 +#ifdef CONFIG_PAX_SEGMEXEC
70107 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70108 +#endif
70109 +
70110 unlock_page(page);
70111 if (swapcache) {
70112 /*
70113 @@ -3040,6 +3252,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70114
70115 /* No need to invalidate - it was non-present before */
70116 update_mmu_cache(vma, address, page_table);
70117 +
70118 +#ifdef CONFIG_PAX_SEGMEXEC
70119 + pax_mirror_anon_pte(vma, address, page, ptl);
70120 +#endif
70121 +
70122 unlock:
70123 pte_unmap_unlock(page_table, ptl);
70124 out:
70125 @@ -3059,40 +3276,6 @@ out_release:
70126 }
70127
70128 /*
70129 - * This is like a special single-page "expand_{down|up}wards()",
70130 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
70131 - * doesn't hit another vma.
70132 - */
70133 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70134 -{
70135 - address &= PAGE_MASK;
70136 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70137 - struct vm_area_struct *prev = vma->vm_prev;
70138 -
70139 - /*
70140 - * Is there a mapping abutting this one below?
70141 - *
70142 - * That's only ok if it's the same stack mapping
70143 - * that has gotten split..
70144 - */
70145 - if (prev && prev->vm_end == address)
70146 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70147 -
70148 - expand_downwards(vma, address - PAGE_SIZE);
70149 - }
70150 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70151 - struct vm_area_struct *next = vma->vm_next;
70152 -
70153 - /* As VM_GROWSDOWN but s/below/above/ */
70154 - if (next && next->vm_start == address + PAGE_SIZE)
70155 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70156 -
70157 - expand_upwards(vma, address + PAGE_SIZE);
70158 - }
70159 - return 0;
70160 -}
70161 -
70162 -/*
70163 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70164 * but allow concurrent faults), and pte mapped but not yet locked.
70165 * We return with mmap_sem still held, but pte unmapped and unlocked.
70166 @@ -3101,27 +3284,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70167 unsigned long address, pte_t *page_table, pmd_t *pmd,
70168 unsigned int flags)
70169 {
70170 - struct page *page;
70171 + struct page *page = NULL;
70172 spinlock_t *ptl;
70173 pte_t entry;
70174
70175 - pte_unmap(page_table);
70176 -
70177 - /* Check if we need to add a guard page to the stack */
70178 - if (check_stack_guard_page(vma, address) < 0)
70179 - return VM_FAULT_SIGBUS;
70180 -
70181 - /* Use the zero-page for reads */
70182 if (!(flags & FAULT_FLAG_WRITE)) {
70183 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70184 vma->vm_page_prot));
70185 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70186 + ptl = pte_lockptr(mm, pmd);
70187 + spin_lock(ptl);
70188 if (!pte_none(*page_table))
70189 goto unlock;
70190 goto setpte;
70191 }
70192
70193 /* Allocate our own private page. */
70194 + pte_unmap(page_table);
70195 +
70196 if (unlikely(anon_vma_prepare(vma)))
70197 goto oom;
70198 page = alloc_zeroed_user_highpage_movable(vma, address);
70199 @@ -3140,6 +3319,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70200 if (!pte_none(*page_table))
70201 goto release;
70202
70203 +#ifdef CONFIG_PAX_SEGMEXEC
70204 + if (pax_find_mirror_vma(vma))
70205 + BUG_ON(!trylock_page(page));
70206 +#endif
70207 +
70208 inc_mm_counter_fast(mm, MM_ANONPAGES);
70209 page_add_new_anon_rmap(page, vma, address);
70210 setpte:
70211 @@ -3147,6 +3331,12 @@ setpte:
70212
70213 /* No need to invalidate - it was non-present before */
70214 update_mmu_cache(vma, address, page_table);
70215 +
70216 +#ifdef CONFIG_PAX_SEGMEXEC
70217 + if (page)
70218 + pax_mirror_anon_pte(vma, address, page, ptl);
70219 +#endif
70220 +
70221 unlock:
70222 pte_unmap_unlock(page_table, ptl);
70223 return 0;
70224 @@ -3290,6 +3480,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70225 */
70226 /* Only go through if we didn't race with anybody else... */
70227 if (likely(pte_same(*page_table, orig_pte))) {
70228 +
70229 +#ifdef CONFIG_PAX_SEGMEXEC
70230 + if (anon && pax_find_mirror_vma(vma))
70231 + BUG_ON(!trylock_page(page));
70232 +#endif
70233 +
70234 flush_icache_page(vma, page);
70235 entry = mk_pte(page, vma->vm_page_prot);
70236 if (flags & FAULT_FLAG_WRITE)
70237 @@ -3309,6 +3505,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70238
70239 /* no need to invalidate: a not-present page won't be cached */
70240 update_mmu_cache(vma, address, page_table);
70241 +
70242 +#ifdef CONFIG_PAX_SEGMEXEC
70243 + if (anon)
70244 + pax_mirror_anon_pte(vma, address, page, ptl);
70245 + else
70246 + pax_mirror_file_pte(vma, address, page, ptl);
70247 +#endif
70248 +
70249 } else {
70250 if (cow_page)
70251 mem_cgroup_uncharge_page(cow_page);
70252 @@ -3462,6 +3666,12 @@ int handle_pte_fault(struct mm_struct *mm,
70253 if (flags & FAULT_FLAG_WRITE)
70254 flush_tlb_fix_spurious_fault(vma, address);
70255 }
70256 +
70257 +#ifdef CONFIG_PAX_SEGMEXEC
70258 + pax_mirror_pte(vma, address, pte, pmd, ptl);
70259 + return 0;
70260 +#endif
70261 +
70262 unlock:
70263 pte_unmap_unlock(pte, ptl);
70264 return 0;
70265 @@ -3478,6 +3688,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70266 pmd_t *pmd;
70267 pte_t *pte;
70268
70269 +#ifdef CONFIG_PAX_SEGMEXEC
70270 + struct vm_area_struct *vma_m;
70271 +#endif
70272 +
70273 __set_current_state(TASK_RUNNING);
70274
70275 count_vm_event(PGFAULT);
70276 @@ -3489,6 +3703,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70277 if (unlikely(is_vm_hugetlb_page(vma)))
70278 return hugetlb_fault(mm, vma, address, flags);
70279
70280 +#ifdef CONFIG_PAX_SEGMEXEC
70281 + vma_m = pax_find_mirror_vma(vma);
70282 + if (vma_m) {
70283 + unsigned long address_m;
70284 + pgd_t *pgd_m;
70285 + pud_t *pud_m;
70286 + pmd_t *pmd_m;
70287 +
70288 + if (vma->vm_start > vma_m->vm_start) {
70289 + address_m = address;
70290 + address -= SEGMEXEC_TASK_SIZE;
70291 + vma = vma_m;
70292 + } else
70293 + address_m = address + SEGMEXEC_TASK_SIZE;
70294 +
70295 + pgd_m = pgd_offset(mm, address_m);
70296 + pud_m = pud_alloc(mm, pgd_m, address_m);
70297 + if (!pud_m)
70298 + return VM_FAULT_OOM;
70299 + pmd_m = pmd_alloc(mm, pud_m, address_m);
70300 + if (!pmd_m)
70301 + return VM_FAULT_OOM;
70302 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70303 + return VM_FAULT_OOM;
70304 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70305 + }
70306 +#endif
70307 +
70308 pgd = pgd_offset(mm, address);
70309 pud = pud_alloc(mm, pgd, address);
70310 if (!pud)
70311 @@ -3518,7 +3760,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70312 * run pte_offset_map on the pmd, if an huge pmd could
70313 * materialize from under us from a different thread.
70314 */
70315 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
70316 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70317 return VM_FAULT_OOM;
70318 /* if an huge pmd materialized from under us just retry later */
70319 if (unlikely(pmd_trans_huge(*pmd)))
70320 @@ -3555,6 +3797,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70321 spin_unlock(&mm->page_table_lock);
70322 return 0;
70323 }
70324 +
70325 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70326 +{
70327 + pud_t *new = pud_alloc_one(mm, address);
70328 + if (!new)
70329 + return -ENOMEM;
70330 +
70331 + smp_wmb(); /* See comment in __pte_alloc */
70332 +
70333 + spin_lock(&mm->page_table_lock);
70334 + if (pgd_present(*pgd)) /* Another has populated it */
70335 + pud_free(mm, new);
70336 + else
70337 + pgd_populate_kernel(mm, pgd, new);
70338 + spin_unlock(&mm->page_table_lock);
70339 + return 0;
70340 +}
70341 #endif /* __PAGETABLE_PUD_FOLDED */
70342
70343 #ifndef __PAGETABLE_PMD_FOLDED
70344 @@ -3585,6 +3844,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
70345 spin_unlock(&mm->page_table_lock);
70346 return 0;
70347 }
70348 +
70349 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70350 +{
70351 + pmd_t *new = pmd_alloc_one(mm, address);
70352 + if (!new)
70353 + return -ENOMEM;
70354 +
70355 + smp_wmb(); /* See comment in __pte_alloc */
70356 +
70357 + spin_lock(&mm->page_table_lock);
70358 +#ifndef __ARCH_HAS_4LEVEL_HACK
70359 + if (pud_present(*pud)) /* Another has populated it */
70360 + pmd_free(mm, new);
70361 + else
70362 + pud_populate_kernel(mm, pud, new);
70363 +#else
70364 + if (pgd_present(*pud)) /* Another has populated it */
70365 + pmd_free(mm, new);
70366 + else
70367 + pgd_populate_kernel(mm, pud, new);
70368 +#endif /* __ARCH_HAS_4LEVEL_HACK */
70369 + spin_unlock(&mm->page_table_lock);
70370 + return 0;
70371 +}
70372 #endif /* __PAGETABLE_PMD_FOLDED */
70373
70374 int make_pages_present(unsigned long addr, unsigned long end)
70375 @@ -3622,7 +3905,7 @@ static int __init gate_vma_init(void)
70376 gate_vma.vm_start = FIXADDR_USER_START;
70377 gate_vma.vm_end = FIXADDR_USER_END;
70378 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
70379 - gate_vma.vm_page_prot = __P101;
70380 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
70381
70382 return 0;
70383 }
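The __pud_alloc_kernel()/__pmd_alloc_kernel() helpers added to mm/memory.c above follow the usual allocate-outside-the-lock, recheck-inside pattern. A standalone pthread sketch of that shape (illustrative only; the mutex and slot stand in for the kernel's page_table_lock and pgd entry):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;				/* stands in for the pgd entry */

static int slot_alloc(void)
{
	void *new = malloc(64);			/* pud_alloc_one() analogue, no lock held */

	if (!new)
		return -1;			/* -ENOMEM analogue */

	pthread_mutex_lock(&table_lock);
	if (slot)				/* another thread populated it meanwhile */
		free(new);
	else
		slot = new;			/* pgd_populate_kernel() analogue */
	pthread_mutex_unlock(&table_lock);
	return 0;
}

int main(void)
{
	slot_alloc();
	slot_alloc();				/* second call just frees its allocation */
	printf("slot = %p\n", slot);
	return 0;
}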
70384 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
70385 index bf5b485..088e1e5 100644
70386 --- a/mm/mempolicy.c
70387 +++ b/mm/mempolicy.c
70388 @@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70389 unsigned long vmstart;
70390 unsigned long vmend;
70391
70392 +#ifdef CONFIG_PAX_SEGMEXEC
70393 + struct vm_area_struct *vma_m;
70394 +#endif
70395 +
70396 vma = find_vma(mm, start);
70397 if (!vma || vma->vm_start > start)
70398 return -EFAULT;
70399 @@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70400 if (err)
70401 goto out;
70402 }
70403 +
70404 +#ifdef CONFIG_PAX_SEGMEXEC
70405 + vma_m = pax_find_mirror_vma(vma);
70406 + if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
70407 + err = vma->vm_ops->set_policy(vma_m, new_pol);
70408 + if (err)
70409 + goto out;
70410 + }
70411 +#endif
70412 +
70413 }
70414
70415 out:
70416 @@ -1105,6 +1119,17 @@ static long do_mbind(unsigned long start, unsigned long len,
70417
70418 if (end < start)
70419 return -EINVAL;
70420 +
70421 +#ifdef CONFIG_PAX_SEGMEXEC
70422 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70423 + if (end > SEGMEXEC_TASK_SIZE)
70424 + return -EINVAL;
70425 + } else
70426 +#endif
70427 +
70428 + if (end > TASK_SIZE)
70429 + return -EINVAL;
70430 +
70431 if (end == start)
70432 return 0;
70433
70434 @@ -1328,8 +1353,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70435 */
70436 tcred = __task_cred(task);
70437 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70438 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70439 - !capable(CAP_SYS_NICE)) {
70440 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70441 rcu_read_unlock();
70442 err = -EPERM;
70443 goto out_put;
70444 @@ -1360,6 +1384,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70445 goto out;
70446 }
70447
70448 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70449 + if (mm != current->mm &&
70450 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70451 + mmput(mm);
70452 + err = -EPERM;
70453 + goto out;
70454 + }
70455 +#endif
70456 +
70457 err = do_migrate_pages(mm, old, new,
70458 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
70459
70460 diff --git a/mm/mlock.c b/mm/mlock.c
70461 index ef726e8..13e0901 100644
70462 --- a/mm/mlock.c
70463 +++ b/mm/mlock.c
70464 @@ -13,6 +13,7 @@
70465 #include <linux/pagemap.h>
70466 #include <linux/mempolicy.h>
70467 #include <linux/syscalls.h>
70468 +#include <linux/security.h>
70469 #include <linux/sched.h>
70470 #include <linux/export.h>
70471 #include <linux/rmap.h>
70472 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70473 return -EINVAL;
70474 if (end == start)
70475 return 0;
70476 + if (end > TASK_SIZE)
70477 + return -EINVAL;
70478 +
70479 vma = find_vma(current->mm, start);
70480 if (!vma || vma->vm_start > start)
70481 return -ENOMEM;
70482 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70483 for (nstart = start ; ; ) {
70484 vm_flags_t newflags;
70485
70486 +#ifdef CONFIG_PAX_SEGMEXEC
70487 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70488 + break;
70489 +#endif
70490 +
70491 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70492
70493 newflags = vma->vm_flags | VM_LOCKED;
70494 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70495 lock_limit >>= PAGE_SHIFT;
70496
70497 /* check against resource limits */
70498 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70499 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70500 error = do_mlock(start, len, 1);
70501 up_write(&current->mm->mmap_sem);
70502 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70503 static int do_mlockall(int flags)
70504 {
70505 struct vm_area_struct * vma, * prev = NULL;
70506 - unsigned int def_flags = 0;
70507
70508 if (flags & MCL_FUTURE)
70509 - def_flags = VM_LOCKED;
70510 - current->mm->def_flags = def_flags;
70511 + current->mm->def_flags |= VM_LOCKED;
70512 + else
70513 + current->mm->def_flags &= ~VM_LOCKED;
70514 if (flags == MCL_FUTURE)
70515 goto out;
70516
70517 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70518 vm_flags_t newflags;
70519
70520 +#ifdef CONFIG_PAX_SEGMEXEC
70521 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70522 + break;
70523 +#endif
70524 +
70525 + BUG_ON(vma->vm_end > TASK_SIZE);
70526 newflags = vma->vm_flags | VM_LOCKED;
70527 if (!(flags & MCL_CURRENT))
70528 newflags &= ~VM_LOCKED;
70529 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70530 lock_limit >>= PAGE_SHIFT;
70531
70532 ret = -ENOMEM;
70533 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70534 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70535 capable(CAP_IPC_LOCK))
70536 ret = do_mlockall(flags);
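A tiny illustration of why the do_mlockall() hunk above switches from assigning def_flags to OR-ing/AND-ing VM_LOCKED: the old assignment clobbered any unrelated default flags the mm carried, while the bit operations touch only VM_LOCKED (VM_LOCKED uses its 3.4-era value, the other bit is purely illustrative):

#include <stdio.h>

#define VM_LOCKED	0x00002000UL
#define VM_OTHER	0x00000100UL		/* stand-in for unrelated def_flags bits */

int main(void)
{
	unsigned long def_flags = VM_OTHER;

	def_flags |= VM_LOCKED;			/* MCL_FUTURE set: VM_OTHER survives */
	printf("set:   %#lx\n", def_flags);

	def_flags &= ~VM_LOCKED;		/* MCL_FUTURE clear: VM_OTHER survives */
	printf("clear: %#lx\n", def_flags);

	def_flags = VM_LOCKED;			/* the old code path: VM_OTHER is lost */
	printf("old:   %#lx\n", def_flags);
	return 0;
}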
70537 diff --git a/mm/mmap.c b/mm/mmap.c
70538 index 848ef52..d2b586c 100644
70539 --- a/mm/mmap.c
70540 +++ b/mm/mmap.c
70541 @@ -46,6 +46,16 @@
70542 #define arch_rebalance_pgtables(addr, len) (addr)
70543 #endif
70544
70545 +static inline void verify_mm_writelocked(struct mm_struct *mm)
70546 +{
70547 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
70548 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70549 + up_read(&mm->mmap_sem);
70550 + BUG();
70551 + }
70552 +#endif
70553 +}
70554 +
70555 static void unmap_region(struct mm_struct *mm,
70556 struct vm_area_struct *vma, struct vm_area_struct *prev,
70557 unsigned long start, unsigned long end);
70558 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
70559 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
70560 *
70561 */
70562 -pgprot_t protection_map[16] = {
70563 +pgprot_t protection_map[16] __read_only = {
70564 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
70565 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70566 };
70567
70568 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
70569 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70570 {
70571 - return __pgprot(pgprot_val(protection_map[vm_flags &
70572 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
70573 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
70574 pgprot_val(arch_vm_get_page_prot(vm_flags)));
70575 +
70576 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70577 + if (!(__supported_pte_mask & _PAGE_NX) &&
70578 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
70579 + (vm_flags & (VM_READ | VM_WRITE)))
70580 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
70581 +#endif
70582 +
70583 + return prot;
70584 }
70585 EXPORT_SYMBOL(vm_get_page_prot);
70586
70587 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
70588 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
70589 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
70590 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
70591 /*
70592 * Make sure vm_committed_as in one cacheline and not cacheline shared with
70593 * other variables. It can be updated by several CPUs frequently.
70594 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
70595 struct vm_area_struct *next = vma->vm_next;
70596
70597 might_sleep();
70598 + BUG_ON(vma->vm_mirror);
70599 if (vma->vm_ops && vma->vm_ops->close)
70600 vma->vm_ops->close(vma);
70601 if (vma->vm_file) {
70602 @@ -274,6 +295,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
70603 * not page aligned -Ram Gupta
70604 */
70605 rlim = rlimit(RLIMIT_DATA);
70606 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
70607 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
70608 (mm->end_data - mm->start_data) > rlim)
70609 goto out;
70610 @@ -690,6 +712,12 @@ static int
70611 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
70612 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70613 {
70614 +
70615 +#ifdef CONFIG_PAX_SEGMEXEC
70616 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
70617 + return 0;
70618 +#endif
70619 +
70620 if (is_mergeable_vma(vma, file, vm_flags) &&
70621 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70622 if (vma->vm_pgoff == vm_pgoff)
70623 @@ -709,6 +737,12 @@ static int
70624 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70625 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70626 {
70627 +
70628 +#ifdef CONFIG_PAX_SEGMEXEC
70629 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
70630 + return 0;
70631 +#endif
70632 +
70633 if (is_mergeable_vma(vma, file, vm_flags) &&
70634 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70635 pgoff_t vm_pglen;
70636 @@ -751,13 +785,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70637 struct vm_area_struct *vma_merge(struct mm_struct *mm,
70638 struct vm_area_struct *prev, unsigned long addr,
70639 unsigned long end, unsigned long vm_flags,
70640 - struct anon_vma *anon_vma, struct file *file,
70641 + struct anon_vma *anon_vma, struct file *file,
70642 pgoff_t pgoff, struct mempolicy *policy)
70643 {
70644 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
70645 struct vm_area_struct *area, *next;
70646 int err;
70647
70648 +#ifdef CONFIG_PAX_SEGMEXEC
70649 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
70650 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
70651 +
70652 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
70653 +#endif
70654 +
70655 /*
70656 * We later require that vma->vm_flags == vm_flags,
70657 * so this tests vma->vm_flags & VM_SPECIAL, too.
70658 @@ -773,6 +814,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70659 if (next && next->vm_end == end) /* cases 6, 7, 8 */
70660 next = next->vm_next;
70661
70662 +#ifdef CONFIG_PAX_SEGMEXEC
70663 + if (prev)
70664 + prev_m = pax_find_mirror_vma(prev);
70665 + if (area)
70666 + area_m = pax_find_mirror_vma(area);
70667 + if (next)
70668 + next_m = pax_find_mirror_vma(next);
70669 +#endif
70670 +
70671 /*
70672 * Can it merge with the predecessor?
70673 */
70674 @@ -792,9 +842,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70675 /* cases 1, 6 */
70676 err = vma_adjust(prev, prev->vm_start,
70677 next->vm_end, prev->vm_pgoff, NULL);
70678 - } else /* cases 2, 5, 7 */
70679 +
70680 +#ifdef CONFIG_PAX_SEGMEXEC
70681 + if (!err && prev_m)
70682 + err = vma_adjust(prev_m, prev_m->vm_start,
70683 + next_m->vm_end, prev_m->vm_pgoff, NULL);
70684 +#endif
70685 +
70686 + } else { /* cases 2, 5, 7 */
70687 err = vma_adjust(prev, prev->vm_start,
70688 end, prev->vm_pgoff, NULL);
70689 +
70690 +#ifdef CONFIG_PAX_SEGMEXEC
70691 + if (!err && prev_m)
70692 + err = vma_adjust(prev_m, prev_m->vm_start,
70693 + end_m, prev_m->vm_pgoff, NULL);
70694 +#endif
70695 +
70696 + }
70697 if (err)
70698 return NULL;
70699 khugepaged_enter_vma_merge(prev);
70700 @@ -808,12 +873,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70701 mpol_equal(policy, vma_policy(next)) &&
70702 can_vma_merge_before(next, vm_flags,
70703 anon_vma, file, pgoff+pglen)) {
70704 - if (prev && addr < prev->vm_end) /* case 4 */
70705 + if (prev && addr < prev->vm_end) { /* case 4 */
70706 err = vma_adjust(prev, prev->vm_start,
70707 addr, prev->vm_pgoff, NULL);
70708 - else /* cases 3, 8 */
70709 +
70710 +#ifdef CONFIG_PAX_SEGMEXEC
70711 + if (!err && prev_m)
70712 + err = vma_adjust(prev_m, prev_m->vm_start,
70713 + addr_m, prev_m->vm_pgoff, NULL);
70714 +#endif
70715 +
70716 + } else { /* cases 3, 8 */
70717 err = vma_adjust(area, addr, next->vm_end,
70718 next->vm_pgoff - pglen, NULL);
70719 +
70720 +#ifdef CONFIG_PAX_SEGMEXEC
70721 + if (!err && area_m)
70722 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
70723 + next_m->vm_pgoff - pglen, NULL);
70724 +#endif
70725 +
70726 + }
70727 if (err)
70728 return NULL;
70729 khugepaged_enter_vma_merge(area);
70730 @@ -922,14 +1002,11 @@ none:
70731 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
70732 struct file *file, long pages)
70733 {
70734 - const unsigned long stack_flags
70735 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
70736 -
70737 if (file) {
70738 mm->shared_vm += pages;
70739 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
70740 mm->exec_vm += pages;
70741 - } else if (flags & stack_flags)
70742 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
70743 mm->stack_vm += pages;
70744 if (flags & (VM_RESERVED|VM_IO))
70745 mm->reserved_vm += pages;
70746 @@ -969,7 +1046,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70747 * (the exception is when the underlying filesystem is noexec
70748 * mounted, in which case we dont add PROT_EXEC.)
70749 */
70750 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70751 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70752 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
70753 prot |= PROT_EXEC;
70754
70755 @@ -995,7 +1072,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70756 /* Obtain the address to map to. we verify (or select) it and ensure
70757 * that it represents a valid section of the address space.
70758 */
70759 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
70760 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
70761 if (addr & ~PAGE_MASK)
70762 return addr;
70763
70764 @@ -1006,6 +1083,36 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70765 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
70766 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
70767
70768 +#ifdef CONFIG_PAX_MPROTECT
70769 + if (mm->pax_flags & MF_PAX_MPROTECT) {
70770 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
70771 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
70772 + gr_log_rwxmmap(file);
70773 +
70774 +#ifdef CONFIG_PAX_EMUPLT
70775 + vm_flags &= ~VM_EXEC;
70776 +#else
70777 + return -EPERM;
70778 +#endif
70779 +
70780 + }
70781 +
70782 + if (!(vm_flags & VM_EXEC))
70783 + vm_flags &= ~VM_MAYEXEC;
70784 +#else
70785 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70786 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70787 +#endif
70788 + else
70789 + vm_flags &= ~VM_MAYWRITE;
70790 + }
70791 +#endif
70792 +
70793 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70794 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
70795 + vm_flags &= ~VM_PAGEEXEC;
70796 +#endif
70797 +
70798 if (flags & MAP_LOCKED)
70799 if (!can_do_mlock())
70800 return -EPERM;
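A userspace sketch (not kernel code; flag values as in the 3.4 vm_flags layout) of the W^X policy the CONFIG_PAX_MPROTECT block above applies in do_mmap_pgoff(): a mapping may not be writable and executable at once, a non-executable mapping also loses VM_MAYEXEC, and an executable one loses VM_MAYWRITE, so mprotect() cannot re-introduce the forbidden combination later. This follows the EMUPLT branch, which strips VM_EXEC instead of failing with -EPERM:

#include <stdio.h>

#define VM_WRITE	0x00000002UL
#define VM_EXEC		0x00000004UL
#define VM_MAYWRITE	0x00000020UL
#define VM_MAYEXEC	0x00000040UL

static unsigned long apply_mprotect_policy(unsigned long vm_flags)
{
	if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
		vm_flags &= ~VM_EXEC;		/* EMUPLT-style: drop exec, keep write */
	if (!(vm_flags & VM_EXEC))
		vm_flags &= ~VM_MAYEXEC;	/* can never be mprotect()ed executable */
	else
		vm_flags &= ~VM_MAYWRITE;	/* executable stays non-writable */
	return vm_flags;
}

int main(void)
{
	unsigned long flags = VM_WRITE | VM_EXEC | VM_MAYWRITE | VM_MAYEXEC;

	printf("%#lx -> %#lx\n", flags, apply_mprotect_policy(flags));
	return 0;
}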
70801 @@ -1017,6 +1124,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70802 locked += mm->locked_vm;
70803 lock_limit = rlimit(RLIMIT_MEMLOCK);
70804 lock_limit >>= PAGE_SHIFT;
70805 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70806 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
70807 return -EAGAIN;
70808 }
70809 @@ -1087,6 +1195,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70810 if (error)
70811 return error;
70812
70813 + if (!gr_acl_handle_mmap(file, prot))
70814 + return -EACCES;
70815 +
70816 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
70817 }
70818
70819 @@ -1192,7 +1303,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
70820 vm_flags_t vm_flags = vma->vm_flags;
70821
70822 /* If it was private or non-writable, the write bit is already clear */
70823 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
70824 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
70825 return 0;
70826
70827 /* The backer wishes to know when pages are first written to? */
70828 @@ -1241,14 +1352,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
70829 unsigned long charged = 0;
70830 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
70831
70832 +#ifdef CONFIG_PAX_SEGMEXEC
70833 + struct vm_area_struct *vma_m = NULL;
70834 +#endif
70835 +
70836 + /*
70837 + * mm->mmap_sem is required to protect against another thread
70838 + * changing the mappings in case we sleep.
70839 + */
70840 + verify_mm_writelocked(mm);
70841 +
70842 /* Clear old maps */
70843 error = -ENOMEM;
70844 -munmap_back:
70845 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70846 if (vma && vma->vm_start < addr + len) {
70847 if (do_munmap(mm, addr, len))
70848 return -ENOMEM;
70849 - goto munmap_back;
70850 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70851 + BUG_ON(vma && vma->vm_start < addr + len);
70852 }
70853
70854 /* Check against address space limit. */
70855 @@ -1297,6 +1418,16 @@ munmap_back:
70856 goto unacct_error;
70857 }
70858
70859 +#ifdef CONFIG_PAX_SEGMEXEC
70860 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
70861 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70862 + if (!vma_m) {
70863 + error = -ENOMEM;
70864 + goto free_vma;
70865 + }
70866 + }
70867 +#endif
70868 +
70869 vma->vm_mm = mm;
70870 vma->vm_start = addr;
70871 vma->vm_end = addr + len;
70872 @@ -1321,6 +1452,19 @@ munmap_back:
70873 error = file->f_op->mmap(file, vma);
70874 if (error)
70875 goto unmap_and_free_vma;
70876 +
70877 +#ifdef CONFIG_PAX_SEGMEXEC
70878 + if (vma_m && (vm_flags & VM_EXECUTABLE))
70879 + added_exe_file_vma(mm);
70880 +#endif
70881 +
70882 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70883 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
70884 + vma->vm_flags |= VM_PAGEEXEC;
70885 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70886 + }
70887 +#endif
70888 +
70889 if (vm_flags & VM_EXECUTABLE)
70890 added_exe_file_vma(mm);
70891
70892 @@ -1358,6 +1502,11 @@ munmap_back:
70893 vma_link(mm, vma, prev, rb_link, rb_parent);
70894 file = vma->vm_file;
70895
70896 +#ifdef CONFIG_PAX_SEGMEXEC
70897 + if (vma_m)
70898 + BUG_ON(pax_mirror_vma(vma_m, vma));
70899 +#endif
70900 +
70901 /* Once vma denies write, undo our temporary denial count */
70902 if (correct_wcount)
70903 atomic_inc(&inode->i_writecount);
70904 @@ -1366,6 +1515,7 @@ out:
70905
70906 mm->total_vm += len >> PAGE_SHIFT;
70907 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
70908 + track_exec_limit(mm, addr, addr + len, vm_flags);
70909 if (vm_flags & VM_LOCKED) {
70910 if (!mlock_vma_pages_range(vma, addr, addr + len))
70911 mm->locked_vm += (len >> PAGE_SHIFT);
70912 @@ -1383,6 +1533,12 @@ unmap_and_free_vma:
70913 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
70914 charged = 0;
70915 free_vma:
70916 +
70917 +#ifdef CONFIG_PAX_SEGMEXEC
70918 + if (vma_m)
70919 + kmem_cache_free(vm_area_cachep, vma_m);
70920 +#endif
70921 +
70922 kmem_cache_free(vm_area_cachep, vma);
70923 unacct_error:
70924 if (charged)
70925 @@ -1390,6 +1546,44 @@ unacct_error:
70926 return error;
70927 }
70928
70929 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
70930 +{
70931 + if (!vma) {
70932 +#ifdef CONFIG_STACK_GROWSUP
70933 + if (addr > sysctl_heap_stack_gap)
70934 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
70935 + else
70936 + vma = find_vma(current->mm, 0);
70937 + if (vma && (vma->vm_flags & VM_GROWSUP))
70938 + return false;
70939 +#endif
70940 + return true;
70941 + }
70942 +
70943 + if (addr + len > vma->vm_start)
70944 + return false;
70945 +
70946 + if (vma->vm_flags & VM_GROWSDOWN)
70947 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
70948 +#ifdef CONFIG_STACK_GROWSUP
70949 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
70950 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
70951 +#endif
70952 +
70953 + return true;
70954 +}
70955 +
70956 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
70957 +{
70958 + if (vma->vm_start < len)
70959 + return -ENOMEM;
70960 + if (!(vma->vm_flags & VM_GROWSDOWN))
70961 + return vma->vm_start - len;
70962 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
70963 + return vma->vm_start - len - sysctl_heap_stack_gap;
70964 + return -ENOMEM;
70965 +}
70966 +
70967 /* Get an address range which is currently unmapped.
70968 * For shmat() with addr=0.
70969 *
70970 @@ -1416,18 +1610,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
70971 if (flags & MAP_FIXED)
70972 return addr;
70973
70974 +#ifdef CONFIG_PAX_RANDMMAP
70975 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
70976 +#endif
70977 +
70978 if (addr) {
70979 addr = PAGE_ALIGN(addr);
70980 - vma = find_vma(mm, addr);
70981 - if (TASK_SIZE - len >= addr &&
70982 - (!vma || addr + len <= vma->vm_start))
70983 - return addr;
70984 + if (TASK_SIZE - len >= addr) {
70985 + vma = find_vma(mm, addr);
70986 + if (check_heap_stack_gap(vma, addr, len))
70987 + return addr;
70988 + }
70989 }
70990 if (len > mm->cached_hole_size) {
70991 - start_addr = addr = mm->free_area_cache;
70992 + start_addr = addr = mm->free_area_cache;
70993 } else {
70994 - start_addr = addr = TASK_UNMAPPED_BASE;
70995 - mm->cached_hole_size = 0;
70996 + start_addr = addr = mm->mmap_base;
70997 + mm->cached_hole_size = 0;
70998 }
70999
71000 full_search:
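A standalone sketch (userspace C, assumed values) of the gap test that the new check_heap_stack_gap() above performs for the common case of a downward-growing stack neighbour: a candidate mapping ending at addr + len must both avoid the neighbouring vma and leave sysctl_heap_stack_gap bytes of head-room below it. The NULL-vma and VM_GROWSUP branches of the real helper are omitted here:

#include <stdbool.h>
#include <stdio.h>

static unsigned long heap_stack_gap = 64 * 1024;	/* default from the hunk above */

static bool gap_ok(unsigned long addr, unsigned long len,
		   unsigned long stack_start)		/* vm_start of a VM_GROWSDOWN vma */
{
	if (addr + len > stack_start)			/* would overlap the stack vma */
		return false;
	return heap_stack_gap <= stack_start - addr - len;
}

int main(void)
{
	unsigned long stack = 0xbf000000UL;

	printf("%d\n", gap_ok(stack - 0x20000, 0x10000, stack));	/* 1: full 64 KiB gap */
	printf("%d\n", gap_ok(stack - 0x10000, 0x10000, stack));	/* 0: lands inside the gap */
	return 0;
}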
71001 @@ -1438,34 +1637,40 @@ full_search:
71002 * Start a new search - just in case we missed
71003 * some holes.
71004 */
71005 - if (start_addr != TASK_UNMAPPED_BASE) {
71006 - addr = TASK_UNMAPPED_BASE;
71007 - start_addr = addr;
71008 + if (start_addr != mm->mmap_base) {
71009 + start_addr = addr = mm->mmap_base;
71010 mm->cached_hole_size = 0;
71011 goto full_search;
71012 }
71013 return -ENOMEM;
71014 }
71015 - if (!vma || addr + len <= vma->vm_start) {
71016 - /*
71017 - * Remember the place where we stopped the search:
71018 - */
71019 - mm->free_area_cache = addr + len;
71020 - return addr;
71021 - }
71022 + if (check_heap_stack_gap(vma, addr, len))
71023 + break;
71024 if (addr + mm->cached_hole_size < vma->vm_start)
71025 mm->cached_hole_size = vma->vm_start - addr;
71026 addr = vma->vm_end;
71027 }
71028 +
71029 + /*
71030 + * Remember the place where we stopped the search:
71031 + */
71032 + mm->free_area_cache = addr + len;
71033 + return addr;
71034 }
71035 #endif
71036
71037 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71038 {
71039 +
71040 +#ifdef CONFIG_PAX_SEGMEXEC
71041 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71042 + return;
71043 +#endif
71044 +
71045 /*
71046 * Is this a new hole at the lowest possible address?
71047 */
71048 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
71049 + if (addr >= mm->mmap_base && addr < mm->free_area_cache)
71050 mm->free_area_cache = addr;
71051 }
71052
71053 @@ -1481,7 +1686,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71054 {
71055 struct vm_area_struct *vma;
71056 struct mm_struct *mm = current->mm;
71057 - unsigned long addr = addr0, start_addr;
71058 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
71059
71060 /* requested length too big for entire address space */
71061 if (len > TASK_SIZE)
71062 @@ -1490,13 +1695,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71063 if (flags & MAP_FIXED)
71064 return addr;
71065
71066 +#ifdef CONFIG_PAX_RANDMMAP
71067 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71068 +#endif
71069 +
71070 /* requesting a specific address */
71071 if (addr) {
71072 addr = PAGE_ALIGN(addr);
71073 - vma = find_vma(mm, addr);
71074 - if (TASK_SIZE - len >= addr &&
71075 - (!vma || addr + len <= vma->vm_start))
71076 - return addr;
71077 + if (TASK_SIZE - len >= addr) {
71078 + vma = find_vma(mm, addr);
71079 + if (check_heap_stack_gap(vma, addr, len))
71080 + return addr;
71081 + }
71082 }
71083
71084 /* check if free_area_cache is useful for us */
71085 @@ -1520,7 +1730,7 @@ try_again:
71086 * return with success:
71087 */
71088 vma = find_vma(mm, addr);
71089 - if (!vma || addr+len <= vma->vm_start)
71090 + if (check_heap_stack_gap(vma, addr, len))
71091 /* remember the address as a hint for next time */
71092 return (mm->free_area_cache = addr);
71093
71094 @@ -1529,8 +1739,8 @@ try_again:
71095 mm->cached_hole_size = vma->vm_start - addr;
71096
71097 /* try just below the current vma->vm_start */
71098 - addr = vma->vm_start-len;
71099 - } while (len < vma->vm_start);
71100 + addr = skip_heap_stack_gap(vma, len);
71101 + } while (!IS_ERR_VALUE(addr));
71102
71103 fail:
71104 /*
71105 @@ -1553,13 +1763,21 @@ fail:
71106 * can happen with large stack limits and large mmap()
71107 * allocations.
71108 */
71109 + mm->mmap_base = TASK_UNMAPPED_BASE;
71110 +
71111 +#ifdef CONFIG_PAX_RANDMMAP
71112 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71113 + mm->mmap_base += mm->delta_mmap;
71114 +#endif
71115 +
71116 + mm->free_area_cache = mm->mmap_base;
71117 mm->cached_hole_size = ~0UL;
71118 - mm->free_area_cache = TASK_UNMAPPED_BASE;
71119 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71120 /*
71121 * Restore the topdown base:
71122 */
71123 - mm->free_area_cache = mm->mmap_base;
71124 + mm->mmap_base = base;
71125 + mm->free_area_cache = base;
71126 mm->cached_hole_size = ~0UL;
71127
71128 return addr;
71129 @@ -1568,6 +1786,12 @@ fail:
71130
71131 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71132 {
71133 +
71134 +#ifdef CONFIG_PAX_SEGMEXEC
71135 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71136 + return;
71137 +#endif
71138 +
71139 /*
71140 * Is this a new hole at the highest possible address?
71141 */
71142 @@ -1575,8 +1799,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71143 mm->free_area_cache = addr;
71144
71145 /* dont allow allocations above current base */
71146 - if (mm->free_area_cache > mm->mmap_base)
71147 + if (mm->free_area_cache > mm->mmap_base) {
71148 mm->free_area_cache = mm->mmap_base;
71149 + mm->cached_hole_size = ~0UL;
71150 + }
71151 }
71152
71153 unsigned long
71154 @@ -1672,6 +1898,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
71155 return vma;
71156 }
71157
71158 +#ifdef CONFIG_PAX_SEGMEXEC
71159 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71160 +{
71161 + struct vm_area_struct *vma_m;
71162 +
71163 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71164 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71165 + BUG_ON(vma->vm_mirror);
71166 + return NULL;
71167 + }
71168 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71169 + vma_m = vma->vm_mirror;
71170 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71171 + BUG_ON(vma->vm_file != vma_m->vm_file);
71172 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71173 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71174 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71175 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71176 + return vma_m;
71177 +}
71178 +#endif
71179 +
71180 /*
71181 * Verify that the stack growth is acceptable and
71182 * update accounting. This is shared with both the
71183 @@ -1688,6 +1936,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71184 return -ENOMEM;
71185
71186 /* Stack limit test */
71187 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
71188 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71189 return -ENOMEM;
71190
71191 @@ -1698,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71192 locked = mm->locked_vm + grow;
71193 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71194 limit >>= PAGE_SHIFT;
71195 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71196 if (locked > limit && !capable(CAP_IPC_LOCK))
71197 return -ENOMEM;
71198 }
71199 @@ -1728,37 +1978,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71200 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71201 * vma is the last one with address > vma->vm_end. Have to extend vma.
71202 */
71203 +#ifndef CONFIG_IA64
71204 +static
71205 +#endif
71206 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71207 {
71208 int error;
71209 + bool locknext;
71210
71211 if (!(vma->vm_flags & VM_GROWSUP))
71212 return -EFAULT;
71213
71214 + /* Also guard against wrapping around to address 0. */
71215 + if (address < PAGE_ALIGN(address+1))
71216 + address = PAGE_ALIGN(address+1);
71217 + else
71218 + return -ENOMEM;
71219 +
71220 /*
71221 * We must make sure the anon_vma is allocated
71222 * so that the anon_vma locking is not a noop.
71223 */
71224 if (unlikely(anon_vma_prepare(vma)))
71225 return -ENOMEM;
71226 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71227 + if (locknext && anon_vma_prepare(vma->vm_next))
71228 + return -ENOMEM;
71229 vma_lock_anon_vma(vma);
71230 + if (locknext)
71231 + vma_lock_anon_vma(vma->vm_next);
71232
71233 /*
71234 * vma->vm_start/vm_end cannot change under us because the caller
71235 * is required to hold the mmap_sem in read mode. We need the
71236 - * anon_vma lock to serialize against concurrent expand_stacks.
71237 - * Also guard against wrapping around to address 0.
71238 + * anon_vma locks to serialize against concurrent expand_stacks
71239 + * and expand_upwards.
71240 */
71241 - if (address < PAGE_ALIGN(address+4))
71242 - address = PAGE_ALIGN(address+4);
71243 - else {
71244 - vma_unlock_anon_vma(vma);
71245 - return -ENOMEM;
71246 - }
71247 error = 0;
71248
71249 /* Somebody else might have raced and expanded it already */
71250 - if (address > vma->vm_end) {
71251 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71252 + error = -ENOMEM;
71253 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71254 unsigned long size, grow;
71255
71256 size = address - vma->vm_start;
71257 @@ -1773,6 +2034,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71258 }
71259 }
71260 }
71261 + if (locknext)
71262 + vma_unlock_anon_vma(vma->vm_next);
71263 vma_unlock_anon_vma(vma);
71264 khugepaged_enter_vma_merge(vma);
71265 return error;
71266 @@ -1786,6 +2049,8 @@ int expand_downwards(struct vm_area_struct *vma,
71267 unsigned long address)
71268 {
71269 int error;
71270 + bool lockprev = false;
71271 + struct vm_area_struct *prev;
71272
71273 /*
71274 * We must make sure the anon_vma is allocated
71275 @@ -1799,6 +2064,15 @@ int expand_downwards(struct vm_area_struct *vma,
71276 if (error)
71277 return error;
71278
71279 + prev = vma->vm_prev;
71280 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
71281 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
71282 +#endif
71283 + if (lockprev && anon_vma_prepare(prev))
71284 + return -ENOMEM;
71285 + if (lockprev)
71286 + vma_lock_anon_vma(prev);
71287 +
71288 vma_lock_anon_vma(vma);
71289
71290 /*
71291 @@ -1808,9 +2082,17 @@ int expand_downwards(struct vm_area_struct *vma,
71292 */
71293
71294 /* Somebody else might have raced and expanded it already */
71295 - if (address < vma->vm_start) {
71296 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
71297 + error = -ENOMEM;
71298 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
71299 unsigned long size, grow;
71300
71301 +#ifdef CONFIG_PAX_SEGMEXEC
71302 + struct vm_area_struct *vma_m;
71303 +
71304 + vma_m = pax_find_mirror_vma(vma);
71305 +#endif
71306 +
71307 size = vma->vm_end - address;
71308 grow = (vma->vm_start - address) >> PAGE_SHIFT;
71309
71310 @@ -1820,11 +2102,22 @@ int expand_downwards(struct vm_area_struct *vma,
71311 if (!error) {
71312 vma->vm_start = address;
71313 vma->vm_pgoff -= grow;
71314 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
71315 +
71316 +#ifdef CONFIG_PAX_SEGMEXEC
71317 + if (vma_m) {
71318 + vma_m->vm_start -= grow << PAGE_SHIFT;
71319 + vma_m->vm_pgoff -= grow;
71320 + }
71321 +#endif
71322 +
71323 perf_event_mmap(vma);
71324 }
71325 }
71326 }
71327 vma_unlock_anon_vma(vma);
71328 + if (lockprev)
71329 + vma_unlock_anon_vma(prev);
71330 khugepaged_enter_vma_merge(vma);
71331 return error;
71332 }
71333 @@ -1894,6 +2187,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
71334 do {
71335 long nrpages = vma_pages(vma);
71336
71337 +#ifdef CONFIG_PAX_SEGMEXEC
71338 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
71339 + vma = remove_vma(vma);
71340 + continue;
71341 + }
71342 +#endif
71343 +
71344 mm->total_vm -= nrpages;
71345 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
71346 vma = remove_vma(vma);
71347 @@ -1939,6 +2239,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
71348 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
71349 vma->vm_prev = NULL;
71350 do {
71351 +
71352 +#ifdef CONFIG_PAX_SEGMEXEC
71353 + if (vma->vm_mirror) {
71354 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
71355 + vma->vm_mirror->vm_mirror = NULL;
71356 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
71357 + vma->vm_mirror = NULL;
71358 + }
71359 +#endif
71360 +
71361 rb_erase(&vma->vm_rb, &mm->mm_rb);
71362 mm->map_count--;
71363 tail_vma = vma;
71364 @@ -1967,14 +2277,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71365 struct vm_area_struct *new;
71366 int err = -ENOMEM;
71367
71368 +#ifdef CONFIG_PAX_SEGMEXEC
71369 + struct vm_area_struct *vma_m, *new_m = NULL;
71370 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
71371 +#endif
71372 +
71373 if (is_vm_hugetlb_page(vma) && (addr &
71374 ~(huge_page_mask(hstate_vma(vma)))))
71375 return -EINVAL;
71376
71377 +#ifdef CONFIG_PAX_SEGMEXEC
71378 + vma_m = pax_find_mirror_vma(vma);
71379 +#endif
71380 +
71381 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71382 if (!new)
71383 goto out_err;
71384
71385 +#ifdef CONFIG_PAX_SEGMEXEC
71386 + if (vma_m) {
71387 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71388 + if (!new_m) {
71389 + kmem_cache_free(vm_area_cachep, new);
71390 + goto out_err;
71391 + }
71392 + }
71393 +#endif
71394 +
71395 /* most fields are the same, copy all, and then fixup */
71396 *new = *vma;
71397
71398 @@ -1987,6 +2316,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71399 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71400 }
71401
71402 +#ifdef CONFIG_PAX_SEGMEXEC
71403 + if (vma_m) {
71404 + *new_m = *vma_m;
71405 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
71406 + new_m->vm_mirror = new;
71407 + new->vm_mirror = new_m;
71408 +
71409 + if (new_below)
71410 + new_m->vm_end = addr_m;
71411 + else {
71412 + new_m->vm_start = addr_m;
71413 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71414 + }
71415 + }
71416 +#endif
71417 +
71418 pol = mpol_dup(vma_policy(vma));
71419 if (IS_ERR(pol)) {
71420 err = PTR_ERR(pol);
71421 @@ -2012,6 +2357,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71422 else
71423 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71424
71425 +#ifdef CONFIG_PAX_SEGMEXEC
71426 + if (!err && vma_m) {
71427 + if (anon_vma_clone(new_m, vma_m))
71428 + goto out_free_mpol;
71429 +
71430 + mpol_get(pol);
71431 + vma_set_policy(new_m, pol);
71432 +
71433 + if (new_m->vm_file) {
71434 + get_file(new_m->vm_file);
71435 + if (vma_m->vm_flags & VM_EXECUTABLE)
71436 + added_exe_file_vma(mm);
71437 + }
71438 +
71439 + if (new_m->vm_ops && new_m->vm_ops->open)
71440 + new_m->vm_ops->open(new_m);
71441 +
71442 + if (new_below)
71443 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71444 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71445 + else
71446 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71447 +
71448 + if (err) {
71449 + if (new_m->vm_ops && new_m->vm_ops->close)
71450 + new_m->vm_ops->close(new_m);
71451 + if (new_m->vm_file) {
71452 + if (vma_m->vm_flags & VM_EXECUTABLE)
71453 + removed_exe_file_vma(mm);
71454 + fput(new_m->vm_file);
71455 + }
71456 + mpol_put(pol);
71457 + }
71458 + }
71459 +#endif
71460 +
71461 /* Success. */
71462 if (!err)
71463 return 0;
71464 @@ -2024,10 +2405,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71465 removed_exe_file_vma(mm);
71466 fput(new->vm_file);
71467 }
71468 - unlink_anon_vmas(new);
71469 out_free_mpol:
71470 mpol_put(pol);
71471 out_free_vma:
71472 +
71473 +#ifdef CONFIG_PAX_SEGMEXEC
71474 + if (new_m) {
71475 + unlink_anon_vmas(new_m);
71476 + kmem_cache_free(vm_area_cachep, new_m);
71477 + }
71478 +#endif
71479 +
71480 + unlink_anon_vmas(new);
71481 kmem_cache_free(vm_area_cachep, new);
71482 out_err:
71483 return err;
71484 @@ -2040,6 +2429,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71485 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71486 unsigned long addr, int new_below)
71487 {
71488 +
71489 +#ifdef CONFIG_PAX_SEGMEXEC
71490 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71491 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71492 + if (mm->map_count >= sysctl_max_map_count-1)
71493 + return -ENOMEM;
71494 + } else
71495 +#endif
71496 +
71497 if (mm->map_count >= sysctl_max_map_count)
71498 return -ENOMEM;
71499
71500 @@ -2051,11 +2449,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71501 * work. This now handles partial unmappings.
71502 * Jeremy Fitzhardinge <jeremy@goop.org>
71503 */
71504 +#ifdef CONFIG_PAX_SEGMEXEC
71505 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71506 {
71507 + int ret = __do_munmap(mm, start, len);
71508 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71509 + return ret;
71510 +
71511 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71512 +}
71513 +
71514 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71515 +#else
71516 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71517 +#endif
71518 +{
71519 unsigned long end;
71520 struct vm_area_struct *vma, *prev, *last;
71521
71522 + /*
71523 + * mm->mmap_sem is required to protect against another thread
71524 + * changing the mappings in case we sleep.
71525 + */
71526 + verify_mm_writelocked(mm);
71527 +
71528 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
71529 return -EINVAL;
71530
71531 @@ -2130,6 +2547,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71532 /* Fix up all other VM information */
71533 remove_vma_list(mm, vma);
71534
71535 + track_exec_limit(mm, start, end, 0UL);
71536 +
71537 return 0;
71538 }
71539 EXPORT_SYMBOL(do_munmap);
71540 @@ -2139,6 +2558,13 @@ int vm_munmap(unsigned long start, size_t len)
71541 int ret;
71542 struct mm_struct *mm = current->mm;
71543
71544 +
71545 +#ifdef CONFIG_PAX_SEGMEXEC
71546 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
71547 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
71548 + return -EINVAL;
71549 +#endif
71550 +
71551 down_write(&mm->mmap_sem);
71552 ret = do_munmap(mm, start, len);
71553 up_write(&mm->mmap_sem);
71554 @@ -2152,16 +2578,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
71555 return vm_munmap(addr, len);
71556 }
71557
71558 -static inline void verify_mm_writelocked(struct mm_struct *mm)
71559 -{
71560 -#ifdef CONFIG_DEBUG_VM
71561 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71562 - WARN_ON(1);
71563 - up_read(&mm->mmap_sem);
71564 - }
71565 -#endif
71566 -}
71567 -
71568 /*
71569 * this is really a simplified "do_mmap". it only handles
71570 * anonymous maps. eventually we may be able to do some
71571 @@ -2175,6 +2591,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71572 struct rb_node ** rb_link, * rb_parent;
71573 pgoff_t pgoff = addr >> PAGE_SHIFT;
71574 int error;
71575 + unsigned long charged;
71576
71577 len = PAGE_ALIGN(len);
71578 if (!len)
71579 @@ -2186,16 +2603,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71580
71581 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
71582
71583 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
71584 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
71585 + flags &= ~VM_EXEC;
71586 +
71587 +#ifdef CONFIG_PAX_MPROTECT
71588 + if (mm->pax_flags & MF_PAX_MPROTECT)
71589 + flags &= ~VM_MAYEXEC;
71590 +#endif
71591 +
71592 + }
71593 +#endif
71594 +
71595 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
71596 if (error & ~PAGE_MASK)
71597 return error;
71598
71599 + charged = len >> PAGE_SHIFT;
71600 +
71601 /*
71602 * mlock MCL_FUTURE?
71603 */
71604 if (mm->def_flags & VM_LOCKED) {
71605 unsigned long locked, lock_limit;
71606 - locked = len >> PAGE_SHIFT;
71607 + locked = charged;
71608 locked += mm->locked_vm;
71609 lock_limit = rlimit(RLIMIT_MEMLOCK);
71610 lock_limit >>= PAGE_SHIFT;
71611 @@ -2212,22 +2643,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71612 /*
71613 * Clear old maps. this also does some error checking for us
71614 */
71615 - munmap_back:
71616 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71617 if (vma && vma->vm_start < addr + len) {
71618 if (do_munmap(mm, addr, len))
71619 return -ENOMEM;
71620 - goto munmap_back;
71621 - }
71622 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71623 + BUG_ON(vma && vma->vm_start < addr + len);
71624 + }
71625
71626 /* Check against address space limits *after* clearing old maps... */
71627 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
71628 + if (!may_expand_vm(mm, charged))
71629 return -ENOMEM;
71630
71631 if (mm->map_count > sysctl_max_map_count)
71632 return -ENOMEM;
71633
71634 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
71635 + if (security_vm_enough_memory_mm(mm, charged))
71636 return -ENOMEM;
71637
71638 /* Can we just expand an old private anonymous mapping? */
71639 @@ -2241,7 +2672,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71640 */
71641 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71642 if (!vma) {
71643 - vm_unacct_memory(len >> PAGE_SHIFT);
71644 + vm_unacct_memory(charged);
71645 return -ENOMEM;
71646 }
71647
71648 @@ -2255,11 +2686,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
71649 vma_link(mm, vma, prev, rb_link, rb_parent);
71650 out:
71651 perf_event_mmap(vma);
71652 - mm->total_vm += len >> PAGE_SHIFT;
71653 + mm->total_vm += charged;
71654 if (flags & VM_LOCKED) {
71655 if (!mlock_vma_pages_range(vma, addr, addr + len))
71656 - mm->locked_vm += (len >> PAGE_SHIFT);
71657 + mm->locked_vm += charged;
71658 }
71659 + track_exec_limit(mm, addr, addr + len, flags);
71660 return addr;
71661 }
71662
71663 @@ -2315,8 +2747,10 @@ void exit_mmap(struct mm_struct *mm)
71664 * Walk the list again, actually closing and freeing it,
71665 * with preemption enabled, without holding any MM locks.
71666 */
71667 - while (vma)
71668 + while (vma) {
71669 + vma->vm_mirror = NULL;
71670 vma = remove_vma(vma);
71671 + }
71672
71673 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
71674 }
71675 @@ -2330,6 +2764,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71676 struct vm_area_struct * __vma, * prev;
71677 struct rb_node ** rb_link, * rb_parent;
71678
71679 +#ifdef CONFIG_PAX_SEGMEXEC
71680 + struct vm_area_struct *vma_m = NULL;
71681 +#endif
71682 +
71683 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
71684 + return -EPERM;
71685 +
71686 /*
71687 * The vm_pgoff of a purely anonymous vma should be irrelevant
71688 * until its first write fault, when page's anon_vma and index
71689 @@ -2352,7 +2793,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71690 if ((vma->vm_flags & VM_ACCOUNT) &&
71691 security_vm_enough_memory_mm(mm, vma_pages(vma)))
71692 return -ENOMEM;
71693 +
71694 +#ifdef CONFIG_PAX_SEGMEXEC
71695 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
71696 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71697 + if (!vma_m)
71698 + return -ENOMEM;
71699 + }
71700 +#endif
71701 +
71702 vma_link(mm, vma, prev, rb_link, rb_parent);
71703 +
71704 +#ifdef CONFIG_PAX_SEGMEXEC
71705 + if (vma_m)
71706 + BUG_ON(pax_mirror_vma(vma_m, vma));
71707 +#endif
71708 +
71709 return 0;
71710 }
71711
71712 @@ -2371,6 +2827,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71713 struct mempolicy *pol;
71714 bool faulted_in_anon_vma = true;
71715
71716 + BUG_ON(vma->vm_mirror);
71717 +
71718 /*
71719 * If anonymous vma has not yet been faulted, update new pgoff
71720 * to match new location, to increase its chance of merging.
71721 @@ -2438,6 +2896,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71722 return NULL;
71723 }
71724
71725 +#ifdef CONFIG_PAX_SEGMEXEC
71726 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
71727 +{
71728 + struct vm_area_struct *prev_m;
71729 + struct rb_node **rb_link_m, *rb_parent_m;
71730 + struct mempolicy *pol_m;
71731 +
71732 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
71733 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
71734 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
71735 + *vma_m = *vma;
71736 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
71737 + if (anon_vma_clone(vma_m, vma))
71738 + return -ENOMEM;
71739 + pol_m = vma_policy(vma_m);
71740 + mpol_get(pol_m);
71741 + vma_set_policy(vma_m, pol_m);
71742 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
71743 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
71744 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
71745 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
71746 + if (vma_m->vm_file)
71747 + get_file(vma_m->vm_file);
71748 + if (vma_m->vm_ops && vma_m->vm_ops->open)
71749 + vma_m->vm_ops->open(vma_m);
71750 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
71751 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
71752 + vma_m->vm_mirror = vma;
71753 + vma->vm_mirror = vma_m;
71754 + return 0;
71755 +}
71756 +#endif
71757 +
71758 /*
71759 * Return true if the calling process may expand its vm space by the passed
71760 * number of pages
71761 @@ -2449,6 +2940,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
71762
71763 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
71764
71765 +#ifdef CONFIG_PAX_RANDMMAP
71766 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71767 + cur -= mm->brk_gap;
71768 +#endif
71769 +
71770 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
71771 if (cur + npages > lim)
71772 return 0;
71773 return 1;
71774 @@ -2519,6 +3016,22 @@ int install_special_mapping(struct mm_struct *mm,
71775 vma->vm_start = addr;
71776 vma->vm_end = addr + len;
71777
71778 +#ifdef CONFIG_PAX_MPROTECT
71779 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71780 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71781 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
71782 + return -EPERM;
71783 + if (!(vm_flags & VM_EXEC))
71784 + vm_flags &= ~VM_MAYEXEC;
71785 +#else
71786 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71787 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71788 +#endif
71789 + else
71790 + vm_flags &= ~VM_MAYWRITE;
71791 + }
71792 +#endif
71793 +
71794 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
71795 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71796
71797 diff --git a/mm/mprotect.c b/mm/mprotect.c
71798 index a409926..8b32e6d 100644
71799 --- a/mm/mprotect.c
71800 +++ b/mm/mprotect.c
71801 @@ -23,10 +23,17 @@
71802 #include <linux/mmu_notifier.h>
71803 #include <linux/migrate.h>
71804 #include <linux/perf_event.h>
71805 +
71806 +#ifdef CONFIG_PAX_MPROTECT
71807 +#include <linux/elf.h>
71808 +#include <linux/binfmts.h>
71809 +#endif
71810 +
71811 #include <asm/uaccess.h>
71812 #include <asm/pgtable.h>
71813 #include <asm/cacheflush.h>
71814 #include <asm/tlbflush.h>
71815 +#include <asm/mmu_context.h>
71816
71817 #ifndef pgprot_modify
71818 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
71819 @@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
71820 flush_tlb_range(vma, start, end);
71821 }
71822
71823 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71824 +/* called while holding the mmap semaphore for writing, except during stack expansion */
71825 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
71826 +{
71827 + unsigned long oldlimit, newlimit = 0UL;
71828 +
71829 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
71830 + return;
71831 +
71832 + spin_lock(&mm->page_table_lock);
71833 + oldlimit = mm->context.user_cs_limit;
71834 + if ((prot & VM_EXEC) && oldlimit < end)
71835 + /* USER_CS limit moved up */
71836 + newlimit = end;
71837 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
71838 + /* USER_CS limit moved down */
71839 + newlimit = start;
71840 +
71841 + if (newlimit) {
71842 + mm->context.user_cs_limit = newlimit;
71843 +
71844 +#ifdef CONFIG_SMP
71845 + wmb();
71846 + cpus_clear(mm->context.cpu_user_cs_mask);
71847 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
71848 +#endif
71849 +
71850 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
71851 + }
71852 + spin_unlock(&mm->page_table_lock);
71853 + if (newlimit == end) {
71854 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
71855 +
71856 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
71857 + if (is_vm_hugetlb_page(vma))
71858 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
71859 + else
71860 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
71861 + }
71862 +}
71863 +#endif
71864 +
71865 int
71866 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71867 unsigned long start, unsigned long end, unsigned long newflags)
71868 @@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71869 int error;
71870 int dirty_accountable = 0;
71871
71872 +#ifdef CONFIG_PAX_SEGMEXEC
71873 + struct vm_area_struct *vma_m = NULL;
71874 + unsigned long start_m, end_m;
71875 +
71876 + start_m = start + SEGMEXEC_TASK_SIZE;
71877 + end_m = end + SEGMEXEC_TASK_SIZE;
71878 +#endif
71879 +
71880 if (newflags == oldflags) {
71881 *pprev = vma;
71882 return 0;
71883 }
71884
71885 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
71886 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
71887 +
71888 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
71889 + return -ENOMEM;
71890 +
71891 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
71892 + return -ENOMEM;
71893 + }
71894 +
71895 /*
71896 * If we make a private mapping writable we increase our commit;
71897 * but (without finer accounting) cannot reduce our commit if we
71898 @@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71899 }
71900 }
71901
71902 +#ifdef CONFIG_PAX_SEGMEXEC
71903 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
71904 + if (start != vma->vm_start) {
71905 + error = split_vma(mm, vma, start, 1);
71906 + if (error)
71907 + goto fail;
71908 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
71909 + *pprev = (*pprev)->vm_next;
71910 + }
71911 +
71912 + if (end != vma->vm_end) {
71913 + error = split_vma(mm, vma, end, 0);
71914 + if (error)
71915 + goto fail;
71916 + }
71917 +
71918 + if (pax_find_mirror_vma(vma)) {
71919 + error = __do_munmap(mm, start_m, end_m - start_m);
71920 + if (error)
71921 + goto fail;
71922 + } else {
71923 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71924 + if (!vma_m) {
71925 + error = -ENOMEM;
71926 + goto fail;
71927 + }
71928 + vma->vm_flags = newflags;
71929 + error = pax_mirror_vma(vma_m, vma);
71930 + if (error) {
71931 + vma->vm_flags = oldflags;
71932 + goto fail;
71933 + }
71934 + }
71935 + }
71936 +#endif
71937 +
71938 /*
71939 * First try to merge with previous and/or next vma.
71940 */
71941 @@ -204,9 +307,21 @@ success:
71942 * vm_flags and vm_page_prot are protected by the mmap_sem
71943 * held in write mode.
71944 */
71945 +
71946 +#ifdef CONFIG_PAX_SEGMEXEC
71947 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
71948 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
71949 +#endif
71950 +
71951 vma->vm_flags = newflags;
71952 +
71953 +#ifdef CONFIG_PAX_MPROTECT
71954 + if (mm->binfmt && mm->binfmt->handle_mprotect)
71955 + mm->binfmt->handle_mprotect(vma, newflags);
71956 +#endif
71957 +
71958 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
71959 - vm_get_page_prot(newflags));
71960 + vm_get_page_prot(vma->vm_flags));
71961
71962 if (vma_wants_writenotify(vma)) {
71963 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
71964 @@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71965 end = start + len;
71966 if (end <= start)
71967 return -ENOMEM;
71968 +
71969 +#ifdef CONFIG_PAX_SEGMEXEC
71970 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71971 + if (end > SEGMEXEC_TASK_SIZE)
71972 + return -EINVAL;
71973 + } else
71974 +#endif
71975 +
71976 + if (end > TASK_SIZE)
71977 + return -EINVAL;
71978 +
71979 if (!arch_validate_prot(prot))
71980 return -EINVAL;
71981
71982 @@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71983 /*
71984 * Does the application expect PROT_READ to imply PROT_EXEC:
71985 */
71986 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71987 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71988 prot |= PROT_EXEC;
71989
71990 vm_flags = calc_vm_prot_bits(prot);
71991 @@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71992 if (start > vma->vm_start)
71993 prev = vma;
71994
71995 +#ifdef CONFIG_PAX_MPROTECT
71996 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
71997 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
71998 +#endif
71999 +
72000 for (nstart = start ; ; ) {
72001 unsigned long newflags;
72002
72003 @@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72004
72005 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72006 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72007 + if (prot & (PROT_WRITE | PROT_EXEC))
72008 + gr_log_rwxmprotect(vma->vm_file);
72009 +
72010 + error = -EACCES;
72011 + goto out;
72012 + }
72013 +
72014 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72015 error = -EACCES;
72016 goto out;
72017 }
72018 @@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72019 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72020 if (error)
72021 goto out;
72022 +
72023 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
72024 +
72025 nstart = tmp;
72026
72027 if (nstart < prev->vm_end)
72028 diff --git a/mm/mremap.c b/mm/mremap.c
72029 index db8d983..76506cb 100644
72030 --- a/mm/mremap.c
72031 +++ b/mm/mremap.c
72032 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72033 continue;
72034 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72035 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72036 +
72037 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72038 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72039 + pte = pte_exprotect(pte);
72040 +#endif
72041 +
72042 set_pte_at(mm, new_addr, new_pte, pte);
72043 }
72044
72045 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72046 if (is_vm_hugetlb_page(vma))
72047 goto Einval;
72048
72049 +#ifdef CONFIG_PAX_SEGMEXEC
72050 + if (pax_find_mirror_vma(vma))
72051 + goto Einval;
72052 +#endif
72053 +
72054 /* We can't remap across vm area boundaries */
72055 if (old_len > vma->vm_end - addr)
72056 goto Efault;
72057 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
72058 unsigned long ret = -EINVAL;
72059 unsigned long charged = 0;
72060 unsigned long map_flags;
72061 + unsigned long pax_task_size = TASK_SIZE;
72062
72063 if (new_addr & ~PAGE_MASK)
72064 goto out;
72065
72066 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72067 +#ifdef CONFIG_PAX_SEGMEXEC
72068 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72069 + pax_task_size = SEGMEXEC_TASK_SIZE;
72070 +#endif
72071 +
72072 + pax_task_size -= PAGE_SIZE;
72073 +
72074 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72075 goto out;
72076
72077 /* Check if the location we're moving into overlaps the
72078 * old location at all, and fail if it does.
72079 */
72080 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
72081 - goto out;
72082 -
72083 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
72084 + if (addr + old_len > new_addr && new_addr + new_len > addr)
72085 goto out;
72086
72087 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72088 @@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
72089 struct vm_area_struct *vma;
72090 unsigned long ret = -EINVAL;
72091 unsigned long charged = 0;
72092 + unsigned long pax_task_size = TASK_SIZE;
72093
72094 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72095 goto out;
72096 @@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
72097 if (!new_len)
72098 goto out;
72099
72100 +#ifdef CONFIG_PAX_SEGMEXEC
72101 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72102 + pax_task_size = SEGMEXEC_TASK_SIZE;
72103 +#endif
72104 +
72105 + pax_task_size -= PAGE_SIZE;
72106 +
72107 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72108 + old_len > pax_task_size || addr > pax_task_size-old_len)
72109 + goto out;
72110 +
72111 if (flags & MREMAP_FIXED) {
72112 if (flags & MREMAP_MAYMOVE)
72113 ret = mremap_to(addr, old_len, new_addr, new_len);
72114 @@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
72115 addr + new_len);
72116 }
72117 ret = addr;
72118 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72119 goto out;
72120 }
72121 }
72122 @@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
72123 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72124 if (ret)
72125 goto out;
72126 +
72127 + map_flags = vma->vm_flags;
72128 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72129 + if (!(ret & ~PAGE_MASK)) {
72130 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72131 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72132 + }
72133 }
72134 out:
72135 if (ret & ~PAGE_MASK)
72136 diff --git a/mm/nommu.c b/mm/nommu.c
72137 index bb8f4f0..40d3e02 100644
72138 --- a/mm/nommu.c
72139 +++ b/mm/nommu.c
72140 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72141 int sysctl_overcommit_ratio = 50; /* default is 50% */
72142 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72143 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72144 -int heap_stack_gap = 0;
72145
72146 atomic_long_t mmap_pages_allocated;
72147
72148 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72149 EXPORT_SYMBOL(find_vma);
72150
72151 /*
72152 - * find a VMA
72153 - * - we don't extend stack VMAs under NOMMU conditions
72154 - */
72155 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72156 -{
72157 - return find_vma(mm, addr);
72158 -}
72159 -
72160 -/*
72161 * expand a stack to a given address
72162 * - not supported under NOMMU conditions
72163 */
72164 @@ -1580,6 +1570,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72165
72166 /* most fields are the same, copy all, and then fixup */
72167 *new = *vma;
72168 + INIT_LIST_HEAD(&new->anon_vma_chain);
72169 *region = *vma->vm_region;
72170 new->vm_region = region;
72171
72172 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72173 index 918330f..ae99ae1 100644
72174 --- a/mm/page_alloc.c
72175 +++ b/mm/page_alloc.c
72176 @@ -335,7 +335,7 @@ out:
72177 * This usage means that zero-order pages may not be compound.
72178 */
72179
72180 -static void free_compound_page(struct page *page)
72181 +void free_compound_page(struct page *page)
72182 {
72183 __free_pages_ok(page, compound_order(page));
72184 }
72185 @@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72186 int i;
72187 int bad = 0;
72188
72189 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72190 + unsigned long index = 1UL << order;
72191 +#endif
72192 +
72193 trace_mm_page_free(page, order);
72194 kmemcheck_free_shadow(page, order);
72195
72196 @@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72197 debug_check_no_obj_freed(page_address(page),
72198 PAGE_SIZE << order);
72199 }
72200 +
72201 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72202 + for (; index; --index)
72203 + sanitize_highpage(page + index - 1);
72204 +#endif
72205 +
72206 arch_free_page(page, order);
72207 kernel_map_pages(page, 1 << order, 0);
72208
72209 @@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72210 arch_alloc_page(page, order);
72211 kernel_map_pages(page, 1 << order, 1);
72212
72213 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
72214 if (gfp_flags & __GFP_ZERO)
72215 prep_zero_page(page, order, gfp_flags);
72216 +#endif
72217
72218 if (order && (gfp_flags & __GFP_COMP))
72219 prep_compound_page(page, order);
72220 @@ -3523,7 +3535,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72221 unsigned long pfn;
72222
72223 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72224 +#ifdef CONFIG_X86_32
72225 + /* boot failures were seen in VMware 8 on 32-bit vanilla
72226 + kernels since this upstream change */
72227 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72228 +#else
72229 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72230 +#endif
72231 return 1;
72232 }
72233 return 0;
72234 diff --git a/mm/percpu.c b/mm/percpu.c
72235 index bb4be74..a43ea85 100644
72236 --- a/mm/percpu.c
72237 +++ b/mm/percpu.c
72238 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
72239 static unsigned int pcpu_high_unit_cpu __read_mostly;
72240
72241 /* the address of the first chunk which starts with the kernel static area */
72242 -void *pcpu_base_addr __read_mostly;
72243 +void *pcpu_base_addr __read_only;
72244 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72245
72246 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72247 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
72248 index c20ff48..137702a 100644
72249 --- a/mm/process_vm_access.c
72250 +++ b/mm/process_vm_access.c
72251 @@ -13,6 +13,7 @@
72252 #include <linux/uio.h>
72253 #include <linux/sched.h>
72254 #include <linux/highmem.h>
72255 +#include <linux/security.h>
72256 #include <linux/ptrace.h>
72257 #include <linux/slab.h>
72258 #include <linux/syscalls.h>
72259 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72260 size_t iov_l_curr_offset = 0;
72261 ssize_t iov_len;
72262
72263 + return -ENOSYS; // PaX: until properly audited
72264 +
72265 /*
72266 * Work out how many pages of struct pages we're going to need
72267 * when eventually calling get_user_pages
72268 */
72269 for (i = 0; i < riovcnt; i++) {
72270 iov_len = rvec[i].iov_len;
72271 - if (iov_len > 0) {
72272 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
72273 - + iov_len)
72274 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
72275 - / PAGE_SIZE + 1;
72276 - nr_pages = max(nr_pages, nr_pages_iov);
72277 - }
72278 + if (iov_len <= 0)
72279 + continue;
72280 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
72281 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
72282 + nr_pages = max(nr_pages, nr_pages_iov);
72283 }
72284
72285 if (nr_pages == 0)
72286 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72287 goto free_proc_pages;
72288 }
72289
72290 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
72291 + rc = -EPERM;
72292 + goto put_task_struct;
72293 + }
72294 +
72295 mm = mm_access(task, PTRACE_MODE_ATTACH);
72296 if (!mm || IS_ERR(mm)) {
72297 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
72298 diff --git a/mm/rmap.c b/mm/rmap.c
72299 index 5b5ad58..0f77903 100644
72300 --- a/mm/rmap.c
72301 +++ b/mm/rmap.c
72302 @@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72303 struct anon_vma *anon_vma = vma->anon_vma;
72304 struct anon_vma_chain *avc;
72305
72306 +#ifdef CONFIG_PAX_SEGMEXEC
72307 + struct anon_vma_chain *avc_m = NULL;
72308 +#endif
72309 +
72310 might_sleep();
72311 if (unlikely(!anon_vma)) {
72312 struct mm_struct *mm = vma->vm_mm;
72313 @@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72314 if (!avc)
72315 goto out_enomem;
72316
72317 +#ifdef CONFIG_PAX_SEGMEXEC
72318 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
72319 + if (!avc_m)
72320 + goto out_enomem_free_avc;
72321 +#endif
72322 +
72323 anon_vma = find_mergeable_anon_vma(vma);
72324 allocated = NULL;
72325 if (!anon_vma) {
72326 @@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72327 /* page_table_lock to protect against threads */
72328 spin_lock(&mm->page_table_lock);
72329 if (likely(!vma->anon_vma)) {
72330 +
72331 +#ifdef CONFIG_PAX_SEGMEXEC
72332 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
72333 +
72334 + if (vma_m) {
72335 + BUG_ON(vma_m->anon_vma);
72336 + vma_m->anon_vma = anon_vma;
72337 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
72338 + avc_m = NULL;
72339 + }
72340 +#endif
72341 +
72342 vma->anon_vma = anon_vma;
72343 anon_vma_chain_link(vma, avc, anon_vma);
72344 allocated = NULL;
72345 @@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72346
72347 if (unlikely(allocated))
72348 put_anon_vma(allocated);
72349 +
72350 +#ifdef CONFIG_PAX_SEGMEXEC
72351 + if (unlikely(avc_m))
72352 + anon_vma_chain_free(avc_m);
72353 +#endif
72354 +
72355 if (unlikely(avc))
72356 anon_vma_chain_free(avc);
72357 }
72358 return 0;
72359
72360 out_enomem_free_avc:
72361 +
72362 +#ifdef CONFIG_PAX_SEGMEXEC
72363 + if (avc_m)
72364 + anon_vma_chain_free(avc_m);
72365 +#endif
72366 +
72367 anon_vma_chain_free(avc);
72368 out_enomem:
72369 return -ENOMEM;
72370 @@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
72371 * Attach the anon_vmas from src to dst.
72372 * Returns 0 on success, -ENOMEM on failure.
72373 */
72374 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72375 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72376 {
72377 struct anon_vma_chain *avc, *pavc;
72378 struct anon_vma *root = NULL;
72379 @@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
72380 * the corresponding VMA in the parent process is attached to.
72381 * Returns 0 on success, non-zero on failure.
72382 */
72383 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72384 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72385 {
72386 struct anon_vma_chain *avc;
72387 struct anon_vma *anon_vma;
72388 diff --git a/mm/shmem.c b/mm/shmem.c
72389 index f99ff3e..faea8b6 100644
72390 --- a/mm/shmem.c
72391 +++ b/mm/shmem.c
72392 @@ -31,7 +31,7 @@
72393 #include <linux/export.h>
72394 #include <linux/swap.h>
72395
72396 -static struct vfsmount *shm_mnt;
72397 +struct vfsmount *shm_mnt;
72398
72399 #ifdef CONFIG_SHMEM
72400 /*
72401 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72402 #define BOGO_DIRENT_SIZE 20
72403
72404 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72405 -#define SHORT_SYMLINK_LEN 128
72406 +#define SHORT_SYMLINK_LEN 64
72407
72408 struct shmem_xattr {
72409 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72410 @@ -2235,8 +2235,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72411 int err = -ENOMEM;
72412
72413 /* Round up to L1_CACHE_BYTES to resist false sharing */
72414 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72415 - L1_CACHE_BYTES), GFP_KERNEL);
72416 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72417 if (!sbinfo)
72418 return -ENOMEM;
72419
72420 diff --git a/mm/slab.c b/mm/slab.c
72421 index e901a36..ee8fe97 100644
72422 --- a/mm/slab.c
72423 +++ b/mm/slab.c
72424 @@ -153,7 +153,7 @@
72425
72426 /* Legal flag mask for kmem_cache_create(). */
72427 #if DEBUG
72428 -# define CREATE_MASK (SLAB_RED_ZONE | \
72429 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72430 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72431 SLAB_CACHE_DMA | \
72432 SLAB_STORE_USER | \
72433 @@ -161,7 +161,7 @@
72434 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72435 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72436 #else
72437 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72438 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72439 SLAB_CACHE_DMA | \
72440 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72441 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72442 @@ -290,7 +290,7 @@ struct kmem_list3 {
72443 * Need this for bootstrapping a per node allocator.
72444 */
72445 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
72446 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72447 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
72448 #define CACHE_CACHE 0
72449 #define SIZE_AC MAX_NUMNODES
72450 #define SIZE_L3 (2 * MAX_NUMNODES)
72451 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
72452 if ((x)->max_freeable < i) \
72453 (x)->max_freeable = i; \
72454 } while (0)
72455 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72456 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72457 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72458 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72459 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72460 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72461 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72462 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72463 #else
72464 #define STATS_INC_ACTIVE(x) do { } while (0)
72465 #define STATS_DEC_ACTIVE(x) do { } while (0)
72466 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72467 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72468 */
72469 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72470 - const struct slab *slab, void *obj)
72471 + const struct slab *slab, const void *obj)
72472 {
72473 u32 offset = (obj - slab->s_mem);
72474 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72475 @@ -568,7 +568,7 @@ struct cache_names {
72476 static struct cache_names __initdata cache_names[] = {
72477 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72478 #include <linux/kmalloc_sizes.h>
72479 - {NULL,}
72480 + {NULL}
72481 #undef CACHE
72482 };
72483
72484 @@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
72485 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72486 sizes[INDEX_AC].cs_size,
72487 ARCH_KMALLOC_MINALIGN,
72488 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72489 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72490 NULL);
72491
72492 if (INDEX_AC != INDEX_L3) {
72493 @@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
72494 kmem_cache_create(names[INDEX_L3].name,
72495 sizes[INDEX_L3].cs_size,
72496 ARCH_KMALLOC_MINALIGN,
72497 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72498 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72499 NULL);
72500 }
72501
72502 @@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
72503 sizes->cs_cachep = kmem_cache_create(names->name,
72504 sizes->cs_size,
72505 ARCH_KMALLOC_MINALIGN,
72506 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72507 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72508 NULL);
72509 }
72510 #ifdef CONFIG_ZONE_DMA
72511 @@ -4390,10 +4390,10 @@ static int s_show(struct seq_file *m, void *p)
72512 }
72513 /* cpu stats */
72514 {
72515 - unsigned long allochit = atomic_read(&cachep->allochit);
72516 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
72517 - unsigned long freehit = atomic_read(&cachep->freehit);
72518 - unsigned long freemiss = atomic_read(&cachep->freemiss);
72519 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
72520 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
72521 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
72522 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
72523
72524 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
72525 allochit, allocmiss, freehit, freemiss);
72526 @@ -4652,13 +4652,62 @@ static int __init slab_proc_init(void)
72527 {
72528 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
72529 #ifdef CONFIG_DEBUG_SLAB_LEAK
72530 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
72531 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
72532 #endif
72533 return 0;
72534 }
72535 module_init(slab_proc_init);
72536 #endif
72537
72538 +void check_object_size(const void *ptr, unsigned long n, bool to)
72539 +{
72540 +
72541 +#ifdef CONFIG_PAX_USERCOPY
72542 + struct page *page;
72543 + struct kmem_cache *cachep = NULL;
72544 + struct slab *slabp;
72545 + unsigned int objnr;
72546 + unsigned long offset;
72547 + const char *type;
72548 +
72549 + if (!n)
72550 + return;
72551 +
72552 + type = "<null>";
72553 + if (ZERO_OR_NULL_PTR(ptr))
72554 + goto report;
72555 +
72556 + if (!virt_addr_valid(ptr))
72557 + return;
72558 +
72559 + page = virt_to_head_page(ptr);
72560 +
72561 + type = "<process stack>";
72562 + if (!PageSlab(page)) {
72563 + if (object_is_on_stack(ptr, n) == -1)
72564 + goto report;
72565 + return;
72566 + }
72567 +
72568 + cachep = page_get_cache(page);
72569 + type = cachep->name;
72570 + if (!(cachep->flags & SLAB_USERCOPY))
72571 + goto report;
72572 +
72573 + slabp = page_get_slab(page);
72574 + objnr = obj_to_index(cachep, slabp, ptr);
72575 + BUG_ON(objnr >= cachep->num);
72576 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
72577 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
72578 + return;
72579 +
72580 +report:
72581 + pax_report_usercopy(ptr, n, to, type);
72582 +#endif
72583 +
72584 +}
72585 +EXPORT_SYMBOL(check_object_size);
72586 +
72587 /**
72588 * ksize - get the actual amount of memory allocated for a given object
72589 * @objp: Pointer to the object
72590 diff --git a/mm/slob.c b/mm/slob.c
72591 index 8105be4..e045f96 100644
72592 --- a/mm/slob.c
72593 +++ b/mm/slob.c
72594 @@ -29,7 +29,7 @@
72595 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
72596 * alloc_pages() directly, allocating compound pages so the page order
72597 * does not have to be separately tracked, and also stores the exact
72598 - * allocation size in page->private so that it can be used to accurately
72599 + * allocation size in slob_page->size so that it can be used to accurately
72600 * provide ksize(). These objects are detected in kfree() because slob_page()
72601 * is false for them.
72602 *
72603 @@ -58,6 +58,7 @@
72604 */
72605
72606 #include <linux/kernel.h>
72607 +#include <linux/sched.h>
72608 #include <linux/slab.h>
72609 #include <linux/mm.h>
72610 #include <linux/swap.h> /* struct reclaim_state */
72611 @@ -102,7 +103,8 @@ struct slob_page {
72612 unsigned long flags; /* mandatory */
72613 atomic_t _count; /* mandatory */
72614 slobidx_t units; /* free units left in page */
72615 - unsigned long pad[2];
72616 + unsigned long pad[1];
72617 + unsigned long size; /* size when >=PAGE_SIZE */
72618 slob_t *free; /* first free slob_t in page */
72619 struct list_head list; /* linked list of free pages */
72620 };
72621 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
72622 */
72623 static inline int is_slob_page(struct slob_page *sp)
72624 {
72625 - return PageSlab((struct page *)sp);
72626 + return PageSlab((struct page *)sp) && !sp->size;
72627 }
72628
72629 static inline void set_slob_page(struct slob_page *sp)
72630 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
72631
72632 static inline struct slob_page *slob_page(const void *addr)
72633 {
72634 - return (struct slob_page *)virt_to_page(addr);
72635 + return (struct slob_page *)virt_to_head_page(addr);
72636 }
72637
72638 /*
72639 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
72640 /*
72641 * Return the size of a slob block.
72642 */
72643 -static slobidx_t slob_units(slob_t *s)
72644 +static slobidx_t slob_units(const slob_t *s)
72645 {
72646 if (s->units > 0)
72647 return s->units;
72648 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
72649 /*
72650 * Return the next free slob block pointer after this one.
72651 */
72652 -static slob_t *slob_next(slob_t *s)
72653 +static slob_t *slob_next(const slob_t *s)
72654 {
72655 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
72656 slobidx_t next;
72657 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
72658 /*
72659 * Returns true if s is the last free block in its page.
72660 */
72661 -static int slob_last(slob_t *s)
72662 +static int slob_last(const slob_t *s)
72663 {
72664 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
72665 }
72666 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
72667 if (!page)
72668 return NULL;
72669
72670 + set_slob_page(page);
72671 return page_address(page);
72672 }
72673
72674 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
72675 if (!b)
72676 return NULL;
72677 sp = slob_page(b);
72678 - set_slob_page(sp);
72679
72680 spin_lock_irqsave(&slob_lock, flags);
72681 sp->units = SLOB_UNITS(PAGE_SIZE);
72682 sp->free = b;
72683 + sp->size = 0;
72684 INIT_LIST_HEAD(&sp->list);
72685 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
72686 set_slob_page_free(sp, slob_list);
72687 @@ -476,10 +479,9 @@ out:
72688 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
72689 */
72690
72691 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72692 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
72693 {
72694 - unsigned int *m;
72695 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72696 + slob_t *m;
72697 void *ret;
72698
72699 gfp &= gfp_allowed_mask;
72700 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72701
72702 if (!m)
72703 return NULL;
72704 - *m = size;
72705 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
72706 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
72707 + m[0].units = size;
72708 + m[1].units = align;
72709 ret = (void *)m + align;
72710
72711 trace_kmalloc_node(_RET_IP_, ret,
72712 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72713 gfp |= __GFP_COMP;
72714 ret = slob_new_pages(gfp, order, node);
72715 if (ret) {
72716 - struct page *page;
72717 - page = virt_to_page(ret);
72718 - page->private = size;
72719 + struct slob_page *sp;
72720 + sp = slob_page(ret);
72721 + sp->size = size;
72722 }
72723
72724 trace_kmalloc_node(_RET_IP_, ret,
72725 size, PAGE_SIZE << order, gfp, node);
72726 }
72727
72728 - kmemleak_alloc(ret, size, 1, gfp);
72729 + return ret;
72730 +}
72731 +
72732 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72733 +{
72734 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72735 + void *ret = __kmalloc_node_align(size, gfp, node, align);
72736 +
72737 + if (!ZERO_OR_NULL_PTR(ret))
72738 + kmemleak_alloc(ret, size, 1, gfp);
72739 return ret;
72740 }
72741 EXPORT_SYMBOL(__kmalloc_node);
72742 @@ -533,13 +547,92 @@ void kfree(const void *block)
72743 sp = slob_page(block);
72744 if (is_slob_page(sp)) {
72745 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72746 - unsigned int *m = (unsigned int *)(block - align);
72747 - slob_free(m, *m + align);
72748 - } else
72749 + slob_t *m = (slob_t *)(block - align);
72750 + slob_free(m, m[0].units + align);
72751 + } else {
72752 + clear_slob_page(sp);
72753 + free_slob_page(sp);
72754 + sp->size = 0;
72755 put_page(&sp->page);
72756 + }
72757 }
72758 EXPORT_SYMBOL(kfree);
72759
72760 +void check_object_size(const void *ptr, unsigned long n, bool to)
72761 +{
72762 +
72763 +#ifdef CONFIG_PAX_USERCOPY
72764 + struct slob_page *sp;
72765 + const slob_t *free;
72766 + const void *base;
72767 + unsigned long flags;
72768 + const char *type;
72769 +
72770 + if (!n)
72771 + return;
72772 +
72773 + type = "<null>";
72774 + if (ZERO_OR_NULL_PTR(ptr))
72775 + goto report;
72776 +
72777 + if (!virt_addr_valid(ptr))
72778 + return;
72779 +
72780 + type = "<process stack>";
72781 + sp = slob_page(ptr);
72782 + if (!PageSlab((struct page *)sp)) {
72783 + if (object_is_on_stack(ptr, n) == -1)
72784 + goto report;
72785 + return;
72786 + }
72787 +
72788 + type = "<slob>";
72789 + if (sp->size) {
72790 + base = page_address(&sp->page);
72791 + if (base <= ptr && n <= sp->size - (ptr - base))
72792 + return;
72793 + goto report;
72794 + }
72795 +
72796 + /* some tricky double walking of free and allocated chunks to find the chunk containing ptr */
72797 + spin_lock_irqsave(&slob_lock, flags);
72798 + base = (void *)((unsigned long)ptr & PAGE_MASK);
72799 + free = sp->free;
72800 +
72801 + while (!slob_last(free) && (void *)free <= ptr) {
72802 + base = free + slob_units(free);
72803 + free = slob_next(free);
72804 + }
72805 +
72806 + while (base < (void *)free) {
72807 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
72808 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
72809 + int offset;
72810 +
72811 + if (ptr < base + align)
72812 + break;
72813 +
72814 + offset = ptr - base - align;
72815 + if (offset >= m) {
72816 + base += size;
72817 + continue;
72818 + }
72819 +
72820 + if (n > m - offset)
72821 + break;
72822 +
72823 + spin_unlock_irqrestore(&slob_lock, flags);
72824 + return;
72825 + }
72826 +
72827 + spin_unlock_irqrestore(&slob_lock, flags);
72828 +report:
72829 + pax_report_usercopy(ptr, n, to, type);
72830 +#endif
72831 +
72832 +}
72833 +EXPORT_SYMBOL(check_object_size);
72834 +
72835 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
72836 size_t ksize(const void *block)
72837 {
72838 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
72839 sp = slob_page(block);
72840 if (is_slob_page(sp)) {
72841 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72842 - unsigned int *m = (unsigned int *)(block - align);
72843 - return SLOB_UNITS(*m) * SLOB_UNIT;
72844 + slob_t *m = (slob_t *)(block - align);
72845 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
72846 } else
72847 - return sp->page.private;
72848 + return sp->size;
72849 }
72850 EXPORT_SYMBOL(ksize);
72851
72852 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72853 {
72854 struct kmem_cache *c;
72855
72856 +#ifdef CONFIG_PAX_USERCOPY
72857 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
72858 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
72859 +#else
72860 c = slob_alloc(sizeof(struct kmem_cache),
72861 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
72862 +#endif
72863
72864 if (c) {
72865 c->name = name;
72866 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
72867
72868 lockdep_trace_alloc(flags);
72869
72870 +#ifdef CONFIG_PAX_USERCOPY
72871 + b = __kmalloc_node_align(c->size, flags, node, c->align);
72872 +#else
72873 if (c->size < PAGE_SIZE) {
72874 b = slob_alloc(c->size, flags, c->align, node);
72875 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72876 SLOB_UNITS(c->size) * SLOB_UNIT,
72877 flags, node);
72878 } else {
72879 + struct slob_page *sp;
72880 +
72881 b = slob_new_pages(flags, get_order(c->size), node);
72882 + sp = slob_page(b);
72883 + sp->size = c->size;
72884 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72885 PAGE_SIZE << get_order(c->size),
72886 flags, node);
72887 }
72888 +#endif
72889
72890 if (c->ctor)
72891 c->ctor(b);
72892 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
72893
72894 static void __kmem_cache_free(void *b, int size)
72895 {
72896 - if (size < PAGE_SIZE)
72897 + struct slob_page *sp = slob_page(b);
72898 +
72899 + if (is_slob_page(sp))
72900 slob_free(b, size);
72901 - else
72902 + else {
72903 + clear_slob_page(sp);
72904 + free_slob_page(sp);
72905 + sp->size = 0;
72906 slob_free_pages(b, get_order(size));
72907 + }
72908 }
72909
72910 static void kmem_rcu_free(struct rcu_head *head)
72911 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
72912
72913 void kmem_cache_free(struct kmem_cache *c, void *b)
72914 {
72915 + int size = c->size;
72916 +
72917 +#ifdef CONFIG_PAX_USERCOPY
72918 + if (size + c->align < PAGE_SIZE) {
72919 + size += c->align;
72920 + b -= c->align;
72921 + }
72922 +#endif
72923 +
72924 kmemleak_free_recursive(b, c->flags);
72925 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
72926 struct slob_rcu *slob_rcu;
72927 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
72928 - slob_rcu->size = c->size;
72929 + slob_rcu = b + (size - sizeof(struct slob_rcu));
72930 + slob_rcu->size = size;
72931 call_rcu(&slob_rcu->head, kmem_rcu_free);
72932 } else {
72933 - __kmem_cache_free(b, c->size);
72934 + __kmem_cache_free(b, size);
72935 }
72936
72937 +#ifdef CONFIG_PAX_USERCOPY
72938 + trace_kfree(_RET_IP_, b);
72939 +#else
72940 trace_kmem_cache_free(_RET_IP_, b);
72941 +#endif
72942 +
72943 }
72944 EXPORT_SYMBOL(kmem_cache_free);
72945
72946 diff --git a/mm/slub.c b/mm/slub.c
72947 index 80848cd..14cd19c 100644
72948 --- a/mm/slub.c
72949 +++ b/mm/slub.c
72950 @@ -209,7 +209,7 @@ struct track {
72951
72952 enum track_item { TRACK_ALLOC, TRACK_FREE };
72953
72954 -#ifdef CONFIG_SYSFS
72955 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72956 static int sysfs_slab_add(struct kmem_cache *);
72957 static int sysfs_slab_alias(struct kmem_cache *, const char *);
72958 static void sysfs_slab_remove(struct kmem_cache *);
72959 @@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
72960 if (!t->addr)
72961 return;
72962
72963 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
72964 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
72965 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
72966 #ifdef CONFIG_STACKTRACE
72967 {
72968 @@ -2600,6 +2600,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
72969
72970 page = virt_to_head_page(x);
72971
72972 + BUG_ON(!PageSlab(page));
72973 +
72974 slab_free(s, page, x, _RET_IP_);
72975
72976 trace_kmem_cache_free(_RET_IP_, x);
72977 @@ -2633,7 +2635,7 @@ static int slub_min_objects;
72978 * Merge control. If this is set then no merging of slab caches will occur.
72979 * (Could be removed. This was introduced to pacify the merge skeptics.)
72980 */
72981 -static int slub_nomerge;
72982 +static int slub_nomerge = 1;
72983
72984 /*
72985 * Calculate the order of allocation given an slab object size.
72986 @@ -3086,7 +3088,7 @@ static int kmem_cache_open(struct kmem_cache *s,
72987 else
72988 s->cpu_partial = 30;
72989
72990 - s->refcount = 1;
72991 + atomic_set(&s->refcount, 1);
72992 #ifdef CONFIG_NUMA
72993 s->remote_node_defrag_ratio = 1000;
72994 #endif
72995 @@ -3190,8 +3192,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
72996 void kmem_cache_destroy(struct kmem_cache *s)
72997 {
72998 down_write(&slub_lock);
72999 - s->refcount--;
73000 - if (!s->refcount) {
73001 + if (atomic_dec_and_test(&s->refcount)) {
73002 list_del(&s->list);
73003 up_write(&slub_lock);
73004 if (kmem_cache_close(s)) {
73005 @@ -3402,6 +3403,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73006 EXPORT_SYMBOL(__kmalloc_node);
73007 #endif
73008
73009 +void check_object_size(const void *ptr, unsigned long n, bool to)
73010 +{
73011 +
73012 +#ifdef CONFIG_PAX_USERCOPY
73013 + struct page *page;
73014 + struct kmem_cache *s = NULL;
73015 + unsigned long offset;
73016 + const char *type;
73017 +
73018 + if (!n)
73019 + return;
73020 +
73021 + type = "<null>";
73022 + if (ZERO_OR_NULL_PTR(ptr))
73023 + goto report;
73024 +
73025 + if (!virt_addr_valid(ptr))
73026 + return;
73027 +
73028 + page = virt_to_head_page(ptr);
73029 +
73030 + type = "<process stack>";
73031 + if (!PageSlab(page)) {
73032 + if (object_is_on_stack(ptr, n) == -1)
73033 + goto report;
73034 + return;
73035 + }
73036 +
73037 + s = page->slab;
73038 + type = s->name;
73039 + if (!(s->flags & SLAB_USERCOPY))
73040 + goto report;
73041 +
73042 + offset = (ptr - page_address(page)) % s->size;
73043 + if (offset <= s->objsize && n <= s->objsize - offset)
73044 + return;
73045 +
73046 +report:
73047 + pax_report_usercopy(ptr, n, to, type);
73048 +#endif
73049 +
73050 +}
73051 +EXPORT_SYMBOL(check_object_size);
73052 +
73053 size_t ksize(const void *object)
73054 {
73055 struct page *page;
73056 @@ -3676,7 +3721,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73057 int node;
73058
73059 list_add(&s->list, &slab_caches);
73060 - s->refcount = -1;
73061 + atomic_set(&s->refcount, -1);
73062
73063 for_each_node_state(node, N_NORMAL_MEMORY) {
73064 struct kmem_cache_node *n = get_node(s, node);
73065 @@ -3796,17 +3841,17 @@ void __init kmem_cache_init(void)
73066
73067 /* Caches that are not of the two-to-the-power-of size */
73068 if (KMALLOC_MIN_SIZE <= 32) {
73069 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73070 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73071 caches++;
73072 }
73073
73074 if (KMALLOC_MIN_SIZE <= 64) {
73075 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73076 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73077 caches++;
73078 }
73079
73080 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73081 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73082 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73083 caches++;
73084 }
73085
73086 @@ -3874,7 +3919,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73087 /*
73088 * We may have set a slab to be unmergeable during bootstrap.
73089 */
73090 - if (s->refcount < 0)
73091 + if (atomic_read(&s->refcount) < 0)
73092 return 1;
73093
73094 return 0;
73095 @@ -3933,7 +3978,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73096 down_write(&slub_lock);
73097 s = find_mergeable(size, align, flags, name, ctor);
73098 if (s) {
73099 - s->refcount++;
73100 + atomic_inc(&s->refcount);
73101 /*
73102 * Adjust the object sizes so that we clear
73103 * the complete object on kzalloc.
73104 @@ -3942,7 +3987,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73105 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73106
73107 if (sysfs_slab_alias(s, name)) {
73108 - s->refcount--;
73109 + atomic_dec(&s->refcount);
73110 goto err;
73111 }
73112 up_write(&slub_lock);
73113 @@ -4071,7 +4116,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73114 }
73115 #endif
73116
73117 -#ifdef CONFIG_SYSFS
73118 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73119 static int count_inuse(struct page *page)
73120 {
73121 return page->inuse;
73122 @@ -4458,12 +4503,12 @@ static void resiliency_test(void)
73123 validate_slab_cache(kmalloc_caches[9]);
73124 }
73125 #else
73126 -#ifdef CONFIG_SYSFS
73127 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73128 static void resiliency_test(void) {};
73129 #endif
73130 #endif
73131
73132 -#ifdef CONFIG_SYSFS
73133 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73134 enum slab_stat_type {
73135 SL_ALL, /* All slabs */
73136 SL_PARTIAL, /* Only partially allocated slabs */
73137 @@ -4706,7 +4751,7 @@ SLAB_ATTR_RO(ctor);
73138
73139 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73140 {
73141 - return sprintf(buf, "%d\n", s->refcount - 1);
73142 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73143 }
73144 SLAB_ATTR_RO(aliases);
73145
73146 @@ -5277,6 +5322,7 @@ static char *create_unique_id(struct kmem_cache *s)
73147 return name;
73148 }
73149
73150 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73151 static int sysfs_slab_add(struct kmem_cache *s)
73152 {
73153 int err;
73154 @@ -5339,6 +5385,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73155 kobject_del(&s->kobj);
73156 kobject_put(&s->kobj);
73157 }
73158 +#endif
73159
73160 /*
73161 * Need to buffer aliases during bootup until sysfs becomes
73162 @@ -5352,6 +5399,7 @@ struct saved_alias {
73163
73164 static struct saved_alias *alias_list;
73165
73166 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73167 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73168 {
73169 struct saved_alias *al;
73170 @@ -5374,6 +5422,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73171 alias_list = al;
73172 return 0;
73173 }
73174 +#endif
73175
73176 static int __init slab_sysfs_init(void)
73177 {
73178 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
73179 index 1b7e22a..3fcd4f3 100644
73180 --- a/mm/sparse-vmemmap.c
73181 +++ b/mm/sparse-vmemmap.c
73182 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
73183 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73184 if (!p)
73185 return NULL;
73186 - pud_populate(&init_mm, pud, p);
73187 + pud_populate_kernel(&init_mm, pud, p);
73188 }
73189 return pud;
73190 }
73191 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
73192 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73193 if (!p)
73194 return NULL;
73195 - pgd_populate(&init_mm, pgd, p);
73196 + pgd_populate_kernel(&init_mm, pgd, p);
73197 }
73198 return pgd;
73199 }
73200 diff --git a/mm/swap.c b/mm/swap.c
73201 index 5c13f13..f1cfc13 100644
73202 --- a/mm/swap.c
73203 +++ b/mm/swap.c
73204 @@ -30,6 +30,7 @@
73205 #include <linux/backing-dev.h>
73206 #include <linux/memcontrol.h>
73207 #include <linux/gfp.h>
73208 +#include <linux/hugetlb.h>
73209
73210 #include "internal.h"
73211
73212 @@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
73213
73214 __page_cache_release(page);
73215 dtor = get_compound_page_dtor(page);
73216 + if (!PageHuge(page))
73217 + BUG_ON(dtor != free_compound_page);
73218 (*dtor)(page);
73219 }
73220
73221 diff --git a/mm/swapfile.c b/mm/swapfile.c
73222 index fafc26d..1b7493e 100644
73223 --- a/mm/swapfile.c
73224 +++ b/mm/swapfile.c
73225 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
73226
73227 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73228 /* Activity counter to indicate that a swapon or swapoff has occurred */
73229 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
73230 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73231
73232 static inline unsigned char swap_count(unsigned char ent)
73233 {
73234 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73235 }
73236 filp_close(swap_file, NULL);
73237 err = 0;
73238 - atomic_inc(&proc_poll_event);
73239 + atomic_inc_unchecked(&proc_poll_event);
73240 wake_up_interruptible(&proc_poll_wait);
73241
73242 out_dput:
73243 @@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73244
73245 poll_wait(file, &proc_poll_wait, wait);
73246
73247 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
73248 - seq->poll_event = atomic_read(&proc_poll_event);
73249 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73250 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73251 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73252 }
73253
73254 @@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
73255 return ret;
73256
73257 seq = file->private_data;
73258 - seq->poll_event = atomic_read(&proc_poll_event);
73259 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73260 return 0;
73261 }
73262
73263 @@ -2127,7 +2127,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
73264 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73265
73266 mutex_unlock(&swapon_mutex);
73267 - atomic_inc(&proc_poll_event);
73268 + atomic_inc_unchecked(&proc_poll_event);
73269 wake_up_interruptible(&proc_poll_wait);
73270
73271 if (S_ISREG(inode->i_mode))
73272 diff --git a/mm/util.c b/mm/util.c
73273 index ae962b3..0bba886 100644
73274 --- a/mm/util.c
73275 +++ b/mm/util.c
73276 @@ -284,6 +284,12 @@ done:
73277 void arch_pick_mmap_layout(struct mm_struct *mm)
73278 {
73279 mm->mmap_base = TASK_UNMAPPED_BASE;
73280 +
73281 +#ifdef CONFIG_PAX_RANDMMAP
73282 + if (mm->pax_flags & MF_PAX_RANDMMAP)
73283 + mm->mmap_base += mm->delta_mmap;
73284 +#endif
73285 +
73286 mm->get_unmapped_area = arch_get_unmapped_area;
73287 mm->unmap_area = arch_unmap_area;
73288 }
73289 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
73290 index 94dff88..7d25ed1 100644
73291 --- a/mm/vmalloc.c
73292 +++ b/mm/vmalloc.c
73293 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
73294
73295 pte = pte_offset_kernel(pmd, addr);
73296 do {
73297 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73298 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73299 +
73300 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73301 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
73302 + BUG_ON(!pte_exec(*pte));
73303 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
73304 + continue;
73305 + }
73306 +#endif
73307 +
73308 + {
73309 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73310 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73311 + }
73312 } while (pte++, addr += PAGE_SIZE, addr != end);
73313 }
73314
73315 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73316 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
73317 {
73318 pte_t *pte;
73319 + int ret = -ENOMEM;
73320
73321 /*
73322 * nr is a running index into the array which helps higher level
73323 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73324 pte = pte_alloc_kernel(pmd, addr);
73325 if (!pte)
73326 return -ENOMEM;
73327 +
73328 + pax_open_kernel();
73329 do {
73330 struct page *page = pages[*nr];
73331
73332 - if (WARN_ON(!pte_none(*pte)))
73333 - return -EBUSY;
73334 - if (WARN_ON(!page))
73335 - return -ENOMEM;
73336 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73337 + if (pgprot_val(prot) & _PAGE_NX)
73338 +#endif
73339 +
73340 + if (WARN_ON(!pte_none(*pte))) {
73341 + ret = -EBUSY;
73342 + goto out;
73343 + }
73344 + if (WARN_ON(!page)) {
73345 + ret = -ENOMEM;
73346 + goto out;
73347 + }
73348 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
73349 (*nr)++;
73350 } while (pte++, addr += PAGE_SIZE, addr != end);
73351 - return 0;
73352 + ret = 0;
73353 +out:
73354 + pax_close_kernel();
73355 + return ret;
73356 }
73357
73358 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73359 @@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73360 pmd_t *pmd;
73361 unsigned long next;
73362
73363 - pmd = pmd_alloc(&init_mm, pud, addr);
73364 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
73365 if (!pmd)
73366 return -ENOMEM;
73367 do {
73368 @@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
73369 pud_t *pud;
73370 unsigned long next;
73371
73372 - pud = pud_alloc(&init_mm, pgd, addr);
73373 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
73374 if (!pud)
73375 return -ENOMEM;
73376 do {
73377 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
73378 * and fall back on vmalloc() if that fails. Others
73379 * just put it in the vmalloc space.
73380 */
73381 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
73382 +#ifdef CONFIG_MODULES
73383 +#ifdef MODULES_VADDR
73384 unsigned long addr = (unsigned long)x;
73385 if (addr >= MODULES_VADDR && addr < MODULES_END)
73386 return 1;
73387 #endif
73388 +
73389 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73390 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
73391 + return 1;
73392 +#endif
73393 +
73394 +#endif
73395 +
73396 return is_vmalloc_addr(x);
73397 }
73398
73399 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
73400
73401 if (!pgd_none(*pgd)) {
73402 pud_t *pud = pud_offset(pgd, addr);
73403 +#ifdef CONFIG_X86
73404 + if (!pud_large(*pud))
73405 +#endif
73406 if (!pud_none(*pud)) {
73407 pmd_t *pmd = pmd_offset(pud, addr);
73408 +#ifdef CONFIG_X86
73409 + if (!pmd_large(*pmd))
73410 +#endif
73411 if (!pmd_none(*pmd)) {
73412 pte_t *ptep, pte;
73413
73414 @@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
73415 static struct vmap_area *alloc_vmap_area(unsigned long size,
73416 unsigned long align,
73417 unsigned long vstart, unsigned long vend,
73418 + int node, gfp_t gfp_mask) __size_overflow(1);
73419 +static struct vmap_area *alloc_vmap_area(unsigned long size,
73420 + unsigned long align,
73421 + unsigned long vstart, unsigned long vend,
73422 int node, gfp_t gfp_mask)
73423 {
73424 struct vmap_area *va;
73425 @@ -1319,6 +1363,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
73426 struct vm_struct *area;
73427
73428 BUG_ON(in_interrupt());
73429 +
73430 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73431 + if (flags & VM_KERNEXEC) {
73432 + if (start != VMALLOC_START || end != VMALLOC_END)
73433 + return NULL;
73434 + start = (unsigned long)MODULES_EXEC_VADDR;
73435 + end = (unsigned long)MODULES_EXEC_END;
73436 + }
73437 +#endif
73438 +
73439 if (flags & VM_IOREMAP) {
73440 int bit = fls(size);
73441
73442 @@ -1551,6 +1605,11 @@ void *vmap(struct page **pages, unsigned int count,
73443 if (count > totalram_pages)
73444 return NULL;
73445
73446 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73447 + if (!(pgprot_val(prot) & _PAGE_NX))
73448 + flags |= VM_KERNEXEC;
73449 +#endif
73450 +
73451 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73452 __builtin_return_address(0));
73453 if (!area)
73454 @@ -1652,6 +1711,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73455 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
73456 goto fail;
73457
73458 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73459 + if (!(pgprot_val(prot) & _PAGE_NX))
73460 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73461 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
73462 + else
73463 +#endif
73464 +
73465 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73466 start, end, node, gfp_mask, caller);
73467 if (!area)
73468 @@ -1825,10 +1891,9 @@ EXPORT_SYMBOL(vzalloc_node);
73469 * For tight control over page level allocator and protection flags
73470 * use __vmalloc() instead.
73471 */
73472 -
73473 void *vmalloc_exec(unsigned long size)
73474 {
73475 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
73476 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
73477 -1, __builtin_return_address(0));
73478 }
73479
73480 @@ -2123,6 +2188,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
73481 unsigned long uaddr = vma->vm_start;
73482 unsigned long usize = vma->vm_end - vma->vm_start;
73483
73484 + BUG_ON(vma->vm_mirror);
73485 +
73486 if ((PAGE_SIZE-1) & (unsigned long)addr)
73487 return -EINVAL;
73488
73489 @@ -2375,8 +2442,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
73490 return NULL;
73491 }
73492
73493 - vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
73494 - vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
73495 + vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
73496 + vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
73497 if (!vas || !vms)
73498 goto err_free2;
73499
73500 diff --git a/mm/vmstat.c b/mm/vmstat.c
73501 index 7db1b9b..e9f6b07 100644
73502 --- a/mm/vmstat.c
73503 +++ b/mm/vmstat.c
73504 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
73505 *
73506 * vm_stat contains the global counters
73507 */
73508 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73509 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73510 EXPORT_SYMBOL(vm_stat);
73511
73512 #ifdef CONFIG_SMP
73513 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
73514 v = p->vm_stat_diff[i];
73515 p->vm_stat_diff[i] = 0;
73516 local_irq_restore(flags);
73517 - atomic_long_add(v, &zone->vm_stat[i]);
73518 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
73519 global_diff[i] += v;
73520 #ifdef CONFIG_NUMA
73521 /* 3 seconds idle till flush */
73522 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
73523
73524 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
73525 if (global_diff[i])
73526 - atomic_long_add(global_diff[i], &vm_stat[i]);
73527 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
73528 }
73529
73530 #endif
73531 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
73532 start_cpu_timer(cpu);
73533 #endif
73534 #ifdef CONFIG_PROC_FS
73535 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
73536 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
73537 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
73538 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
73539 + {
73540 + mode_t gr_mode = S_IRUGO;
73541 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73542 + gr_mode = S_IRUSR;
73543 +#endif
73544 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
73545 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
73546 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73547 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
73548 +#else
73549 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
73550 +#endif
73551 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
73552 + }
73553 #endif
73554 return 0;
73555 }
73556 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
73557 index efea35b..9c8dd0b 100644
73558 --- a/net/8021q/vlan.c
73559 +++ b/net/8021q/vlan.c
73560 @@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
73561 err = -EPERM;
73562 if (!capable(CAP_NET_ADMIN))
73563 break;
73564 - if ((args.u.name_type >= 0) &&
73565 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
73566 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
73567 struct vlan_net *vn;
73568
73569 vn = net_generic(net, vlan_net_id);
73570 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
73571 index fccae26..e7ece2f 100644
73572 --- a/net/9p/trans_fd.c
73573 +++ b/net/9p/trans_fd.c
73574 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
73575 oldfs = get_fs();
73576 set_fs(get_ds());
73577 /* The cast to a user pointer is valid due to the set_fs() */
73578 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
73579 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
73580 set_fs(oldfs);
73581
73582 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
73583 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
73584 index 876fbe8..8bbea9f 100644
73585 --- a/net/atm/atm_misc.c
73586 +++ b/net/atm/atm_misc.c
73587 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
73588 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
73589 return 1;
73590 atm_return(vcc, truesize);
73591 - atomic_inc(&vcc->stats->rx_drop);
73592 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73593 return 0;
73594 }
73595 EXPORT_SYMBOL(atm_charge);
73596 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
73597 }
73598 }
73599 atm_return(vcc, guess);
73600 - atomic_inc(&vcc->stats->rx_drop);
73601 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73602 return NULL;
73603 }
73604 EXPORT_SYMBOL(atm_alloc_charge);
73605 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
73606
73607 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73608 {
73609 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73610 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73611 __SONET_ITEMS
73612 #undef __HANDLE_ITEM
73613 }
73614 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
73615
73616 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73617 {
73618 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73619 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
73620 __SONET_ITEMS
73621 #undef __HANDLE_ITEM
73622 }
73623 diff --git a/net/atm/lec.h b/net/atm/lec.h
73624 index dfc0719..47c5322 100644
73625 --- a/net/atm/lec.h
73626 +++ b/net/atm/lec.h
73627 @@ -48,7 +48,7 @@ struct lane2_ops {
73628 const u8 *tlvs, u32 sizeoftlvs);
73629 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
73630 const u8 *tlvs, u32 sizeoftlvs);
73631 -};
73632 +} __no_const;
73633
73634 /*
73635 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
73636 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
73637 index 0919a88..a23d54e 100644
73638 --- a/net/atm/mpc.h
73639 +++ b/net/atm/mpc.h
73640 @@ -33,7 +33,7 @@ struct mpoa_client {
73641 struct mpc_parameters parameters; /* parameters for this client */
73642
73643 const struct net_device_ops *old_ops;
73644 - struct net_device_ops new_ops;
73645 + net_device_ops_no_const new_ops;
73646 };
73647
73648
73649 diff --git a/net/atm/proc.c b/net/atm/proc.c
73650 index 0d020de..011c7bb 100644
73651 --- a/net/atm/proc.c
73652 +++ b/net/atm/proc.c
73653 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
73654 const struct k_atm_aal_stats *stats)
73655 {
73656 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
73657 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
73658 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
73659 - atomic_read(&stats->rx_drop));
73660 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
73661 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
73662 + atomic_read_unchecked(&stats->rx_drop));
73663 }
73664
73665 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
73666 diff --git a/net/atm/resources.c b/net/atm/resources.c
73667 index 23f45ce..c748f1a 100644
73668 --- a/net/atm/resources.c
73669 +++ b/net/atm/resources.c
73670 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
73671 static void copy_aal_stats(struct k_atm_aal_stats *from,
73672 struct atm_aal_stats *to)
73673 {
73674 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73675 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73676 __AAL_STAT_ITEMS
73677 #undef __HANDLE_ITEM
73678 }
73679 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
73680 static void subtract_aal_stats(struct k_atm_aal_stats *from,
73681 struct atm_aal_stats *to)
73682 {
73683 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73684 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
73685 __AAL_STAT_ITEMS
73686 #undef __HANDLE_ITEM
73687 }
73688 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
73689 index a6d5d63..1cc6c2b 100644
73690 --- a/net/batman-adv/bat_iv_ogm.c
73691 +++ b/net/batman-adv/bat_iv_ogm.c
73692 @@ -539,7 +539,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
73693
73694 /* change sequence number to network order */
73695 batman_ogm_packet->seqno =
73696 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
73697 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
73698
73699 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
73700 batman_ogm_packet->tt_crc = htons((uint16_t)
73701 @@ -559,7 +559,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
73702 else
73703 batman_ogm_packet->gw_flags = NO_FLAGS;
73704
73705 - atomic_inc(&hard_iface->seqno);
73706 + atomic_inc_unchecked(&hard_iface->seqno);
73707
73708 slide_own_bcast_window(hard_iface);
73709 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
73710 @@ -917,7 +917,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
73711 return;
73712
73713 /* could be changed by schedule_own_packet() */
73714 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
73715 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
73716
73717 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
73718
73719 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
73720 index 3778977..f6a9450 100644
73721 --- a/net/batman-adv/hard-interface.c
73722 +++ b/net/batman-adv/hard-interface.c
73723 @@ -328,8 +328,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
73724 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
73725 dev_add_pack(&hard_iface->batman_adv_ptype);
73726
73727 - atomic_set(&hard_iface->seqno, 1);
73728 - atomic_set(&hard_iface->frag_seqno, 1);
73729 + atomic_set_unchecked(&hard_iface->seqno, 1);
73730 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
73731 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
73732 hard_iface->net_dev->name);
73733
73734 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
73735 index a5590f4..8d31969 100644
73736 --- a/net/batman-adv/soft-interface.c
73737 +++ b/net/batman-adv/soft-interface.c
73738 @@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
73739
73740 /* set broadcast sequence number */
73741 bcast_packet->seqno =
73742 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
73743 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
73744
73745 add_bcast_packet_to_list(bat_priv, skb, 1);
73746
73747 @@ -841,7 +841,7 @@ struct net_device *softif_create(const char *name)
73748 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
73749
73750 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
73751 - atomic_set(&bat_priv->bcast_seqno, 1);
73752 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
73753 atomic_set(&bat_priv->ttvn, 0);
73754 atomic_set(&bat_priv->tt_local_changes, 0);
73755 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
73756 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
73757 index 302efb5..1590365 100644
73758 --- a/net/batman-adv/types.h
73759 +++ b/net/batman-adv/types.h
73760 @@ -38,8 +38,8 @@ struct hard_iface {
73761 int16_t if_num;
73762 char if_status;
73763 struct net_device *net_dev;
73764 - atomic_t seqno;
73765 - atomic_t frag_seqno;
73766 + atomic_unchecked_t seqno;
73767 + atomic_unchecked_t frag_seqno;
73768 unsigned char *packet_buff;
73769 int packet_len;
73770 struct kobject *hardif_obj;
73771 @@ -155,7 +155,7 @@ struct bat_priv {
73772 atomic_t orig_interval; /* uint */
73773 atomic_t hop_penalty; /* uint */
73774 atomic_t log_level; /* uint */
73775 - atomic_t bcast_seqno;
73776 + atomic_unchecked_t bcast_seqno;
73777 atomic_t bcast_queue_left;
73778 atomic_t batman_queue_left;
73779 atomic_t ttvn; /* translation table version number */
73780 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
73781 index 676f6a6..3b4e668 100644
73782 --- a/net/batman-adv/unicast.c
73783 +++ b/net/batman-adv/unicast.c
73784 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
73785 frag1->flags = UNI_FRAG_HEAD | large_tail;
73786 frag2->flags = large_tail;
73787
73788 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
73789 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
73790 frag1->seqno = htons(seqno - 1);
73791 frag2->seqno = htons(seqno);
73792
73793 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
73794 index 5238b6b..c9798ce 100644
73795 --- a/net/bluetooth/hci_conn.c
73796 +++ b/net/bluetooth/hci_conn.c
73797 @@ -233,7 +233,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
73798 memset(&cp, 0, sizeof(cp));
73799
73800 cp.handle = cpu_to_le16(conn->handle);
73801 - memcpy(cp.ltk, ltk, sizeof(ltk));
73802 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
73803
73804 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
73805 }
73806 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
73807 index 6f9c25b..d19fd66 100644
73808 --- a/net/bluetooth/l2cap_core.c
73809 +++ b/net/bluetooth/l2cap_core.c
73810 @@ -2466,8 +2466,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
73811 break;
73812
73813 case L2CAP_CONF_RFC:
73814 - if (olen == sizeof(rfc))
73815 - memcpy(&rfc, (void *)val, olen);
73816 + if (olen != sizeof(rfc))
73817 + break;
73818 +
73819 + memcpy(&rfc, (void *)val, olen);
73820
73821 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
73822 rfc.mode != chan->mode)
73823 @@ -2585,8 +2587,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
73824
73825 switch (type) {
73826 case L2CAP_CONF_RFC:
73827 - if (olen == sizeof(rfc))
73828 - memcpy(&rfc, (void *)val, olen);
73829 + if (olen != sizeof(rfc))
73830 + break;
73831 +
73832 + memcpy(&rfc, (void *)val, olen);
73833 goto done;
73834 }
73835 }
73836 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
73837 index 5fe2ff3..10968b5 100644
73838 --- a/net/bridge/netfilter/ebtables.c
73839 +++ b/net/bridge/netfilter/ebtables.c
73840 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
73841 tmp.valid_hooks = t->table->valid_hooks;
73842 }
73843 mutex_unlock(&ebt_mutex);
73844 - if (copy_to_user(user, &tmp, *len) != 0){
73845 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
73846 BUGPRINT("c2u Didn't work\n");
73847 ret = -EFAULT;
73848 break;
73849 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
73850 index 5cf5222..6f704ad 100644
73851 --- a/net/caif/cfctrl.c
73852 +++ b/net/caif/cfctrl.c
73853 @@ -9,6 +9,7 @@
73854 #include <linux/stddef.h>
73855 #include <linux/spinlock.h>
73856 #include <linux/slab.h>
73857 +#include <linux/sched.h>
73858 #include <net/caif/caif_layer.h>
73859 #include <net/caif/cfpkt.h>
73860 #include <net/caif/cfctrl.h>
73861 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
73862 memset(&dev_info, 0, sizeof(dev_info));
73863 dev_info.id = 0xff;
73864 cfsrvl_init(&this->serv, 0, &dev_info, false);
73865 - atomic_set(&this->req_seq_no, 1);
73866 - atomic_set(&this->rsp_seq_no, 1);
73867 + atomic_set_unchecked(&this->req_seq_no, 1);
73868 + atomic_set_unchecked(&this->rsp_seq_no, 1);
73869 this->serv.layer.receive = cfctrl_recv;
73870 sprintf(this->serv.layer.name, "ctrl");
73871 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
73872 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
73873 struct cfctrl_request_info *req)
73874 {
73875 spin_lock_bh(&ctrl->info_list_lock);
73876 - atomic_inc(&ctrl->req_seq_no);
73877 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
73878 + atomic_inc_unchecked(&ctrl->req_seq_no);
73879 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
73880 list_add_tail(&req->list, &ctrl->list);
73881 spin_unlock_bh(&ctrl->info_list_lock);
73882 }
73883 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
73884 if (p != first)
73885 pr_warn("Requests are not received in order\n");
73886
73887 - atomic_set(&ctrl->rsp_seq_no,
73888 + atomic_set_unchecked(&ctrl->rsp_seq_no,
73889 p->sequence_no);
73890 list_del(&p->list);
73891 goto out;
73892 diff --git a/net/can/gw.c b/net/can/gw.c
73893 index 3d79b12..8de85fa 100644
73894 --- a/net/can/gw.c
73895 +++ b/net/can/gw.c
73896 @@ -96,7 +96,7 @@ struct cf_mod {
73897 struct {
73898 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
73899 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
73900 - } csumfunc;
73901 + } __no_const csumfunc;
73902 };
73903
73904
73905 diff --git a/net/compat.c b/net/compat.c
73906 index e055708..3f80795 100644
73907 --- a/net/compat.c
73908 +++ b/net/compat.c
73909 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
73910 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
73911 __get_user(kmsg->msg_flags, &umsg->msg_flags))
73912 return -EFAULT;
73913 - kmsg->msg_name = compat_ptr(tmp1);
73914 - kmsg->msg_iov = compat_ptr(tmp2);
73915 - kmsg->msg_control = compat_ptr(tmp3);
73916 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
73917 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
73918 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
73919 return 0;
73920 }
73921
73922 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73923
73924 if (kern_msg->msg_namelen) {
73925 if (mode == VERIFY_READ) {
73926 - int err = move_addr_to_kernel(kern_msg->msg_name,
73927 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
73928 kern_msg->msg_namelen,
73929 kern_address);
73930 if (err < 0)
73931 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73932 kern_msg->msg_name = NULL;
73933
73934 tot_len = iov_from_user_compat_to_kern(kern_iov,
73935 - (struct compat_iovec __user *)kern_msg->msg_iov,
73936 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
73937 kern_msg->msg_iovlen);
73938 if (tot_len >= 0)
73939 kern_msg->msg_iov = kern_iov;
73940 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
73941
73942 #define CMSG_COMPAT_FIRSTHDR(msg) \
73943 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
73944 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
73945 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
73946 (struct compat_cmsghdr __user *)NULL)
73947
73948 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
73949 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
73950 (ucmlen) <= (unsigned long) \
73951 ((mhdr)->msg_controllen - \
73952 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
73953 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
73954
73955 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
73956 struct compat_cmsghdr __user *cmsg, int cmsg_len)
73957 {
73958 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
73959 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
73960 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
73961 msg->msg_controllen)
73962 return NULL;
73963 return (struct compat_cmsghdr __user *)ptr;
73964 @@ -219,7 +219,7 @@ Efault:
73965
73966 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
73967 {
73968 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
73969 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
73970 struct compat_cmsghdr cmhdr;
73971 int cmlen;
73972
73973 @@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
73974
73975 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
73976 {
73977 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
73978 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
73979 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
73980 int fdnum = scm->fp->count;
73981 struct file **fp = scm->fp->fp;
73982 @@ -372,7 +372,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
73983 return -EFAULT;
73984 old_fs = get_fs();
73985 set_fs(KERNEL_DS);
73986 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
73987 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
73988 set_fs(old_fs);
73989
73990 return err;
73991 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
73992 len = sizeof(ktime);
73993 old_fs = get_fs();
73994 set_fs(KERNEL_DS);
73995 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
73996 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
73997 set_fs(old_fs);
73998
73999 if (!err) {
74000 @@ -576,7 +576,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74001 case MCAST_JOIN_GROUP:
74002 case MCAST_LEAVE_GROUP:
74003 {
74004 - struct compat_group_req __user *gr32 = (void *)optval;
74005 + struct compat_group_req __user *gr32 = (void __user *)optval;
74006 struct group_req __user *kgr =
74007 compat_alloc_user_space(sizeof(struct group_req));
74008 u32 interface;
74009 @@ -597,7 +597,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74010 case MCAST_BLOCK_SOURCE:
74011 case MCAST_UNBLOCK_SOURCE:
74012 {
74013 - struct compat_group_source_req __user *gsr32 = (void *)optval;
74014 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74015 struct group_source_req __user *kgsr = compat_alloc_user_space(
74016 sizeof(struct group_source_req));
74017 u32 interface;
74018 @@ -618,7 +618,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74019 }
74020 case MCAST_MSFILTER:
74021 {
74022 - struct compat_group_filter __user *gf32 = (void *)optval;
74023 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74024 struct group_filter __user *kgf;
74025 u32 interface, fmode, numsrc;
74026
74027 @@ -656,7 +656,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74028 char __user *optval, int __user *optlen,
74029 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74030 {
74031 - struct compat_group_filter __user *gf32 = (void *)optval;
74032 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74033 struct group_filter __user *kgf;
74034 int __user *koptlen;
74035 u32 interface, fmode, numsrc;
74036 diff --git a/net/core/datagram.c b/net/core/datagram.c
74037 index e4fbfd6..6a6ac94 100644
74038 --- a/net/core/datagram.c
74039 +++ b/net/core/datagram.c
74040 @@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74041 }
74042
74043 kfree_skb(skb);
74044 - atomic_inc(&sk->sk_drops);
74045 + atomic_inc_unchecked(&sk->sk_drops);
74046 sk_mem_reclaim_partial(sk);
74047
74048 return err;
74049 diff --git a/net/core/dev.c b/net/core/dev.c
74050 index 99e1d75..adf968a 100644
74051 --- a/net/core/dev.c
74052 +++ b/net/core/dev.c
74053 @@ -1136,9 +1136,13 @@ void dev_load(struct net *net, const char *name)
74054 if (no_module && capable(CAP_NET_ADMIN))
74055 no_module = request_module("netdev-%s", name);
74056 if (no_module && capable(CAP_SYS_MODULE)) {
74057 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74058 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
74059 +#else
74060 if (!request_module("%s", name))
74061 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
74062 name);
74063 +#endif
74064 }
74065 }
74066 EXPORT_SYMBOL(dev_load);
74067 @@ -1602,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74068 {
74069 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74070 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74071 - atomic_long_inc(&dev->rx_dropped);
74072 + atomic_long_inc_unchecked(&dev->rx_dropped);
74073 kfree_skb(skb);
74074 return NET_RX_DROP;
74075 }
74076 @@ -1612,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74077 nf_reset(skb);
74078
74079 if (unlikely(!is_skb_forwardable(dev, skb))) {
74080 - atomic_long_inc(&dev->rx_dropped);
74081 + atomic_long_inc_unchecked(&dev->rx_dropped);
74082 kfree_skb(skb);
74083 return NET_RX_DROP;
74084 }
74085 @@ -2042,7 +2046,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74086
74087 struct dev_gso_cb {
74088 void (*destructor)(struct sk_buff *skb);
74089 -};
74090 +} __no_const;
74091
74092 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74093
74094 @@ -2898,7 +2902,7 @@ enqueue:
74095
74096 local_irq_restore(flags);
74097
74098 - atomic_long_inc(&skb->dev->rx_dropped);
74099 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74100 kfree_skb(skb);
74101 return NET_RX_DROP;
74102 }
74103 @@ -2970,7 +2974,7 @@ int netif_rx_ni(struct sk_buff *skb)
74104 }
74105 EXPORT_SYMBOL(netif_rx_ni);
74106
74107 -static void net_tx_action(struct softirq_action *h)
74108 +static void net_tx_action(void)
74109 {
74110 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74111
74112 @@ -3258,7 +3262,7 @@ ncls:
74113 if (pt_prev) {
74114 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
74115 } else {
74116 - atomic_long_inc(&skb->dev->rx_dropped);
74117 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74118 kfree_skb(skb);
74119 /* Jamal, now you will not able to escape explaining
74120 * me how you were going to use this. :-)
74121 @@ -3818,7 +3822,7 @@ void netif_napi_del(struct napi_struct *napi)
74122 }
74123 EXPORT_SYMBOL(netif_napi_del);
74124
74125 -static void net_rx_action(struct softirq_action *h)
74126 +static void net_rx_action(void)
74127 {
74128 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74129 unsigned long time_limit = jiffies + 2;
74130 @@ -4288,8 +4292,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
74131 else
74132 seq_printf(seq, "%04x", ntohs(pt->type));
74133
74134 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74135 + seq_printf(seq, " %-8s %p\n",
74136 + pt->dev ? pt->dev->name : "", NULL);
74137 +#else
74138 seq_printf(seq, " %-8s %pF\n",
74139 pt->dev ? pt->dev->name : "", pt->func);
74140 +#endif
74141 }
74142
74143 return 0;
74144 @@ -5839,7 +5848,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
74145 } else {
74146 netdev_stats_to_stats64(storage, &dev->stats);
74147 }
74148 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
74149 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
74150 return storage;
74151 }
74152 EXPORT_SYMBOL(dev_get_stats);
74153 diff --git a/net/core/flow.c b/net/core/flow.c
74154 index e318c7e..168b1d0 100644
74155 --- a/net/core/flow.c
74156 +++ b/net/core/flow.c
74157 @@ -61,7 +61,7 @@ struct flow_cache {
74158 struct timer_list rnd_timer;
74159 };
74160
74161 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
74162 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
74163 EXPORT_SYMBOL(flow_cache_genid);
74164 static struct flow_cache flow_cache_global;
74165 static struct kmem_cache *flow_cachep __read_mostly;
74166 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
74167
74168 static int flow_entry_valid(struct flow_cache_entry *fle)
74169 {
74170 - if (atomic_read(&flow_cache_genid) != fle->genid)
74171 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
74172 return 0;
74173 if (fle->object && !fle->object->ops->check(fle->object))
74174 return 0;
74175 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
74176 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
74177 fcp->hash_count++;
74178 }
74179 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
74180 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
74181 flo = fle->object;
74182 if (!flo)
74183 goto ret_object;
74184 @@ -280,7 +280,7 @@ nocache:
74185 }
74186 flo = resolver(net, key, family, dir, flo, ctx);
74187 if (fle) {
74188 - fle->genid = atomic_read(&flow_cache_genid);
74189 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
74190 if (!IS_ERR(flo))
74191 fle->object = flo;
74192 else
74193 diff --git a/net/core/iovec.c b/net/core/iovec.c
74194 index 7e7aeb0..2a998cb 100644
74195 --- a/net/core/iovec.c
74196 +++ b/net/core/iovec.c
74197 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74198 if (m->msg_namelen) {
74199 if (mode == VERIFY_READ) {
74200 void __user *namep;
74201 - namep = (void __user __force *) m->msg_name;
74202 + namep = (void __force_user *) m->msg_name;
74203 err = move_addr_to_kernel(namep, m->msg_namelen,
74204 address);
74205 if (err < 0)
74206 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74207 }
74208
74209 size = m->msg_iovlen * sizeof(struct iovec);
74210 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
74211 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
74212 return -EFAULT;
74213
74214 m->msg_iov = iov;
74215 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
74216 index 90430b7..0032ec0 100644
74217 --- a/net/core/rtnetlink.c
74218 +++ b/net/core/rtnetlink.c
74219 @@ -56,7 +56,7 @@ struct rtnl_link {
74220 rtnl_doit_func doit;
74221 rtnl_dumpit_func dumpit;
74222 rtnl_calcit_func calcit;
74223 -};
74224 +} __no_const;
74225
74226 static DEFINE_MUTEX(rtnl_mutex);
74227
74228 diff --git a/net/core/scm.c b/net/core/scm.c
74229 index 611c5ef..88f6d6d 100644
74230 --- a/net/core/scm.c
74231 +++ b/net/core/scm.c
74232 @@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
74233 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74234 {
74235 struct cmsghdr __user *cm
74236 - = (__force struct cmsghdr __user *)msg->msg_control;
74237 + = (struct cmsghdr __force_user *)msg->msg_control;
74238 struct cmsghdr cmhdr;
74239 int cmlen = CMSG_LEN(len);
74240 int err;
74241 @@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74242 err = -EFAULT;
74243 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
74244 goto out;
74245 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
74246 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
74247 goto out;
74248 cmlen = CMSG_SPACE(len);
74249 if (msg->msg_controllen < cmlen)
74250 @@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
74251 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74252 {
74253 struct cmsghdr __user *cm
74254 - = (__force struct cmsghdr __user*)msg->msg_control;
74255 + = (struct cmsghdr __force_user *)msg->msg_control;
74256
74257 int fdmax = 0;
74258 int fdnum = scm->fp->count;
74259 @@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74260 if (fdnum < fdmax)
74261 fdmax = fdnum;
74262
74263 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74264 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74265 i++, cmfptr++)
74266 {
74267 int new_fd;
74268 diff --git a/net/core/sock.c b/net/core/sock.c
74269 index b2e14c0..6651b32 100644
74270 --- a/net/core/sock.c
74271 +++ b/net/core/sock.c
74272 @@ -340,7 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74273 struct sk_buff_head *list = &sk->sk_receive_queue;
74274
74275 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
74276 - atomic_inc(&sk->sk_drops);
74277 + atomic_inc_unchecked(&sk->sk_drops);
74278 trace_sock_rcvqueue_full(sk, skb);
74279 return -ENOMEM;
74280 }
74281 @@ -350,7 +350,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74282 return err;
74283
74284 if (!sk_rmem_schedule(sk, skb->truesize)) {
74285 - atomic_inc(&sk->sk_drops);
74286 + atomic_inc_unchecked(&sk->sk_drops);
74287 return -ENOBUFS;
74288 }
74289
74290 @@ -370,7 +370,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74291 skb_dst_force(skb);
74292
74293 spin_lock_irqsave(&list->lock, flags);
74294 - skb->dropcount = atomic_read(&sk->sk_drops);
74295 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74296 __skb_queue_tail(list, skb);
74297 spin_unlock_irqrestore(&list->lock, flags);
74298
74299 @@ -390,7 +390,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74300 skb->dev = NULL;
74301
74302 if (sk_rcvqueues_full(sk, skb)) {
74303 - atomic_inc(&sk->sk_drops);
74304 + atomic_inc_unchecked(&sk->sk_drops);
74305 goto discard_and_relse;
74306 }
74307 if (nested)
74308 @@ -408,7 +408,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74309 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74310 } else if (sk_add_backlog(sk, skb)) {
74311 bh_unlock_sock(sk);
74312 - atomic_inc(&sk->sk_drops);
74313 + atomic_inc_unchecked(&sk->sk_drops);
74314 goto discard_and_relse;
74315 }
74316
74317 @@ -984,7 +984,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74318 if (len > sizeof(peercred))
74319 len = sizeof(peercred);
74320 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
74321 - if (copy_to_user(optval, &peercred, len))
74322 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
74323 return -EFAULT;
74324 goto lenout;
74325 }
74326 @@ -997,7 +997,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74327 return -ENOTCONN;
74328 if (lv < len)
74329 return -EINVAL;
74330 - if (copy_to_user(optval, address, len))
74331 + if (len > sizeof(address) || copy_to_user(optval, address, len))
74332 return -EFAULT;
74333 goto lenout;
74334 }
74335 @@ -1043,7 +1043,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74336
74337 if (len > lv)
74338 len = lv;
74339 - if (copy_to_user(optval, &v, len))
74340 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
74341 return -EFAULT;
74342 lenout:
74343 if (put_user(len, optlen))
74344 @@ -2128,7 +2128,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
74345 */
74346 smp_wmb();
74347 atomic_set(&sk->sk_refcnt, 1);
74348 - atomic_set(&sk->sk_drops, 0);
74349 + atomic_set_unchecked(&sk->sk_drops, 0);
74350 }
74351 EXPORT_SYMBOL(sock_init_data);
74352
74353 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
74354 index b9868e1..849f809 100644
74355 --- a/net/core/sock_diag.c
74356 +++ b/net/core/sock_diag.c
74357 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
74358
74359 int sock_diag_check_cookie(void *sk, __u32 *cookie)
74360 {
74361 +#ifndef CONFIG_GRKERNSEC_HIDESYM
74362 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
74363 cookie[1] != INET_DIAG_NOCOOKIE) &&
74364 ((u32)(unsigned long)sk != cookie[0] ||
74365 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
74366 return -ESTALE;
74367 else
74368 +#endif
74369 return 0;
74370 }
74371 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
74372
74373 void sock_diag_save_cookie(void *sk, __u32 *cookie)
74374 {
74375 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74376 + cookie[0] = 0;
74377 + cookie[1] = 0;
74378 +#else
74379 cookie[0] = (u32)(unsigned long)sk;
74380 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
74381 +#endif
74382 }
74383 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
74384
74385 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
74386 index 02e75d1..9a57a7c 100644
74387 --- a/net/decnet/sysctl_net_decnet.c
74388 +++ b/net/decnet/sysctl_net_decnet.c
74389 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
74390
74391 if (len > *lenp) len = *lenp;
74392
74393 - if (copy_to_user(buffer, addr, len))
74394 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
74395 return -EFAULT;
74396
74397 *lenp = len;
74398 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
74399
74400 if (len > *lenp) len = *lenp;
74401
74402 - if (copy_to_user(buffer, devname, len))
74403 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
74404 return -EFAULT;
74405
74406 *lenp = len;
74407 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
74408 index 39a2d29..f39c0fe 100644
74409 --- a/net/econet/Kconfig
74410 +++ b/net/econet/Kconfig
74411 @@ -4,7 +4,7 @@
74412
74413 config ECONET
74414 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
74415 - depends on EXPERIMENTAL && INET
74416 + depends on EXPERIMENTAL && INET && BROKEN
74417 ---help---
74418 Econet is a fairly old and slow networking protocol mainly used by
74419 Acorn computers to access file and print servers. It uses native
74420 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
74421 index cbe3a68..a879b75 100644
74422 --- a/net/ipv4/fib_frontend.c
74423 +++ b/net/ipv4/fib_frontend.c
74424 @@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
74425 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74426 fib_sync_up(dev);
74427 #endif
74428 - atomic_inc(&net->ipv4.dev_addr_genid);
74429 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74430 rt_cache_flush(dev_net(dev), -1);
74431 break;
74432 case NETDEV_DOWN:
74433 fib_del_ifaddr(ifa, NULL);
74434 - atomic_inc(&net->ipv4.dev_addr_genid);
74435 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74436 if (ifa->ifa_dev->ifa_list == NULL) {
74437 /* Last address was deleted from this interface.
74438 * Disable IP.
74439 @@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
74440 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74441 fib_sync_up(dev);
74442 #endif
74443 - atomic_inc(&net->ipv4.dev_addr_genid);
74444 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74445 rt_cache_flush(dev_net(dev), -1);
74446 break;
74447 case NETDEV_DOWN:
74448 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
74449 index 5063fa3..9dd4a69 100644
74450 --- a/net/ipv4/fib_semantics.c
74451 +++ b/net/ipv4/fib_semantics.c
74452 @@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
74453 nh->nh_saddr = inet_select_addr(nh->nh_dev,
74454 nh->nh_gw,
74455 nh->nh_parent->fib_scope);
74456 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
74457 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
74458
74459 return nh->nh_saddr;
74460 }
74461 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
74462 index 984ec65..97ac518 100644
74463 --- a/net/ipv4/inet_hashtables.c
74464 +++ b/net/ipv4/inet_hashtables.c
74465 @@ -18,12 +18,15 @@
74466 #include <linux/sched.h>
74467 #include <linux/slab.h>
74468 #include <linux/wait.h>
74469 +#include <linux/security.h>
74470
74471 #include <net/inet_connection_sock.h>
74472 #include <net/inet_hashtables.h>
74473 #include <net/secure_seq.h>
74474 #include <net/ip.h>
74475
74476 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
74477 +
74478 /*
74479 * Allocate and initialize a new local port bind bucket.
74480 * The bindhash mutex for snum's hash chain must be held here.
74481 @@ -530,6 +533,8 @@ ok:
74482 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
74483 spin_unlock(&head->lock);
74484
74485 + gr_update_task_in_ip_table(current, inet_sk(sk));
74486 +
74487 if (tw) {
74488 inet_twsk_deschedule(tw, death_row);
74489 while (twrefcnt) {
74490 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
74491 index d4d61b6..b81aec8 100644
74492 --- a/net/ipv4/inetpeer.c
74493 +++ b/net/ipv4/inetpeer.c
74494 @@ -487,8 +487,8 @@ relookup:
74495 if (p) {
74496 p->daddr = *daddr;
74497 atomic_set(&p->refcnt, 1);
74498 - atomic_set(&p->rid, 0);
74499 - atomic_set(&p->ip_id_count,
74500 + atomic_set_unchecked(&p->rid, 0);
74501 + atomic_set_unchecked(&p->ip_id_count,
74502 (daddr->family == AF_INET) ?
74503 secure_ip_id(daddr->addr.a4) :
74504 secure_ipv6_id(daddr->addr.a6));
74505 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
74506 index 3727e23..517f5df 100644
74507 --- a/net/ipv4/ip_fragment.c
74508 +++ b/net/ipv4/ip_fragment.c
74509 @@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
74510 return 0;
74511
74512 start = qp->rid;
74513 - end = atomic_inc_return(&peer->rid);
74514 + end = atomic_inc_return_unchecked(&peer->rid);
74515 qp->rid = end;
74516
74517 rc = qp->q.fragments && (end - start) > max;
74518 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
74519 index 2fd0fba..83fac99 100644
74520 --- a/net/ipv4/ip_sockglue.c
74521 +++ b/net/ipv4/ip_sockglue.c
74522 @@ -1137,7 +1137,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74523 len = min_t(unsigned int, len, opt->optlen);
74524 if (put_user(len, optlen))
74525 return -EFAULT;
74526 - if (copy_to_user(optval, opt->__data, len))
74527 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
74528 + copy_to_user(optval, opt->__data, len))
74529 return -EFAULT;
74530 return 0;
74531 }
74532 @@ -1268,7 +1269,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74533 if (sk->sk_type != SOCK_STREAM)
74534 return -ENOPROTOOPT;
74535
74536 - msg.msg_control = optval;
74537 + msg.msg_control = (void __force_kernel *)optval;
74538 msg.msg_controllen = len;
74539 msg.msg_flags = flags;
74540
74541 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
74542 index 92ac7e7..13f93d9 100644
74543 --- a/net/ipv4/ipconfig.c
74544 +++ b/net/ipv4/ipconfig.c
74545 @@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
74546
74547 mm_segment_t oldfs = get_fs();
74548 set_fs(get_ds());
74549 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74550 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74551 set_fs(oldfs);
74552 return res;
74553 }
74554 @@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
74555
74556 mm_segment_t oldfs = get_fs();
74557 set_fs(get_ds());
74558 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74559 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74560 set_fs(oldfs);
74561 return res;
74562 }
74563 @@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
74564
74565 mm_segment_t oldfs = get_fs();
74566 set_fs(get_ds());
74567 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
74568 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
74569 set_fs(oldfs);
74570 return res;
74571 }
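
The ipconfig.c hunks above only change annotations: these early-boot shims pass kernel pointers to ioctl interfaces typed __user, under a set_fs(get_ds()) bracket, and the patch replaces the open-coded __force __user casts with the __force_user marker it introduces elsewhere, so the deliberate laundering is visible to the checker plugins. A sketch of the shim shape, illustrative only and with hypothetical example_* names:

#include <linux/uaccess.h>

/* Temporarily widen the address limit so a kernel buffer is acceptable to a
 * __user-typed interface for one call; __force_user (an annotation added by
 * this patch) marks the cast as intentional.
 */
static int example_kernel_ioctl_shim(int (*handler)(void __user *arg),
				     void *kernel_arg)
{
	mm_segment_t oldfs = get_fs();
	int res;

	set_fs(get_ds());
	res = handler((void __force_user *)kernel_arg);
	set_fs(oldfs);
	return res;
}
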
74572 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
74573 index 50009c7..5996a9f 100644
74574 --- a/net/ipv4/ping.c
74575 +++ b/net/ipv4/ping.c
74576 @@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
74577 sk_rmem_alloc_get(sp),
74578 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74579 atomic_read(&sp->sk_refcnt), sp,
74580 - atomic_read(&sp->sk_drops), len);
74581 + atomic_read_unchecked(&sp->sk_drops), len);
74582 }
74583
74584 static int ping_seq_show(struct seq_file *seq, void *v)
74585 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
74586 index bbd604c..4d5469c 100644
74587 --- a/net/ipv4/raw.c
74588 +++ b/net/ipv4/raw.c
74589 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
74590 int raw_rcv(struct sock *sk, struct sk_buff *skb)
74591 {
74592 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
74593 - atomic_inc(&sk->sk_drops);
74594 + atomic_inc_unchecked(&sk->sk_drops);
74595 kfree_skb(skb);
74596 return NET_RX_DROP;
74597 }
74598 @@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
74599
74600 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
74601 {
74602 + struct icmp_filter filter;
74603 +
74604 if (optlen > sizeof(struct icmp_filter))
74605 optlen = sizeof(struct icmp_filter);
74606 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
74607 + if (copy_from_user(&filter, optval, optlen))
74608 return -EFAULT;
74609 + raw_sk(sk)->filter = filter;
74610 return 0;
74611 }
74612
74613 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
74614 {
74615 int len, ret = -EFAULT;
74616 + struct icmp_filter filter;
74617
74618 if (get_user(len, optlen))
74619 goto out;
74620 @@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
74621 if (len > sizeof(struct icmp_filter))
74622 len = sizeof(struct icmp_filter);
74623 ret = -EFAULT;
74624 - if (put_user(len, optlen) ||
74625 - copy_to_user(optval, &raw_sk(sk)->filter, len))
74626 + filter = raw_sk(sk)->filter;
74627 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
74628 goto out;
74629 ret = 0;
74630 out: return ret;
74631 @@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74632 sk_wmem_alloc_get(sp),
74633 sk_rmem_alloc_get(sp),
74634 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74635 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74636 + atomic_read(&sp->sk_refcnt),
74637 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74638 + NULL,
74639 +#else
74640 + sp,
74641 +#endif
74642 + atomic_read_unchecked(&sp->sk_drops));
74643 }
74644
74645 static int raw_seq_show(struct seq_file *seq, void *v)
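
The raw_seticmpfilter()/raw_geticmpfilter() hunks above show a second recurring idiom: option data is bounced through a stack copy of the exact structure instead of being copied straight into, or out of, the filter embedded in the socket, so a size-confused copy can never touch adjacent socket fields. A generic sketch of the set side, illustrative only, with a hypothetical example_filter type:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_filter {
	u32 data[8];
};

/* Land setsockopt data in a correctly sized stack temporary first, and
 * commit it to the destination only after the bounded copy succeeded.
 * (Zeroed here so a short copy never commits stale stack bytes; the patch
 * itself does not zero the temporary.)
 */
static int example_set_filter(struct example_filter *dst,
			      const char __user *optval, int optlen)
{
	struct example_filter tmp;

	memset(&tmp, 0, sizeof(tmp));
	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);
	if (copy_from_user(&tmp, optval, optlen))
		return -EFAULT;
	*dst = tmp;
	return 0;
}
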
74646 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
74647 index 167ea10..4b15883 100644
74648 --- a/net/ipv4/route.c
74649 +++ b/net/ipv4/route.c
74650 @@ -312,7 +312,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
74651
74652 static inline int rt_genid(struct net *net)
74653 {
74654 - return atomic_read(&net->ipv4.rt_genid);
74655 + return atomic_read_unchecked(&net->ipv4.rt_genid);
74656 }
74657
74658 #ifdef CONFIG_PROC_FS
74659 @@ -936,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
74660 unsigned char shuffle;
74661
74662 get_random_bytes(&shuffle, sizeof(shuffle));
74663 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
74664 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
74665 inetpeer_invalidate_tree(AF_INET);
74666 }
74667
74668 @@ -3009,7 +3009,7 @@ static int rt_fill_info(struct net *net,
74669 error = rt->dst.error;
74670 if (peer) {
74671 inet_peer_refcheck(rt->peer);
74672 - id = atomic_read(&peer->ip_id_count) & 0xffff;
74673 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
74674 if (peer->tcp_ts_stamp) {
74675 ts = peer->tcp_ts;
74676 tsage = get_seconds() - peer->tcp_ts_stamp;
74677 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
74678 index 0cb86ce..8e7fda8 100644
74679 --- a/net/ipv4/tcp_ipv4.c
74680 +++ b/net/ipv4/tcp_ipv4.c
74681 @@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
74682 EXPORT_SYMBOL(sysctl_tcp_low_latency);
74683
74684
74685 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74686 +extern int grsec_enable_blackhole;
74687 +#endif
74688 +
74689 #ifdef CONFIG_TCP_MD5SIG
74690 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
74691 __be32 daddr, __be32 saddr, const struct tcphdr *th);
74692 @@ -1641,6 +1645,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
74693 return 0;
74694
74695 reset:
74696 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74697 + if (!grsec_enable_blackhole)
74698 +#endif
74699 tcp_v4_send_reset(rsk, skb);
74700 discard:
74701 kfree_skb(skb);
74702 @@ -1703,12 +1710,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
74703 TCP_SKB_CB(skb)->sacked = 0;
74704
74705 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74706 - if (!sk)
74707 + if (!sk) {
74708 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74709 + ret = 1;
74710 +#endif
74711 goto no_tcp_socket;
74712 -
74713 + }
74714 process:
74715 - if (sk->sk_state == TCP_TIME_WAIT)
74716 + if (sk->sk_state == TCP_TIME_WAIT) {
74717 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74718 + ret = 2;
74719 +#endif
74720 goto do_time_wait;
74721 + }
74722
74723 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
74724 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74725 @@ -1758,6 +1772,10 @@ no_tcp_socket:
74726 bad_packet:
74727 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74728 } else {
74729 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74730 + if (!grsec_enable_blackhole || (ret == 1 &&
74731 + (skb->dev->flags & IFF_LOOPBACK)))
74732 +#endif
74733 tcp_v4_send_reset(NULL, skb);
74734 }
74735
74736 @@ -2419,7 +2437,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
74737 0, /* non standard timer */
74738 0, /* open_requests have no inode */
74739 atomic_read(&sk->sk_refcnt),
74740 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74741 + NULL,
74742 +#else
74743 req,
74744 +#endif
74745 len);
74746 }
74747
74748 @@ -2469,7 +2491,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
74749 sock_i_uid(sk),
74750 icsk->icsk_probes_out,
74751 sock_i_ino(sk),
74752 - atomic_read(&sk->sk_refcnt), sk,
74753 + atomic_read(&sk->sk_refcnt),
74754 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74755 + NULL,
74756 +#else
74757 + sk,
74758 +#endif
74759 jiffies_to_clock_t(icsk->icsk_rto),
74760 jiffies_to_clock_t(icsk->icsk_ack.ato),
74761 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
74762 @@ -2497,7 +2524,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
74763 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
74764 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
74765 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74766 - atomic_read(&tw->tw_refcnt), tw, len);
74767 + atomic_read(&tw->tw_refcnt),
74768 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74769 + NULL,
74770 +#else
74771 + tw,
74772 +#endif
74773 + len);
74774 }
74775
74776 #define TMPSZ 150
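
The CONFIG_GRKERNSEC_BLACKHOLE hunks above, and the matching ones in tcp_minisocks.c, udp.c and the IPv6 files further down, all wrap the stock kernel's RST/ICMP replies in the same gate: answer normally when the feature is disabled, still answer loopback traffic that matched no local socket (ret == 1), and otherwise drop silently. A compact restatement of the TCP gate, illustrative only and not part of the patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>

/* The condition the #ifdef blocks place in front of tcp_v4_send_reset();
 * ret == 1 marks a packet that found no local socket at all, which is
 * still answered on loopback so local tooling keeps working.
 */
static bool example_may_send_reset(const struct sk_buff *skb, int ret,
				   int blackhole_enabled)
{
	if (!blackhole_enabled)
		return true;
	if (ret == 1 && (skb->dev->flags & IFF_LOOPBACK))
		return true;
	return false;
}

The UDP and ICMPv6 variants below drop the ret distinction but keep the same loopback exception.
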
74777 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
74778 index 3cabafb..640525b 100644
74779 --- a/net/ipv4/tcp_minisocks.c
74780 +++ b/net/ipv4/tcp_minisocks.c
74781 @@ -27,6 +27,10 @@
74782 #include <net/inet_common.h>
74783 #include <net/xfrm.h>
74784
74785 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74786 +extern int grsec_enable_blackhole;
74787 +#endif
74788 +
74789 int sysctl_tcp_syncookies __read_mostly = 1;
74790 EXPORT_SYMBOL(sysctl_tcp_syncookies);
74791
74792 @@ -753,6 +757,10 @@ listen_overflow:
74793
74794 embryonic_reset:
74795 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
74796 +
74797 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74798 + if (!grsec_enable_blackhole)
74799 +#endif
74800 if (!(flg & TCP_FLAG_RST))
74801 req->rsk_ops->send_reset(sk, skb);
74802
74803 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
74804 index a981cdc..48f4c3a 100644
74805 --- a/net/ipv4/tcp_probe.c
74806 +++ b/net/ipv4/tcp_probe.c
74807 @@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
74808 if (cnt + width >= len)
74809 break;
74810
74811 - if (copy_to_user(buf + cnt, tbuf, width))
74812 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
74813 return -EFAULT;
74814 cnt += width;
74815 }
74816 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
74817 index 34d4a02..3b57f86 100644
74818 --- a/net/ipv4/tcp_timer.c
74819 +++ b/net/ipv4/tcp_timer.c
74820 @@ -22,6 +22,10 @@
74821 #include <linux/gfp.h>
74822 #include <net/tcp.h>
74823
74824 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74825 +extern int grsec_lastack_retries;
74826 +#endif
74827 +
74828 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
74829 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
74830 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
74831 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
74832 }
74833 }
74834
74835 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74836 + if ((sk->sk_state == TCP_LAST_ACK) &&
74837 + (grsec_lastack_retries > 0) &&
74838 + (grsec_lastack_retries < retry_until))
74839 + retry_until = grsec_lastack_retries;
74840 +#endif
74841 +
74842 if (retransmits_timed_out(sk, retry_until,
74843 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
74844 /* Has it gone just too far? */
74845 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
74846 index fe14105..0618260 100644
74847 --- a/net/ipv4/udp.c
74848 +++ b/net/ipv4/udp.c
74849 @@ -87,6 +87,7 @@
74850 #include <linux/types.h>
74851 #include <linux/fcntl.h>
74852 #include <linux/module.h>
74853 +#include <linux/security.h>
74854 #include <linux/socket.h>
74855 #include <linux/sockios.h>
74856 #include <linux/igmp.h>
74857 @@ -109,6 +110,10 @@
74858 #include <trace/events/udp.h>
74859 #include "udp_impl.h"
74860
74861 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74862 +extern int grsec_enable_blackhole;
74863 +#endif
74864 +
74865 struct udp_table udp_table __read_mostly;
74866 EXPORT_SYMBOL(udp_table);
74867
74868 @@ -567,6 +572,9 @@ found:
74869 return s;
74870 }
74871
74872 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
74873 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
74874 +
74875 /*
74876 * This routine is called by the ICMP module when it gets some
74877 * sort of error condition. If err < 0 then the socket should
74878 @@ -858,9 +866,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
74879 dport = usin->sin_port;
74880 if (dport == 0)
74881 return -EINVAL;
74882 +
74883 + err = gr_search_udp_sendmsg(sk, usin);
74884 + if (err)
74885 + return err;
74886 } else {
74887 if (sk->sk_state != TCP_ESTABLISHED)
74888 return -EDESTADDRREQ;
74889 +
74890 + err = gr_search_udp_sendmsg(sk, NULL);
74891 + if (err)
74892 + return err;
74893 +
74894 daddr = inet->inet_daddr;
74895 dport = inet->inet_dport;
74896 /* Open fast path for connected socket.
74897 @@ -1102,7 +1119,7 @@ static unsigned int first_packet_length(struct sock *sk)
74898 udp_lib_checksum_complete(skb)) {
74899 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74900 IS_UDPLITE(sk));
74901 - atomic_inc(&sk->sk_drops);
74902 + atomic_inc_unchecked(&sk->sk_drops);
74903 __skb_unlink(skb, rcvq);
74904 __skb_queue_tail(&list_kill, skb);
74905 }
74906 @@ -1188,6 +1205,10 @@ try_again:
74907 if (!skb)
74908 goto out;
74909
74910 + err = gr_search_udp_recvmsg(sk, skb);
74911 + if (err)
74912 + goto out_free;
74913 +
74914 ulen = skb->len - sizeof(struct udphdr);
74915 copied = len;
74916 if (copied > ulen)
74917 @@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74918
74919 drop:
74920 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
74921 - atomic_inc(&sk->sk_drops);
74922 + atomic_inc_unchecked(&sk->sk_drops);
74923 kfree_skb(skb);
74924 return -1;
74925 }
74926 @@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
74927 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
74928
74929 if (!skb1) {
74930 - atomic_inc(&sk->sk_drops);
74931 + atomic_inc_unchecked(&sk->sk_drops);
74932 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
74933 IS_UDPLITE(sk));
74934 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
74935 @@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
74936 goto csum_error;
74937
74938 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
74939 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74940 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
74941 +#endif
74942 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
74943
74944 /*
74945 @@ -2094,8 +2118,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
74946 sk_wmem_alloc_get(sp),
74947 sk_rmem_alloc_get(sp),
74948 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74949 - atomic_read(&sp->sk_refcnt), sp,
74950 - atomic_read(&sp->sk_drops), len);
74951 + atomic_read(&sp->sk_refcnt),
74952 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74953 + NULL,
74954 +#else
74955 + sp,
74956 +#endif
74957 + atomic_read_unchecked(&sp->sk_drops), len);
74958 }
74959
74960 int udp4_seq_show(struct seq_file *seq, void *v)
74961 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
74962 index 7d5cb97..c56564f 100644
74963 --- a/net/ipv6/addrconf.c
74964 +++ b/net/ipv6/addrconf.c
74965 @@ -2142,7 +2142,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
74966 p.iph.ihl = 5;
74967 p.iph.protocol = IPPROTO_IPV6;
74968 p.iph.ttl = 64;
74969 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
74970 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
74971
74972 if (ops->ndo_do_ioctl) {
74973 mm_segment_t oldfs = get_fs();
74974 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
74975 index 02dd203..e03fcc9 100644
74976 --- a/net/ipv6/inet6_connection_sock.c
74977 +++ b/net/ipv6/inet6_connection_sock.c
74978 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
74979 #ifdef CONFIG_XFRM
74980 {
74981 struct rt6_info *rt = (struct rt6_info *)dst;
74982 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
74983 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
74984 }
74985 #endif
74986 }
74987 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
74988 #ifdef CONFIG_XFRM
74989 if (dst) {
74990 struct rt6_info *rt = (struct rt6_info *)dst;
74991 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
74992 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
74993 __sk_dst_reset(sk);
74994 dst = NULL;
74995 }
74996 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
74997 index 63dd1f8..e7f53ca 100644
74998 --- a/net/ipv6/ipv6_sockglue.c
74999 +++ b/net/ipv6/ipv6_sockglue.c
75000 @@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75001 if (sk->sk_type != SOCK_STREAM)
75002 return -ENOPROTOOPT;
75003
75004 - msg.msg_control = optval;
75005 + msg.msg_control = (void __force_kernel *)optval;
75006 msg.msg_controllen = len;
75007 msg.msg_flags = flags;
75008
75009 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75010 index 5bddea7..82d9d67 100644
75011 --- a/net/ipv6/raw.c
75012 +++ b/net/ipv6/raw.c
75013 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
75014 {
75015 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
75016 skb_checksum_complete(skb)) {
75017 - atomic_inc(&sk->sk_drops);
75018 + atomic_inc_unchecked(&sk->sk_drops);
75019 kfree_skb(skb);
75020 return NET_RX_DROP;
75021 }
75022 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75023 struct raw6_sock *rp = raw6_sk(sk);
75024
75025 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75026 - atomic_inc(&sk->sk_drops);
75027 + atomic_inc_unchecked(&sk->sk_drops);
75028 kfree_skb(skb);
75029 return NET_RX_DROP;
75030 }
75031 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75032
75033 if (inet->hdrincl) {
75034 if (skb_checksum_complete(skb)) {
75035 - atomic_inc(&sk->sk_drops);
75036 + atomic_inc_unchecked(&sk->sk_drops);
75037 kfree_skb(skb);
75038 return NET_RX_DROP;
75039 }
75040 @@ -602,7 +602,7 @@ out:
75041 return err;
75042 }
75043
75044 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75045 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75046 struct flowi6 *fl6, struct dst_entry **dstp,
75047 unsigned int flags)
75048 {
75049 @@ -914,12 +914,15 @@ do_confirm:
75050 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75051 char __user *optval, int optlen)
75052 {
75053 + struct icmp6_filter filter;
75054 +
75055 switch (optname) {
75056 case ICMPV6_FILTER:
75057 if (optlen > sizeof(struct icmp6_filter))
75058 optlen = sizeof(struct icmp6_filter);
75059 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75060 + if (copy_from_user(&filter, optval, optlen))
75061 return -EFAULT;
75062 + raw6_sk(sk)->filter = filter;
75063 return 0;
75064 default:
75065 return -ENOPROTOOPT;
75066 @@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75067 char __user *optval, int __user *optlen)
75068 {
75069 int len;
75070 + struct icmp6_filter filter;
75071
75072 switch (optname) {
75073 case ICMPV6_FILTER:
75074 @@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75075 len = sizeof(struct icmp6_filter);
75076 if (put_user(len, optlen))
75077 return -EFAULT;
75078 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
75079 + filter = raw6_sk(sk)->filter;
75080 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
75081 return -EFAULT;
75082 return 0;
75083 default:
75084 @@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75085 0, 0L, 0,
75086 sock_i_uid(sp), 0,
75087 sock_i_ino(sp),
75088 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75089 + atomic_read(&sp->sk_refcnt),
75090 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75091 + NULL,
75092 +#else
75093 + sp,
75094 +#endif
75095 + atomic_read_unchecked(&sp->sk_drops));
75096 }
75097
75098 static int raw6_seq_show(struct seq_file *seq, void *v)
75099 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75100 index 98256cf..7f16dbd 100644
75101 --- a/net/ipv6/tcp_ipv6.c
75102 +++ b/net/ipv6/tcp_ipv6.c
75103 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
75104 }
75105 #endif
75106
75107 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75108 +extern int grsec_enable_blackhole;
75109 +#endif
75110 +
75111 static void tcp_v6_hash(struct sock *sk)
75112 {
75113 if (sk->sk_state != TCP_CLOSE) {
75114 @@ -1542,6 +1546,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
75115 return 0;
75116
75117 reset:
75118 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75119 + if (!grsec_enable_blackhole)
75120 +#endif
75121 tcp_v6_send_reset(sk, skb);
75122 discard:
75123 if (opt_skb)
75124 @@ -1623,12 +1630,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
75125 TCP_SKB_CB(skb)->sacked = 0;
75126
75127 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75128 - if (!sk)
75129 + if (!sk) {
75130 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75131 + ret = 1;
75132 +#endif
75133 goto no_tcp_socket;
75134 + }
75135
75136 process:
75137 - if (sk->sk_state == TCP_TIME_WAIT)
75138 + if (sk->sk_state == TCP_TIME_WAIT) {
75139 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75140 + ret = 2;
75141 +#endif
75142 goto do_time_wait;
75143 + }
75144
75145 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75146 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75147 @@ -1676,6 +1691,10 @@ no_tcp_socket:
75148 bad_packet:
75149 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75150 } else {
75151 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75152 + if (!grsec_enable_blackhole || (ret == 1 &&
75153 + (skb->dev->flags & IFF_LOOPBACK)))
75154 +#endif
75155 tcp_v6_send_reset(NULL, skb);
75156 }
75157
75158 @@ -1930,7 +1949,13 @@ static void get_openreq6(struct seq_file *seq,
75159 uid,
75160 0, /* non standard timer */
75161 0, /* open_requests have no inode */
75162 - 0, req);
75163 + 0,
75164 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75165 + NULL
75166 +#else
75167 + req
75168 +#endif
75169 + );
75170 }
75171
75172 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75173 @@ -1980,7 +2005,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75174 sock_i_uid(sp),
75175 icsk->icsk_probes_out,
75176 sock_i_ino(sp),
75177 - atomic_read(&sp->sk_refcnt), sp,
75178 + atomic_read(&sp->sk_refcnt),
75179 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75180 + NULL,
75181 +#else
75182 + sp,
75183 +#endif
75184 jiffies_to_clock_t(icsk->icsk_rto),
75185 jiffies_to_clock_t(icsk->icsk_ack.ato),
75186 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
75187 @@ -2015,7 +2045,13 @@ static void get_timewait6_sock(struct seq_file *seq,
75188 dest->s6_addr32[2], dest->s6_addr32[3], destp,
75189 tw->tw_substate, 0, 0,
75190 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75191 - atomic_read(&tw->tw_refcnt), tw);
75192 + atomic_read(&tw->tw_refcnt),
75193 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75194 + NULL
75195 +#else
75196 + tw
75197 +#endif
75198 + );
75199 }
75200
75201 static int tcp6_seq_show(struct seq_file *seq, void *v)
75202 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
75203 index 37b0699..d323408 100644
75204 --- a/net/ipv6/udp.c
75205 +++ b/net/ipv6/udp.c
75206 @@ -50,6 +50,10 @@
75207 #include <linux/seq_file.h>
75208 #include "udp_impl.h"
75209
75210 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75211 +extern int grsec_enable_blackhole;
75212 +#endif
75213 +
75214 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
75215 {
75216 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
75217 @@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
75218
75219 return 0;
75220 drop:
75221 - atomic_inc(&sk->sk_drops);
75222 + atomic_inc_unchecked(&sk->sk_drops);
75223 drop_no_sk_drops_inc:
75224 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75225 kfree_skb(skb);
75226 @@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75227 continue;
75228 }
75229 drop:
75230 - atomic_inc(&sk->sk_drops);
75231 + atomic_inc_unchecked(&sk->sk_drops);
75232 UDP6_INC_STATS_BH(sock_net(sk),
75233 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
75234 UDP6_INC_STATS_BH(sock_net(sk),
75235 @@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75236 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
75237 proto == IPPROTO_UDPLITE);
75238
75239 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75240 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75241 +#endif
75242 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
75243
75244 kfree_skb(skb);
75245 @@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75246 if (!sock_owned_by_user(sk))
75247 udpv6_queue_rcv_skb(sk, skb);
75248 else if (sk_add_backlog(sk, skb)) {
75249 - atomic_inc(&sk->sk_drops);
75250 + atomic_inc_unchecked(&sk->sk_drops);
75251 bh_unlock_sock(sk);
75252 sock_put(sk);
75253 goto discard;
75254 @@ -1411,8 +1418,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
75255 0, 0L, 0,
75256 sock_i_uid(sp), 0,
75257 sock_i_ino(sp),
75258 - atomic_read(&sp->sk_refcnt), sp,
75259 - atomic_read(&sp->sk_drops));
75260 + atomic_read(&sp->sk_refcnt),
75261 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75262 + NULL,
75263 +#else
75264 + sp,
75265 +#endif
75266 + atomic_read_unchecked(&sp->sk_drops));
75267 }
75268
75269 int udp6_seq_show(struct seq_file *seq, void *v)
75270 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
75271 index 6b9d5a0..4dffaf1 100644
75272 --- a/net/irda/ircomm/ircomm_tty.c
75273 +++ b/net/irda/ircomm/ircomm_tty.c
75274 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75275 add_wait_queue(&self->open_wait, &wait);
75276
75277 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
75278 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75279 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75280
75281 /* As far as I can see, we protect open_count - Jean II */
75282 spin_lock_irqsave(&self->spinlock, flags);
75283 if (!tty_hung_up_p(filp)) {
75284 extra_count = 1;
75285 - self->open_count--;
75286 + local_dec(&self->open_count);
75287 }
75288 spin_unlock_irqrestore(&self->spinlock, flags);
75289 - self->blocked_open++;
75290 + local_inc(&self->blocked_open);
75291
75292 while (1) {
75293 if (tty->termios->c_cflag & CBAUD) {
75294 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75295 }
75296
75297 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
75298 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75299 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75300
75301 schedule();
75302 }
75303 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75304 if (extra_count) {
75305 /* ++ is not atomic, so this should be protected - Jean II */
75306 spin_lock_irqsave(&self->spinlock, flags);
75307 - self->open_count++;
75308 + local_inc(&self->open_count);
75309 spin_unlock_irqrestore(&self->spinlock, flags);
75310 }
75311 - self->blocked_open--;
75312 + local_dec(&self->blocked_open);
75313
75314 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
75315 - __FILE__,__LINE__, tty->driver->name, self->open_count);
75316 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
75317
75318 if (!retval)
75319 self->flags |= ASYNC_NORMAL_ACTIVE;
75320 @@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
75321 }
75322 /* ++ is not atomic, so this should be protected - Jean II */
75323 spin_lock_irqsave(&self->spinlock, flags);
75324 - self->open_count++;
75325 + local_inc(&self->open_count);
75326
75327 tty->driver_data = self;
75328 self->tty = tty;
75329 spin_unlock_irqrestore(&self->spinlock, flags);
75330
75331 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
75332 - self->line, self->open_count);
75333 + self->line, local_read(&self->open_count));
75334
75335 /* Not really used by us, but lets do it anyway */
75336 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
75337 @@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75338 return;
75339 }
75340
75341 - if ((tty->count == 1) && (self->open_count != 1)) {
75342 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
75343 /*
75344 * Uh, oh. tty->count is 1, which means that the tty
75345 * structure will be freed. state->count should always
75346 @@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75347 */
75348 IRDA_DEBUG(0, "%s(), bad serial port count; "
75349 "tty->count is 1, state->count is %d\n", __func__ ,
75350 - self->open_count);
75351 - self->open_count = 1;
75352 + local_read(&self->open_count));
75353 + local_set(&self->open_count, 1);
75354 }
75355
75356 - if (--self->open_count < 0) {
75357 + if (local_dec_return(&self->open_count) < 0) {
75358 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
75359 - __func__, self->line, self->open_count);
75360 - self->open_count = 0;
75361 + __func__, self->line, local_read(&self->open_count));
75362 + local_set(&self->open_count, 0);
75363 }
75364 - if (self->open_count) {
75365 + if (local_read(&self->open_count)) {
75366 spin_unlock_irqrestore(&self->spinlock, flags);
75367
75368 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
75369 @@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75370 tty->closing = 0;
75371 self->tty = NULL;
75372
75373 - if (self->blocked_open) {
75374 + if (local_read(&self->blocked_open)) {
75375 if (self->close_delay)
75376 schedule_timeout_interruptible(self->close_delay);
75377 wake_up_interruptible(&self->open_wait);
75378 @@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
75379 spin_lock_irqsave(&self->spinlock, flags);
75380 self->flags &= ~ASYNC_NORMAL_ACTIVE;
75381 self->tty = NULL;
75382 - self->open_count = 0;
75383 + local_set(&self->open_count, 0);
75384 spin_unlock_irqrestore(&self->spinlock, flags);
75385
75386 wake_up_interruptible(&self->open_wait);
75387 @@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
75388 seq_putc(m, '\n');
75389
75390 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
75391 - seq_printf(m, "Open count: %d\n", self->open_count);
75392 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
75393 seq_printf(m, "Max data size: %d\n", self->max_data_size);
75394 seq_printf(m, "Max header size: %d\n", self->max_header_size);
75395
75396 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
75397 index 07d7d55..541de95 100644
75398 --- a/net/iucv/af_iucv.c
75399 +++ b/net/iucv/af_iucv.c
75400 @@ -783,10 +783,10 @@ static int iucv_sock_autobind(struct sock *sk)
75401
75402 write_lock_bh(&iucv_sk_list.lock);
75403
75404 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
75405 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75406 while (__iucv_get_sock_by_name(name)) {
75407 sprintf(name, "%08x",
75408 - atomic_inc_return(&iucv_sk_list.autobind_name));
75409 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75410 }
75411
75412 write_unlock_bh(&iucv_sk_list.lock);
75413 diff --git a/net/key/af_key.c b/net/key/af_key.c
75414 index 7e5d927..cdbb54e 100644
75415 --- a/net/key/af_key.c
75416 +++ b/net/key/af_key.c
75417 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
75418 static u32 get_acqseq(void)
75419 {
75420 u32 res;
75421 - static atomic_t acqseq;
75422 + static atomic_unchecked_t acqseq;
75423
75424 do {
75425 - res = atomic_inc_return(&acqseq);
75426 + res = atomic_inc_return_unchecked(&acqseq);
75427 } while (!res);
75428 return res;
75429 }
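
get_acqseq() above is the clearest self-contained case of why counters keep moving to atomic_unchecked_t in these hunks: the value is a wrapping sequence number, so passing INT_MAX is expected and must not be treated as a reference-count overflow. A sketch of such a counter, illustrative only (the atomic_unchecked_t type and its helpers are defined elsewhere in this patch):

#include <linux/atomic.h>
#include <linux/types.h>

/* A wrapping, never-zero sequence generator in the style of pfkey's
 * get_acqseq(): overflow is intentional, hence the _unchecked primitives.
 */
static atomic_unchecked_t example_seq = ATOMIC_INIT(0);

static u32 example_next_seq(void)
{
	u32 res;

	do {
		res = atomic_inc_return_unchecked(&example_seq);
	} while (!res);
	return res;
}
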
75430 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
75431 index db8fae5..ff070cd 100644
75432 --- a/net/mac80211/ieee80211_i.h
75433 +++ b/net/mac80211/ieee80211_i.h
75434 @@ -28,6 +28,7 @@
75435 #include <net/ieee80211_radiotap.h>
75436 #include <net/cfg80211.h>
75437 #include <net/mac80211.h>
75438 +#include <asm/local.h>
75439 #include "key.h"
75440 #include "sta_info.h"
75441
75442 @@ -842,7 +843,7 @@ struct ieee80211_local {
75443 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
75444 spinlock_t queue_stop_reason_lock;
75445
75446 - int open_count;
75447 + local_t open_count;
75448 int monitors, cooked_mntrs;
75449 /* number of interfaces with corresponding FIF_ flags */
75450 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
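
Here the mac80211 open_count changes from a plain int to a local_t, and the ircomm_tty hunks earlier in this section made the same conversion for open_count/blocked_open; every access in the files that follow goes through the <asm/local.h> helpers. A small sketch of that helper API, illustrative only, with hypothetical example_* names:

#include <asm/local.h>
#include <linux/types.h>

static local_t example_open_count = LOCAL_INIT(0);

static void example_open(void)
{
	local_inc(&example_open_count);			/* replaces open_count++ */
}

static void example_close(void)
{
	if (local_dec_return(&example_open_count) < 0) {	/* replaces --open_count */
		/* clamp to zero on underflow, as the ircomm_tty close path does */
		local_set(&example_open_count, 0);
	}
}

static bool example_is_open(void)
{
	return local_read(&example_open_count) != 0;	/* replaces plain reads */
}
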
75451 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
75452 index c20051b..2accbc4 100644
75453 --- a/net/mac80211/iface.c
75454 +++ b/net/mac80211/iface.c
75455 @@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75456 break;
75457 }
75458
75459 - if (local->open_count == 0) {
75460 + if (local_read(&local->open_count) == 0) {
75461 res = drv_start(local);
75462 if (res)
75463 goto err_del_bss;
75464 @@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75465 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
75466
75467 if (!is_valid_ether_addr(dev->dev_addr)) {
75468 - if (!local->open_count)
75469 + if (!local_read(&local->open_count))
75470 drv_stop(local);
75471 return -EADDRNOTAVAIL;
75472 }
75473 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75474 mutex_unlock(&local->mtx);
75475
75476 if (coming_up)
75477 - local->open_count++;
75478 + local_inc(&local->open_count);
75479
75480 if (hw_reconf_flags)
75481 ieee80211_hw_config(local, hw_reconf_flags);
75482 @@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75483 err_del_interface:
75484 drv_remove_interface(local, sdata);
75485 err_stop:
75486 - if (!local->open_count)
75487 + if (!local_read(&local->open_count))
75488 drv_stop(local);
75489 err_del_bss:
75490 sdata->bss = NULL;
75491 @@ -491,7 +491,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75492 }
75493
75494 if (going_down)
75495 - local->open_count--;
75496 + local_dec(&local->open_count);
75497
75498 switch (sdata->vif.type) {
75499 case NL80211_IFTYPE_AP_VLAN:
75500 @@ -550,7 +550,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75501
75502 ieee80211_recalc_ps(local, -1);
75503
75504 - if (local->open_count == 0) {
75505 + if (local_read(&local->open_count) == 0) {
75506 if (local->ops->napi_poll)
75507 napi_disable(&local->napi);
75508 ieee80211_clear_tx_pending(local);
75509 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
75510 index 1633648..d45ebfa 100644
75511 --- a/net/mac80211/main.c
75512 +++ b/net/mac80211/main.c
75513 @@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
75514 local->hw.conf.power_level = power;
75515 }
75516
75517 - if (changed && local->open_count) {
75518 + if (changed && local_read(&local->open_count)) {
75519 ret = drv_config(local, changed);
75520 /*
75521 * Goal:
75522 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
75523 index ef8eba1..5c63952 100644
75524 --- a/net/mac80211/pm.c
75525 +++ b/net/mac80211/pm.c
75526 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75527 struct ieee80211_sub_if_data *sdata;
75528 struct sta_info *sta;
75529
75530 - if (!local->open_count)
75531 + if (!local_read(&local->open_count))
75532 goto suspend;
75533
75534 ieee80211_scan_cancel(local);
75535 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75536 cancel_work_sync(&local->dynamic_ps_enable_work);
75537 del_timer_sync(&local->dynamic_ps_timer);
75538
75539 - local->wowlan = wowlan && local->open_count;
75540 + local->wowlan = wowlan && local_read(&local->open_count);
75541 if (local->wowlan) {
75542 int err = drv_suspend(local, wowlan);
75543 if (err < 0) {
75544 @@ -128,7 +128,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75545 }
75546
75547 /* stop hardware - this must stop RX */
75548 - if (local->open_count)
75549 + if (local_read(&local->open_count))
75550 ieee80211_stop_device(local);
75551
75552 suspend:
75553 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
75554 index 3313c11..bec9f17 100644
75555 --- a/net/mac80211/rate.c
75556 +++ b/net/mac80211/rate.c
75557 @@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
75558
75559 ASSERT_RTNL();
75560
75561 - if (local->open_count)
75562 + if (local_read(&local->open_count))
75563 return -EBUSY;
75564
75565 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
75566 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
75567 index c97a065..ff61928 100644
75568 --- a/net/mac80211/rc80211_pid_debugfs.c
75569 +++ b/net/mac80211/rc80211_pid_debugfs.c
75570 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
75571
75572 spin_unlock_irqrestore(&events->lock, status);
75573
75574 - if (copy_to_user(buf, pb, p))
75575 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
75576 return -EFAULT;
75577
75578 return p;
75579 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
75580 index 32f7a3b..2b52a0d 100644
75581 --- a/net/mac80211/util.c
75582 +++ b/net/mac80211/util.c
75583 @@ -1179,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
75584 }
75585 #endif
75586 /* everything else happens only if HW was up & running */
75587 - if (!local->open_count)
75588 + if (!local_read(&local->open_count))
75589 goto wake_up;
75590
75591 /*
75592 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
75593 index 0c6f67e..d02cdfc 100644
75594 --- a/net/netfilter/Kconfig
75595 +++ b/net/netfilter/Kconfig
75596 @@ -836,6 +836,16 @@ config NETFILTER_XT_MATCH_ESP
75597
75598 To compile it as a module, choose M here. If unsure, say N.
75599
75600 +config NETFILTER_XT_MATCH_GRADM
75601 + tristate '"gradm" match support'
75602 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
75603 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
75604 + ---help---
75605 + The gradm match allows matching on whether grsecurity RBAC is enabled.
75606 + It is useful when iptables rules are applied early on bootup to
75607 + prevent connections to the machine (except from a trusted host)
75608 + while the RBAC system is disabled.
75609 +
75610 config NETFILTER_XT_MATCH_HASHLIMIT
75611 tristate '"hashlimit" match support'
75612 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
75613 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
75614 index ca36765..0882e7c 100644
75615 --- a/net/netfilter/Makefile
75616 +++ b/net/netfilter/Makefile
75617 @@ -86,6 +86,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
75618 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
75619 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
75620 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
75621 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
75622 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
75623 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
75624 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
75625 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
75626 index 29fa5ba..8debc79 100644
75627 --- a/net/netfilter/ipvs/ip_vs_conn.c
75628 +++ b/net/netfilter/ipvs/ip_vs_conn.c
75629 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
75630 /* Increase the refcnt counter of the dest */
75631 atomic_inc(&dest->refcnt);
75632
75633 - conn_flags = atomic_read(&dest->conn_flags);
75634 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
75635 if (cp->protocol != IPPROTO_UDP)
75636 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
75637 /* Bind with the destination and its corresponding transmitter */
75638 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
75639 atomic_set(&cp->refcnt, 1);
75640
75641 atomic_set(&cp->n_control, 0);
75642 - atomic_set(&cp->in_pkts, 0);
75643 + atomic_set_unchecked(&cp->in_pkts, 0);
75644
75645 atomic_inc(&ipvs->conn_count);
75646 if (flags & IP_VS_CONN_F_NO_CPORT)
75647 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
75648
75649 /* Don't drop the entry if its number of incoming packets is not
75650 located in [0, 8] */
75651 - i = atomic_read(&cp->in_pkts);
75652 + i = atomic_read_unchecked(&cp->in_pkts);
75653 if (i > 8 || i < 0) return 0;
75654
75655 if (!todrop_rate[i]) return 0;
75656 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
75657 index 00bdb1d..6725a48 100644
75658 --- a/net/netfilter/ipvs/ip_vs_core.c
75659 +++ b/net/netfilter/ipvs/ip_vs_core.c
75660 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
75661 ret = cp->packet_xmit(skb, cp, pd->pp);
75662 /* do not touch skb anymore */
75663
75664 - atomic_inc(&cp->in_pkts);
75665 + atomic_inc_unchecked(&cp->in_pkts);
75666 ip_vs_conn_put(cp);
75667 return ret;
75668 }
75669 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
75670 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
75671 pkts = sysctl_sync_threshold(ipvs);
75672 else
75673 - pkts = atomic_add_return(1, &cp->in_pkts);
75674 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75675
75676 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
75677 cp->protocol == IPPROTO_SCTP) {
75678 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
75679 index f558998..9cdff60 100644
75680 --- a/net/netfilter/ipvs/ip_vs_ctl.c
75681 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
75682 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
75683 ip_vs_rs_hash(ipvs, dest);
75684 write_unlock_bh(&ipvs->rs_lock);
75685 }
75686 - atomic_set(&dest->conn_flags, conn_flags);
75687 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
75688
75689 /* bind the service */
75690 if (!dest->svc) {
75691 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75692 " %-7s %-6d %-10d %-10d\n",
75693 &dest->addr.in6,
75694 ntohs(dest->port),
75695 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75696 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75697 atomic_read(&dest->weight),
75698 atomic_read(&dest->activeconns),
75699 atomic_read(&dest->inactconns));
75700 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75701 "%-7s %-6d %-10d %-10d\n",
75702 ntohl(dest->addr.ip),
75703 ntohs(dest->port),
75704 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75705 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75706 atomic_read(&dest->weight),
75707 atomic_read(&dest->activeconns),
75708 atomic_read(&dest->inactconns));
75709 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
75710
75711 entry.addr = dest->addr.ip;
75712 entry.port = dest->port;
75713 - entry.conn_flags = atomic_read(&dest->conn_flags);
75714 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
75715 entry.weight = atomic_read(&dest->weight);
75716 entry.u_threshold = dest->u_threshold;
75717 entry.l_threshold = dest->l_threshold;
75718 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
75719 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
75720
75721 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
75722 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75723 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75724 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
75725 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
75726 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
75727 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
75728 index 8a0d6d6..90ec197 100644
75729 --- a/net/netfilter/ipvs/ip_vs_sync.c
75730 +++ b/net/netfilter/ipvs/ip_vs_sync.c
75731 @@ -649,7 +649,7 @@ control:
75732 * i.e only increment in_pkts for Templates.
75733 */
75734 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
75735 - int pkts = atomic_add_return(1, &cp->in_pkts);
75736 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75737
75738 if (pkts % sysctl_sync_period(ipvs) != 1)
75739 return;
75740 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
75741
75742 if (opt)
75743 memcpy(&cp->in_seq, opt, sizeof(*opt));
75744 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75745 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75746 cp->state = state;
75747 cp->old_state = cp->state;
75748 /*
75749 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
75750 index 7fd66de..e6fb361 100644
75751 --- a/net/netfilter/ipvs/ip_vs_xmit.c
75752 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
75753 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
75754 else
75755 rc = NF_ACCEPT;
75756 /* do not touch skb anymore */
75757 - atomic_inc(&cp->in_pkts);
75758 + atomic_inc_unchecked(&cp->in_pkts);
75759 goto out;
75760 }
75761
75762 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
75763 else
75764 rc = NF_ACCEPT;
75765 /* do not touch skb anymore */
75766 - atomic_inc(&cp->in_pkts);
75767 + atomic_inc_unchecked(&cp->in_pkts);
75768 goto out;
75769 }
75770
75771 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
75772 index 66b2c54..c7884e3 100644
75773 --- a/net/netfilter/nfnetlink_log.c
75774 +++ b/net/netfilter/nfnetlink_log.c
75775 @@ -70,7 +70,7 @@ struct nfulnl_instance {
75776 };
75777
75778 static DEFINE_SPINLOCK(instances_lock);
75779 -static atomic_t global_seq;
75780 +static atomic_unchecked_t global_seq;
75781
75782 #define INSTANCE_BUCKETS 16
75783 static struct hlist_head instance_table[INSTANCE_BUCKETS];
75784 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
75785 /* global sequence number */
75786 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
75787 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
75788 - htonl(atomic_inc_return(&global_seq)));
75789 + htonl(atomic_inc_return_unchecked(&global_seq)));
75790
75791 if (data_len) {
75792 struct nlattr *nla;
75793 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
75794 new file mode 100644
75795 index 0000000..6905327
75796 --- /dev/null
75797 +++ b/net/netfilter/xt_gradm.c
75798 @@ -0,0 +1,51 @@
75799 +/*
75800 + * gradm match for netfilter
75801 + * Copyright © Zbigniew Krzystolik, 2010
75802 + *
75803 + * This program is free software; you can redistribute it and/or modify
75804 + * it under the terms of the GNU General Public License; either version
75805 + * 2 or 3 as published by the Free Software Foundation.
75806 + */
75807 +#include <linux/module.h>
75808 +#include <linux/moduleparam.h>
75809 +#include <linux/skbuff.h>
75810 +#include <linux/netfilter/x_tables.h>
75811 +#include <linux/grsecurity.h>
75812 +#include <linux/netfilter/xt_gradm.h>
75813 +
75814 +static bool
75815 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
75816 +{
75817 + const struct xt_gradm_mtinfo *info = par->matchinfo;
75818 + bool retval = false;
75819 + if (gr_acl_is_enabled())
75820 + retval = true;
75821 + return retval ^ info->invflags;
75822 +}
75823 +
75824 +static struct xt_match gradm_mt_reg __read_mostly = {
75825 + .name = "gradm",
75826 + .revision = 0,
75827 + .family = NFPROTO_UNSPEC,
75828 + .match = gradm_mt,
75829 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
75830 + .me = THIS_MODULE,
75831 +};
75832 +
75833 +static int __init gradm_mt_init(void)
75834 +{
75835 + return xt_register_match(&gradm_mt_reg);
75836 +}
75837 +
75838 +static void __exit gradm_mt_exit(void)
75839 +{
75840 + xt_unregister_match(&gradm_mt_reg);
75841 +}
75842 +
75843 +module_init(gradm_mt_init);
75844 +module_exit(gradm_mt_exit);
75845 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
75846 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
75847 +MODULE_LICENSE("GPL");
75848 +MODULE_ALIAS("ipt_gradm");
75849 +MODULE_ALIAS("ip6t_gradm");
75850 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
75851 index 4fe4fb4..87a89e5 100644
75852 --- a/net/netfilter/xt_statistic.c
75853 +++ b/net/netfilter/xt_statistic.c
75854 @@ -19,7 +19,7 @@
75855 #include <linux/module.h>
75856
75857 struct xt_statistic_priv {
75858 - atomic_t count;
75859 + atomic_unchecked_t count;
75860 } ____cacheline_aligned_in_smp;
75861
75862 MODULE_LICENSE("GPL");
75863 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
75864 break;
75865 case XT_STATISTIC_MODE_NTH:
75866 do {
75867 - oval = atomic_read(&info->master->count);
75868 + oval = atomic_read_unchecked(&info->master->count);
75869 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
75870 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
75871 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
75872 if (nval == 0)
75873 ret = !ret;
75874 break;
75875 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
75876 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
75877 if (info->master == NULL)
75878 return -ENOMEM;
75879 - atomic_set(&info->master->count, info->u.nth.count);
75880 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
75881
75882 return 0;
75883 }
75884 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
75885 index faa48f7..65f7f54 100644
75886 --- a/net/netlink/af_netlink.c
75887 +++ b/net/netlink/af_netlink.c
75888 @@ -741,7 +741,7 @@ static void netlink_overrun(struct sock *sk)
75889 sk->sk_error_report(sk);
75890 }
75891 }
75892 - atomic_inc(&sk->sk_drops);
75893 + atomic_inc_unchecked(&sk->sk_drops);
75894 }
75895
75896 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
75897 @@ -2013,7 +2013,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
75898 sk_wmem_alloc_get(s),
75899 nlk->cb,
75900 atomic_read(&s->sk_refcnt),
75901 - atomic_read(&s->sk_drops),
75902 + atomic_read_unchecked(&s->sk_drops),
75903 sock_i_ino(s)
75904 );
75905
75906 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
75907 index 06592d8..64860f6 100644
75908 --- a/net/netrom/af_netrom.c
75909 +++ b/net/netrom/af_netrom.c
75910 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
75911 struct sock *sk = sock->sk;
75912 struct nr_sock *nr = nr_sk(sk);
75913
75914 + memset(sax, 0, sizeof(*sax));
75915 lock_sock(sk);
75916 if (peer != 0) {
75917 if (sk->sk_state != TCP_ESTABLISHED) {
75918 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
75919 *uaddr_len = sizeof(struct full_sockaddr_ax25);
75920 } else {
75921 sax->fsa_ax25.sax25_family = AF_NETROM;
75922 - sax->fsa_ax25.sax25_ndigis = 0;
75923 sax->fsa_ax25.sax25_call = nr->source_addr;
75924 *uaddr_len = sizeof(struct sockaddr_ax25);
75925 }
75926 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
75927 index 4f2c0df..f0ff342 100644
75928 --- a/net/packet/af_packet.c
75929 +++ b/net/packet/af_packet.c
75930 @@ -1687,7 +1687,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
75931
75932 spin_lock(&sk->sk_receive_queue.lock);
75933 po->stats.tp_packets++;
75934 - skb->dropcount = atomic_read(&sk->sk_drops);
75935 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75936 __skb_queue_tail(&sk->sk_receive_queue, skb);
75937 spin_unlock(&sk->sk_receive_queue.lock);
75938 sk->sk_data_ready(sk, skb->len);
75939 @@ -1696,7 +1696,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
75940 drop_n_acct:
75941 spin_lock(&sk->sk_receive_queue.lock);
75942 po->stats.tp_drops++;
75943 - atomic_inc(&sk->sk_drops);
75944 + atomic_inc_unchecked(&sk->sk_drops);
75945 spin_unlock(&sk->sk_receive_queue.lock);
75946
75947 drop_n_restore:
75948 @@ -3294,7 +3294,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
75949 case PACKET_HDRLEN:
75950 if (len > sizeof(int))
75951 len = sizeof(int);
75952 - if (copy_from_user(&val, optval, len))
75953 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
75954 return -EFAULT;
75955 switch (val) {
75956 case TPACKET_V1:
75957 @@ -3344,7 +3344,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
75958
75959 if (put_user(len, optlen))
75960 return -EFAULT;
75961 - if (copy_to_user(optval, data, len))
75962 + if (len > sizeof(st) || copy_to_user(optval, data, len))
75963 return -EFAULT;
75964 return 0;
75965 }
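Both packet_getsockopt() hunks above add an explicit upper bound on the caller-supplied length before the copy to or from userspace, so the copy can never run past the local val/st objects even if the earlier clamping were changed or bypassed. A userspace sketch of the pattern, with memcpy standing in for copy_to_user() and a made-up stats structure:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Made-up option object; only the bounds check matters here. */
    struct stats { unsigned int packets, drops; };

    static int get_stats(void *optval, unsigned int len)
    {
        struct stats st = { 42, 3 };

        if (len > sizeof(st))        /* refuse rather than over-copy */
            return -EFAULT;
        memcpy(optval, &st, len);    /* copy_to_user() in the kernel */
        return 0;
    }

    int main(void)
    {
        struct stats out;

        printf("exact length: %d\n", get_stats(&out, sizeof(out)));  /* 0 */
        printf("oversized:    %d\n", get_stats(&out, 4096));         /* -EFAULT */
        return 0;
    }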
75966 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
75967 index d65f699..05aa6ce 100644
75968 --- a/net/phonet/af_phonet.c
75969 +++ b/net/phonet/af_phonet.c
75970 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
75971 {
75972 struct phonet_protocol *pp;
75973
75974 - if (protocol >= PHONET_NPROTO)
75975 + if (protocol < 0 || protocol >= PHONET_NPROTO)
75976 return NULL;
75977
75978 rcu_read_lock();
75979 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
75980 {
75981 int err = 0;
75982
75983 - if (protocol >= PHONET_NPROTO)
75984 + if (protocol < 0 || protocol >= PHONET_NPROTO)
75985 return -EINVAL;
75986
75987 err = proto_register(pp->prot, 1);
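The two phonet hunks make the protocol-number range check two-sided instead of only rejecting values at or above PHONET_NPROTO, so a negative value handed in through the socket layer cannot reach the protocol-table lookup (and where the parameter is already unsigned, the extra test is merely redundant). The generic pattern, with an illustrative table and size:

    #include <stdio.h>

    #define NPROTO 4                    /* illustrative, not PHONET_NPROTO */
    static const char *proto_tab[NPROTO] = { "p0", "p1", "p2", "p3" };

    static const char *proto_get(int protocol)
    {
        /* Without the 'protocol < 0' half, -1 would index before the
         * start of proto_tab -- undefined behaviour. */
        if (protocol < 0 || protocol >= NPROTO)
            return NULL;
        return proto_tab[protocol];
    }

    int main(void)
    {
        printf("2  -> %s\n", proto_get(2)  ? proto_get(2)  : "(rejected)");
        printf("-1 -> %s\n", proto_get(-1) ? proto_get(-1) : "(rejected)");
        return 0;
    }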
75988 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
75989 index 9726fe6..fc4e3a4 100644
75990 --- a/net/phonet/pep.c
75991 +++ b/net/phonet/pep.c
75992 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
75993
75994 case PNS_PEP_CTRL_REQ:
75995 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
75996 - atomic_inc(&sk->sk_drops);
75997 + atomic_inc_unchecked(&sk->sk_drops);
75998 break;
75999 }
76000 __skb_pull(skb, 4);
76001 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76002 }
76003
76004 if (pn->rx_credits == 0) {
76005 - atomic_inc(&sk->sk_drops);
76006 + atomic_inc_unchecked(&sk->sk_drops);
76007 err = -ENOBUFS;
76008 break;
76009 }
76010 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
76011 }
76012
76013 if (pn->rx_credits == 0) {
76014 - atomic_inc(&sk->sk_drops);
76015 + atomic_inc_unchecked(&sk->sk_drops);
76016 err = NET_RX_DROP;
76017 break;
76018 }
76019 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
76020 index 4c7eff3..59c727f 100644
76021 --- a/net/phonet/socket.c
76022 +++ b/net/phonet/socket.c
76023 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
76024 pn->resource, sk->sk_state,
76025 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
76026 sock_i_uid(sk), sock_i_ino(sk),
76027 - atomic_read(&sk->sk_refcnt), sk,
76028 - atomic_read(&sk->sk_drops), &len);
76029 + atomic_read(&sk->sk_refcnt),
76030 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76031 + NULL,
76032 +#else
76033 + sk,
76034 +#endif
76035 + atomic_read_unchecked(&sk->sk_drops), &len);
76036 }
76037 seq_printf(seq, "%*s\n", 127 - len, "");
76038 return 0;
76039 diff --git a/net/rds/cong.c b/net/rds/cong.c
76040 index e5b65ac..f3b6fb7 100644
76041 --- a/net/rds/cong.c
76042 +++ b/net/rds/cong.c
76043 @@ -78,7 +78,7 @@
76044 * finds that the saved generation number is smaller than the global generation
76045 * number, it wakes up the process.
76046 */
76047 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
76048 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
76049
76050 /*
76051 * Congestion monitoring
76052 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
76053 rdsdebug("waking map %p for %pI4\n",
76054 map, &map->m_addr);
76055 rds_stats_inc(s_cong_update_received);
76056 - atomic_inc(&rds_cong_generation);
76057 + atomic_inc_unchecked(&rds_cong_generation);
76058 if (waitqueue_active(&map->m_waitq))
76059 wake_up(&map->m_waitq);
76060 if (waitqueue_active(&rds_poll_waitq))
76061 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
76062
76063 int rds_cong_updated_since(unsigned long *recent)
76064 {
76065 - unsigned long gen = atomic_read(&rds_cong_generation);
76066 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
76067
76068 if (likely(*recent == gen))
76069 return 0;
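The comment preserved at the top of the first hunk describes the scheme these counters implement: every congestion-map update bumps a global generation number, and a reader compares its saved generation against the global one to decide whether anything changed. The hunks only retype the counter; the algorithm is untouched. A rough userspace analogue with C11 atomics (function names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong cong_generation;   /* counterpart of rds_cong_generation */

    /* Called for every congestion-map update, as in rds_cong_map_updated(). */
    static void map_updated(void)
    {
        atomic_fetch_add(&cong_generation, 1);
        /* ...wake up any waiters here... */
    }

    /* Returns 0 if nothing changed since *recent, else records the new
     * generation and returns 1, roughly like rds_cong_updated_since(). */
    static int updated_since(unsigned long *recent)
    {
        unsigned long gen = atomic_load(&cong_generation);

        if (*recent == gen)
            return 0;
        *recent = gen;
        return 1;
    }

    int main(void)
    {
        unsigned long seen = atomic_load(&cong_generation);

        printf("before update: %d\n", updated_since(&seen));   /* 0 */
        map_updated();
        printf("after update:  %d\n", updated_since(&seen));   /* 1 */
        printf("seen again:    %d\n", updated_since(&seen));   /* 0 */
        return 0;
    }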
76070 diff --git a/net/rds/ib.h b/net/rds/ib.h
76071 index edfaaaf..8c89879 100644
76072 --- a/net/rds/ib.h
76073 +++ b/net/rds/ib.h
76074 @@ -128,7 +128,7 @@ struct rds_ib_connection {
76075 /* sending acks */
76076 unsigned long i_ack_flags;
76077 #ifdef KERNEL_HAS_ATOMIC64
76078 - atomic64_t i_ack_next; /* next ACK to send */
76079 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76080 #else
76081 spinlock_t i_ack_lock; /* protect i_ack_next */
76082 u64 i_ack_next; /* next ACK to send */
76083 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
76084 index a1e1162..265e129 100644
76085 --- a/net/rds/ib_cm.c
76086 +++ b/net/rds/ib_cm.c
76087 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
76088 /* Clear the ACK state */
76089 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76090 #ifdef KERNEL_HAS_ATOMIC64
76091 - atomic64_set(&ic->i_ack_next, 0);
76092 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76093 #else
76094 ic->i_ack_next = 0;
76095 #endif
76096 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
76097 index 8d19491..05a3e65 100644
76098 --- a/net/rds/ib_recv.c
76099 +++ b/net/rds/ib_recv.c
76100 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76101 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
76102 int ack_required)
76103 {
76104 - atomic64_set(&ic->i_ack_next, seq);
76105 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76106 if (ack_required) {
76107 smp_mb__before_clear_bit();
76108 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76109 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76110 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76111 smp_mb__after_clear_bit();
76112
76113 - return atomic64_read(&ic->i_ack_next);
76114 + return atomic64_read_unchecked(&ic->i_ack_next);
76115 }
76116 #endif
76117
76118 diff --git a/net/rds/iw.h b/net/rds/iw.h
76119 index 04ce3b1..48119a6 100644
76120 --- a/net/rds/iw.h
76121 +++ b/net/rds/iw.h
76122 @@ -134,7 +134,7 @@ struct rds_iw_connection {
76123 /* sending acks */
76124 unsigned long i_ack_flags;
76125 #ifdef KERNEL_HAS_ATOMIC64
76126 - atomic64_t i_ack_next; /* next ACK to send */
76127 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76128 #else
76129 spinlock_t i_ack_lock; /* protect i_ack_next */
76130 u64 i_ack_next; /* next ACK to send */
76131 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
76132 index a91e1db..cf3053f 100644
76133 --- a/net/rds/iw_cm.c
76134 +++ b/net/rds/iw_cm.c
76135 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
76136 /* Clear the ACK state */
76137 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76138 #ifdef KERNEL_HAS_ATOMIC64
76139 - atomic64_set(&ic->i_ack_next, 0);
76140 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76141 #else
76142 ic->i_ack_next = 0;
76143 #endif
76144 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
76145 index 4503335..db566b4 100644
76146 --- a/net/rds/iw_recv.c
76147 +++ b/net/rds/iw_recv.c
76148 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76149 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
76150 int ack_required)
76151 {
76152 - atomic64_set(&ic->i_ack_next, seq);
76153 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76154 if (ack_required) {
76155 smp_mb__before_clear_bit();
76156 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76157 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76158 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76159 smp_mb__after_clear_bit();
76160
76161 - return atomic64_read(&ic->i_ack_next);
76162 + return atomic64_read_unchecked(&ic->i_ack_next);
76163 }
76164 #endif
76165
76166 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
76167 index edac9ef..16bcb98 100644
76168 --- a/net/rds/tcp.c
76169 +++ b/net/rds/tcp.c
76170 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
76171 int val = 1;
76172
76173 set_fs(KERNEL_DS);
76174 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
76175 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
76176 sizeof(val));
76177 set_fs(oldfs);
76178 }
76179 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
76180 index 1b4fd68..2234175 100644
76181 --- a/net/rds/tcp_send.c
76182 +++ b/net/rds/tcp_send.c
76183 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
76184
76185 oldfs = get_fs();
76186 set_fs(KERNEL_DS);
76187 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
76188 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
76189 sizeof(val));
76190 set_fs(oldfs);
76191 }
76192 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
76193 index 74c064c..fdec26f 100644
76194 --- a/net/rxrpc/af_rxrpc.c
76195 +++ b/net/rxrpc/af_rxrpc.c
76196 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
76197 __be32 rxrpc_epoch;
76198
76199 /* current debugging ID */
76200 -atomic_t rxrpc_debug_id;
76201 +atomic_unchecked_t rxrpc_debug_id;
76202
76203 /* count of skbs currently in use */
76204 atomic_t rxrpc_n_skbs;
76205 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
76206 index c3126e8..21facc7 100644
76207 --- a/net/rxrpc/ar-ack.c
76208 +++ b/net/rxrpc/ar-ack.c
76209 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76210
76211 _enter("{%d,%d,%d,%d},",
76212 call->acks_hard, call->acks_unacked,
76213 - atomic_read(&call->sequence),
76214 + atomic_read_unchecked(&call->sequence),
76215 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
76216
76217 stop = 0;
76218 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76219
76220 /* each Tx packet has a new serial number */
76221 sp->hdr.serial =
76222 - htonl(atomic_inc_return(&call->conn->serial));
76223 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
76224
76225 hdr = (struct rxrpc_header *) txb->head;
76226 hdr->serial = sp->hdr.serial;
76227 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
76228 */
76229 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
76230 {
76231 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
76232 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
76233 }
76234
76235 /*
76236 @@ -629,7 +629,7 @@ process_further:
76237
76238 latest = ntohl(sp->hdr.serial);
76239 hard = ntohl(ack.firstPacket);
76240 - tx = atomic_read(&call->sequence);
76241 + tx = atomic_read_unchecked(&call->sequence);
76242
76243 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76244 latest,
76245 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
76246 goto maybe_reschedule;
76247
76248 send_ACK_with_skew:
76249 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
76250 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
76251 ntohl(ack.serial));
76252 send_ACK:
76253 mtu = call->conn->trans->peer->if_mtu;
76254 @@ -1173,7 +1173,7 @@ send_ACK:
76255 ackinfo.rxMTU = htonl(5692);
76256 ackinfo.jumbo_max = htonl(4);
76257
76258 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76259 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76260 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76261 ntohl(hdr.serial),
76262 ntohs(ack.maxSkew),
76263 @@ -1191,7 +1191,7 @@ send_ACK:
76264 send_message:
76265 _debug("send message");
76266
76267 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76268 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76269 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
76270 send_message_2:
76271
76272 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
76273 index bf656c2..48f9d27 100644
76274 --- a/net/rxrpc/ar-call.c
76275 +++ b/net/rxrpc/ar-call.c
76276 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
76277 spin_lock_init(&call->lock);
76278 rwlock_init(&call->state_lock);
76279 atomic_set(&call->usage, 1);
76280 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
76281 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76282 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
76283
76284 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
76285 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
76286 index 4106ca9..a338d7a 100644
76287 --- a/net/rxrpc/ar-connection.c
76288 +++ b/net/rxrpc/ar-connection.c
76289 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
76290 rwlock_init(&conn->lock);
76291 spin_lock_init(&conn->state_lock);
76292 atomic_set(&conn->usage, 1);
76293 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
76294 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76295 conn->avail_calls = RXRPC_MAXCALLS;
76296 conn->size_align = 4;
76297 conn->header_size = sizeof(struct rxrpc_header);
76298 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
76299 index e7ed43a..6afa140 100644
76300 --- a/net/rxrpc/ar-connevent.c
76301 +++ b/net/rxrpc/ar-connevent.c
76302 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
76303
76304 len = iov[0].iov_len + iov[1].iov_len;
76305
76306 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76307 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76308 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
76309
76310 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76311 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
76312 index 1a2b0633..e8d1382 100644
76313 --- a/net/rxrpc/ar-input.c
76314 +++ b/net/rxrpc/ar-input.c
76315 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
76316 /* track the latest serial number on this connection for ACK packet
76317 * information */
76318 serial = ntohl(sp->hdr.serial);
76319 - hi_serial = atomic_read(&call->conn->hi_serial);
76320 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
76321 while (serial > hi_serial)
76322 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
76323 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
76324 serial);
76325
76326 /* request ACK generation for any ACK or DATA packet that requests
76327 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
76328 index 8e22bd3..f66d1c0 100644
76329 --- a/net/rxrpc/ar-internal.h
76330 +++ b/net/rxrpc/ar-internal.h
76331 @@ -272,8 +272,8 @@ struct rxrpc_connection {
76332 int error; /* error code for local abort */
76333 int debug_id; /* debug ID for printks */
76334 unsigned call_counter; /* call ID counter */
76335 - atomic_t serial; /* packet serial number counter */
76336 - atomic_t hi_serial; /* highest serial number received */
76337 + atomic_unchecked_t serial; /* packet serial number counter */
76338 + atomic_unchecked_t hi_serial; /* highest serial number received */
76339 u8 avail_calls; /* number of calls available */
76340 u8 size_align; /* data size alignment (for security) */
76341 u8 header_size; /* rxrpc + security header size */
76342 @@ -346,7 +346,7 @@ struct rxrpc_call {
76343 spinlock_t lock;
76344 rwlock_t state_lock; /* lock for state transition */
76345 atomic_t usage;
76346 - atomic_t sequence; /* Tx data packet sequence counter */
76347 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
76348 u32 abort_code; /* local/remote abort code */
76349 enum { /* current state of call */
76350 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
76351 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
76352 */
76353 extern atomic_t rxrpc_n_skbs;
76354 extern __be32 rxrpc_epoch;
76355 -extern atomic_t rxrpc_debug_id;
76356 +extern atomic_unchecked_t rxrpc_debug_id;
76357 extern struct workqueue_struct *rxrpc_workqueue;
76358
76359 /*
76360 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
76361 index 87f7135..74d3703 100644
76362 --- a/net/rxrpc/ar-local.c
76363 +++ b/net/rxrpc/ar-local.c
76364 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
76365 spin_lock_init(&local->lock);
76366 rwlock_init(&local->services_lock);
76367 atomic_set(&local->usage, 1);
76368 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
76369 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76370 memcpy(&local->srx, srx, sizeof(*srx));
76371 }
76372
76373 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
76374 index 16ae887..d24f12b 100644
76375 --- a/net/rxrpc/ar-output.c
76376 +++ b/net/rxrpc/ar-output.c
76377 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
76378 sp->hdr.cid = call->cid;
76379 sp->hdr.callNumber = call->call_id;
76380 sp->hdr.seq =
76381 - htonl(atomic_inc_return(&call->sequence));
76382 + htonl(atomic_inc_return_unchecked(&call->sequence));
76383 sp->hdr.serial =
76384 - htonl(atomic_inc_return(&conn->serial));
76385 + htonl(atomic_inc_return_unchecked(&conn->serial));
76386 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
76387 sp->hdr.userStatus = 0;
76388 sp->hdr.securityIndex = conn->security_ix;
76389 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
76390 index 2754f09..b20e38f 100644
76391 --- a/net/rxrpc/ar-peer.c
76392 +++ b/net/rxrpc/ar-peer.c
76393 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
76394 INIT_LIST_HEAD(&peer->error_targets);
76395 spin_lock_init(&peer->lock);
76396 atomic_set(&peer->usage, 1);
76397 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
76398 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76399 memcpy(&peer->srx, srx, sizeof(*srx));
76400
76401 rxrpc_assess_MTU_size(peer);
76402 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
76403 index 38047f7..9f48511 100644
76404 --- a/net/rxrpc/ar-proc.c
76405 +++ b/net/rxrpc/ar-proc.c
76406 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
76407 atomic_read(&conn->usage),
76408 rxrpc_conn_states[conn->state],
76409 key_serial(conn->key),
76410 - atomic_read(&conn->serial),
76411 - atomic_read(&conn->hi_serial));
76412 + atomic_read_unchecked(&conn->serial),
76413 + atomic_read_unchecked(&conn->hi_serial));
76414
76415 return 0;
76416 }
76417 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
76418 index 92df566..87ec1bf 100644
76419 --- a/net/rxrpc/ar-transport.c
76420 +++ b/net/rxrpc/ar-transport.c
76421 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
76422 spin_lock_init(&trans->client_lock);
76423 rwlock_init(&trans->conn_lock);
76424 atomic_set(&trans->usage, 1);
76425 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
76426 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76427
76428 if (peer->srx.transport.family == AF_INET) {
76429 switch (peer->srx.transport_type) {
76430 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
76431 index 7635107..4670276 100644
76432 --- a/net/rxrpc/rxkad.c
76433 +++ b/net/rxrpc/rxkad.c
76434 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
76435
76436 len = iov[0].iov_len + iov[1].iov_len;
76437
76438 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76439 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76440 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
76441
76442 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76443 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
76444
76445 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
76446
76447 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
76448 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76449 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
76450
76451 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
76452 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
76453 index 1e2eee8..ce3967e 100644
76454 --- a/net/sctp/proc.c
76455 +++ b/net/sctp/proc.c
76456 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
76457 seq_printf(seq,
76458 "%8pK %8pK %-3d %-3d %-2d %-4d "
76459 "%4d %8d %8d %7d %5lu %-5d %5d ",
76460 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
76461 + assoc, sk,
76462 + sctp_sk(sk)->type, sk->sk_state,
76463 assoc->state, hash,
76464 assoc->assoc_id,
76465 assoc->sndbuf_used,
76466 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
76467 index 92ba71d..9a97902 100644
76468 --- a/net/sctp/socket.c
76469 +++ b/net/sctp/socket.c
76470 @@ -4569,7 +4569,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
76471 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
76472 if (space_left < addrlen)
76473 return -ENOMEM;
76474 - if (copy_to_user(to, &temp, addrlen))
76475 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
76476 return -EFAULT;
76477 to += addrlen;
76478 cnt++;
76479 diff --git a/net/socket.c b/net/socket.c
76480 index 851edcd..b786851 100644
76481 --- a/net/socket.c
76482 +++ b/net/socket.c
76483 @@ -88,6 +88,7 @@
76484 #include <linux/nsproxy.h>
76485 #include <linux/magic.h>
76486 #include <linux/slab.h>
76487 +#include <linux/in.h>
76488
76489 #include <asm/uaccess.h>
76490 #include <asm/unistd.h>
76491 @@ -105,6 +106,8 @@
76492 #include <linux/sockios.h>
76493 #include <linux/atalk.h>
76494
76495 +#include <linux/grsock.h>
76496 +
76497 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
76498 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
76499 unsigned long nr_segs, loff_t pos);
76500 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
76501 &sockfs_dentry_operations, SOCKFS_MAGIC);
76502 }
76503
76504 -static struct vfsmount *sock_mnt __read_mostly;
76505 +struct vfsmount *sock_mnt __read_mostly;
76506
76507 static struct file_system_type sock_fs_type = {
76508 .name = "sockfs",
76509 @@ -1207,6 +1210,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
76510 return -EAFNOSUPPORT;
76511 if (type < 0 || type >= SOCK_MAX)
76512 return -EINVAL;
76513 + if (protocol < 0)
76514 + return -EINVAL;
76515
76516 /* Compatibility.
76517
76518 @@ -1339,6 +1344,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
76519 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
76520 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
76521
76522 + if(!gr_search_socket(family, type, protocol)) {
76523 + retval = -EACCES;
76524 + goto out;
76525 + }
76526 +
76527 + if (gr_handle_sock_all(family, type, protocol)) {
76528 + retval = -EACCES;
76529 + goto out;
76530 + }
76531 +
76532 retval = sock_create(family, type, protocol, &sock);
76533 if (retval < 0)
76534 goto out;
76535 @@ -1451,6 +1466,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76536 if (sock) {
76537 err = move_addr_to_kernel(umyaddr, addrlen, &address);
76538 if (err >= 0) {
76539 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
76540 + err = -EACCES;
76541 + goto error;
76542 + }
76543 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
76544 + if (err)
76545 + goto error;
76546 +
76547 err = security_socket_bind(sock,
76548 (struct sockaddr *)&address,
76549 addrlen);
76550 @@ -1459,6 +1482,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76551 (struct sockaddr *)
76552 &address, addrlen);
76553 }
76554 +error:
76555 fput_light(sock->file, fput_needed);
76556 }
76557 return err;
76558 @@ -1482,10 +1506,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
76559 if ((unsigned)backlog > somaxconn)
76560 backlog = somaxconn;
76561
76562 + if (gr_handle_sock_server_other(sock->sk)) {
76563 + err = -EPERM;
76564 + goto error;
76565 + }
76566 +
76567 + err = gr_search_listen(sock);
76568 + if (err)
76569 + goto error;
76570 +
76571 err = security_socket_listen(sock, backlog);
76572 if (!err)
76573 err = sock->ops->listen(sock, backlog);
76574
76575 +error:
76576 fput_light(sock->file, fput_needed);
76577 }
76578 return err;
76579 @@ -1529,6 +1563,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76580 newsock->type = sock->type;
76581 newsock->ops = sock->ops;
76582
76583 + if (gr_handle_sock_server_other(sock->sk)) {
76584 + err = -EPERM;
76585 + sock_release(newsock);
76586 + goto out_put;
76587 + }
76588 +
76589 + err = gr_search_accept(sock);
76590 + if (err) {
76591 + sock_release(newsock);
76592 + goto out_put;
76593 + }
76594 +
76595 /*
76596 * We don't need try_module_get here, as the listening socket (sock)
76597 * has the protocol module (sock->ops->owner) held.
76598 @@ -1567,6 +1613,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76599 fd_install(newfd, newfile);
76600 err = newfd;
76601
76602 + gr_attach_curr_ip(newsock->sk);
76603 +
76604 out_put:
76605 fput_light(sock->file, fput_needed);
76606 out:
76607 @@ -1599,6 +1647,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76608 int, addrlen)
76609 {
76610 struct socket *sock;
76611 + struct sockaddr *sck;
76612 struct sockaddr_storage address;
76613 int err, fput_needed;
76614
76615 @@ -1609,6 +1658,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76616 if (err < 0)
76617 goto out_put;
76618
76619 + sck = (struct sockaddr *)&address;
76620 +
76621 + if (gr_handle_sock_client(sck)) {
76622 + err = -EACCES;
76623 + goto out_put;
76624 + }
76625 +
76626 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
76627 + if (err)
76628 + goto out_put;
76629 +
76630 err =
76631 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
76632 if (err)
76633 @@ -1966,7 +2026,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
76634 * checking falls down on this.
76635 */
76636 if (copy_from_user(ctl_buf,
76637 - (void __user __force *)msg_sys->msg_control,
76638 + (void __force_user *)msg_sys->msg_control,
76639 ctl_len))
76640 goto out_freectl;
76641 msg_sys->msg_control = ctl_buf;
76642 @@ -2136,7 +2196,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
76643 * kernel msghdr to use the kernel address space)
76644 */
76645
76646 - uaddr = (__force void __user *)msg_sys->msg_name;
76647 + uaddr = (void __force_user *)msg_sys->msg_name;
76648 uaddr_len = COMPAT_NAMELEN(msg);
76649 if (MSG_CMSG_COMPAT & flags) {
76650 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
76651 @@ -2758,7 +2818,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76652 }
76653
76654 ifr = compat_alloc_user_space(buf_size);
76655 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
76656 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
76657
76658 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
76659 return -EFAULT;
76660 @@ -2782,12 +2842,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76661 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
76662
76663 if (copy_in_user(rxnfc, compat_rxnfc,
76664 - (void *)(&rxnfc->fs.m_ext + 1) -
76665 - (void *)rxnfc) ||
76666 + (void __user *)(&rxnfc->fs.m_ext + 1) -
76667 + (void __user *)rxnfc) ||
76668 copy_in_user(&rxnfc->fs.ring_cookie,
76669 &compat_rxnfc->fs.ring_cookie,
76670 - (void *)(&rxnfc->fs.location + 1) -
76671 - (void *)&rxnfc->fs.ring_cookie) ||
76672 + (void __user *)(&rxnfc->fs.location + 1) -
76673 + (void __user *)&rxnfc->fs.ring_cookie) ||
76674 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
76675 sizeof(rxnfc->rule_cnt)))
76676 return -EFAULT;
76677 @@ -2799,12 +2859,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76678
76679 if (convert_out) {
76680 if (copy_in_user(compat_rxnfc, rxnfc,
76681 - (const void *)(&rxnfc->fs.m_ext + 1) -
76682 - (const void *)rxnfc) ||
76683 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
76684 + (const void __user *)rxnfc) ||
76685 copy_in_user(&compat_rxnfc->fs.ring_cookie,
76686 &rxnfc->fs.ring_cookie,
76687 - (const void *)(&rxnfc->fs.location + 1) -
76688 - (const void *)&rxnfc->fs.ring_cookie) ||
76689 + (const void __user *)(&rxnfc->fs.location + 1) -
76690 + (const void __user *)&rxnfc->fs.ring_cookie) ||
76691 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
76692 sizeof(rxnfc->rule_cnt)))
76693 return -EFAULT;
76694 @@ -2874,7 +2934,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
76695 old_fs = get_fs();
76696 set_fs(KERNEL_DS);
76697 err = dev_ioctl(net, cmd,
76698 - (struct ifreq __user __force *) &kifr);
76699 + (struct ifreq __force_user *) &kifr);
76700 set_fs(old_fs);
76701
76702 return err;
76703 @@ -2983,7 +3043,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
76704
76705 old_fs = get_fs();
76706 set_fs(KERNEL_DS);
76707 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
76708 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
76709 set_fs(old_fs);
76710
76711 if (cmd == SIOCGIFMAP && !err) {
76712 @@ -3088,7 +3148,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
76713 ret |= __get_user(rtdev, &(ur4->rt_dev));
76714 if (rtdev) {
76715 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
76716 - r4.rt_dev = (char __user __force *)devname;
76717 + r4.rt_dev = (char __force_user *)devname;
76718 devname[15] = 0;
76719 } else
76720 r4.rt_dev = NULL;
76721 @@ -3314,8 +3374,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
76722 int __user *uoptlen;
76723 int err;
76724
76725 - uoptval = (char __user __force *) optval;
76726 - uoptlen = (int __user __force *) optlen;
76727 + uoptval = (char __force_user *) optval;
76728 + uoptlen = (int __force_user *) optlen;
76729
76730 set_fs(KERNEL_DS);
76731 if (level == SOL_SOCKET)
76732 @@ -3335,7 +3395,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
76733 char __user *uoptval;
76734 int err;
76735
76736 - uoptval = (char __user __force *) optval;
76737 + uoptval = (char __force_user *) optval;
76738
76739 set_fs(KERNEL_DS);
76740 if (level == SOL_SOCKET)
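The socket.c changes above route socket(), bind(), listen(), accept4() and connect() through grsecurity's policy hooks (gr_search_socket(), gr_handle_sock_all(), gr_search_bind() and friends); a denied operation fails early with -EACCES or -EPERM, before the LSM hooks and protocol code run. From an application's point of view that is just an ordinary errno. A minimal probe (the address family and the messages are arbitrary):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
            /* On a grsecurity kernel whose RBAC policy denies this
             * subject the socket, EACCES would show up right here. */
            printf("socket: %s\n", strerror(errno));
            return 1;
        }
        printf("socket created (fd %d); no policy denied it\n", fd);
        close(fd);
        return 0;
    }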
76741 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
76742 index 994cfea..5343b6b 100644
76743 --- a/net/sunrpc/sched.c
76744 +++ b/net/sunrpc/sched.c
76745 @@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *word)
76746 #ifdef RPC_DEBUG
76747 static void rpc_task_set_debuginfo(struct rpc_task *task)
76748 {
76749 - static atomic_t rpc_pid;
76750 + static atomic_unchecked_t rpc_pid;
76751
76752 - task->tk_pid = atomic_inc_return(&rpc_pid);
76753 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
76754 }
76755 #else
76756 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
76757 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
76758 index 8343737..677025e 100644
76759 --- a/net/sunrpc/xprtrdma/svc_rdma.c
76760 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
76761 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
76762 static unsigned int min_max_inline = 4096;
76763 static unsigned int max_max_inline = 65536;
76764
76765 -atomic_t rdma_stat_recv;
76766 -atomic_t rdma_stat_read;
76767 -atomic_t rdma_stat_write;
76768 -atomic_t rdma_stat_sq_starve;
76769 -atomic_t rdma_stat_rq_starve;
76770 -atomic_t rdma_stat_rq_poll;
76771 -atomic_t rdma_stat_rq_prod;
76772 -atomic_t rdma_stat_sq_poll;
76773 -atomic_t rdma_stat_sq_prod;
76774 +atomic_unchecked_t rdma_stat_recv;
76775 +atomic_unchecked_t rdma_stat_read;
76776 +atomic_unchecked_t rdma_stat_write;
76777 +atomic_unchecked_t rdma_stat_sq_starve;
76778 +atomic_unchecked_t rdma_stat_rq_starve;
76779 +atomic_unchecked_t rdma_stat_rq_poll;
76780 +atomic_unchecked_t rdma_stat_rq_prod;
76781 +atomic_unchecked_t rdma_stat_sq_poll;
76782 +atomic_unchecked_t rdma_stat_sq_prod;
76783
76784 /* Temporary NFS request map and context caches */
76785 struct kmem_cache *svc_rdma_map_cachep;
76786 @@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
76787 len -= *ppos;
76788 if (len > *lenp)
76789 len = *lenp;
76790 - if (len && copy_to_user(buffer, str_buf, len))
76791 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
76792 return -EFAULT;
76793 *lenp = len;
76794 *ppos += len;
76795 @@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
76796 {
76797 .procname = "rdma_stat_read",
76798 .data = &rdma_stat_read,
76799 - .maxlen = sizeof(atomic_t),
76800 + .maxlen = sizeof(atomic_unchecked_t),
76801 .mode = 0644,
76802 .proc_handler = read_reset_stat,
76803 },
76804 {
76805 .procname = "rdma_stat_recv",
76806 .data = &rdma_stat_recv,
76807 - .maxlen = sizeof(atomic_t),
76808 + .maxlen = sizeof(atomic_unchecked_t),
76809 .mode = 0644,
76810 .proc_handler = read_reset_stat,
76811 },
76812 {
76813 .procname = "rdma_stat_write",
76814 .data = &rdma_stat_write,
76815 - .maxlen = sizeof(atomic_t),
76816 + .maxlen = sizeof(atomic_unchecked_t),
76817 .mode = 0644,
76818 .proc_handler = read_reset_stat,
76819 },
76820 {
76821 .procname = "rdma_stat_sq_starve",
76822 .data = &rdma_stat_sq_starve,
76823 - .maxlen = sizeof(atomic_t),
76824 + .maxlen = sizeof(atomic_unchecked_t),
76825 .mode = 0644,
76826 .proc_handler = read_reset_stat,
76827 },
76828 {
76829 .procname = "rdma_stat_rq_starve",
76830 .data = &rdma_stat_rq_starve,
76831 - .maxlen = sizeof(atomic_t),
76832 + .maxlen = sizeof(atomic_unchecked_t),
76833 .mode = 0644,
76834 .proc_handler = read_reset_stat,
76835 },
76836 {
76837 .procname = "rdma_stat_rq_poll",
76838 .data = &rdma_stat_rq_poll,
76839 - .maxlen = sizeof(atomic_t),
76840 + .maxlen = sizeof(atomic_unchecked_t),
76841 .mode = 0644,
76842 .proc_handler = read_reset_stat,
76843 },
76844 {
76845 .procname = "rdma_stat_rq_prod",
76846 .data = &rdma_stat_rq_prod,
76847 - .maxlen = sizeof(atomic_t),
76848 + .maxlen = sizeof(atomic_unchecked_t),
76849 .mode = 0644,
76850 .proc_handler = read_reset_stat,
76851 },
76852 {
76853 .procname = "rdma_stat_sq_poll",
76854 .data = &rdma_stat_sq_poll,
76855 - .maxlen = sizeof(atomic_t),
76856 + .maxlen = sizeof(atomic_unchecked_t),
76857 .mode = 0644,
76858 .proc_handler = read_reset_stat,
76859 },
76860 {
76861 .procname = "rdma_stat_sq_prod",
76862 .data = &rdma_stat_sq_prod,
76863 - .maxlen = sizeof(atomic_t),
76864 + .maxlen = sizeof(atomic_unchecked_t),
76865 .mode = 0644,
76866 .proc_handler = read_reset_stat,
76867 },
76868 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76869 index 41cb63b..c4a1489 100644
76870 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76871 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
76872 @@ -501,7 +501,7 @@ next_sge:
76873 svc_rdma_put_context(ctxt, 0);
76874 goto out;
76875 }
76876 - atomic_inc(&rdma_stat_read);
76877 + atomic_inc_unchecked(&rdma_stat_read);
76878
76879 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
76880 chl_map->ch[ch_no].count -= read_wr.num_sge;
76881 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
76882 dto_q);
76883 list_del_init(&ctxt->dto_q);
76884 } else {
76885 - atomic_inc(&rdma_stat_rq_starve);
76886 + atomic_inc_unchecked(&rdma_stat_rq_starve);
76887 clear_bit(XPT_DATA, &xprt->xpt_flags);
76888 ctxt = NULL;
76889 }
76890 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
76891 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
76892 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
76893 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
76894 - atomic_inc(&rdma_stat_recv);
76895 + atomic_inc_unchecked(&rdma_stat_recv);
76896
76897 /* Build up the XDR from the receive buffers. */
76898 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
76899 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76900 index 42eb7ba..c887c45 100644
76901 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76902 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
76903 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
76904 write_wr.wr.rdma.remote_addr = to;
76905
76906 /* Post It */
76907 - atomic_inc(&rdma_stat_write);
76908 + atomic_inc_unchecked(&rdma_stat_write);
76909 if (svc_rdma_send(xprt, &write_wr))
76910 goto err;
76911 return 0;
76912 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
76913 index 73b428b..5f3f8f3 100644
76914 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
76915 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
76916 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
76917 return;
76918
76919 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
76920 - atomic_inc(&rdma_stat_rq_poll);
76921 + atomic_inc_unchecked(&rdma_stat_rq_poll);
76922
76923 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
76924 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
76925 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
76926 }
76927
76928 if (ctxt)
76929 - atomic_inc(&rdma_stat_rq_prod);
76930 + atomic_inc_unchecked(&rdma_stat_rq_prod);
76931
76932 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
76933 /*
76934 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
76935 return;
76936
76937 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
76938 - atomic_inc(&rdma_stat_sq_poll);
76939 + atomic_inc_unchecked(&rdma_stat_sq_poll);
76940 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
76941 if (wc.status != IB_WC_SUCCESS)
76942 /* Close the transport */
76943 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
76944 }
76945
76946 if (ctxt)
76947 - atomic_inc(&rdma_stat_sq_prod);
76948 + atomic_inc_unchecked(&rdma_stat_sq_prod);
76949 }
76950
76951 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
76952 @@ -1266,7 +1266,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
76953 spin_lock_bh(&xprt->sc_lock);
76954 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
76955 spin_unlock_bh(&xprt->sc_lock);
76956 - atomic_inc(&rdma_stat_sq_starve);
76957 + atomic_inc_unchecked(&rdma_stat_sq_starve);
76958
76959 /* See if we can opportunistically reap SQ WR to make room */
76960 sq_cq_reap(xprt);
76961 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
76962 index c3e65ae..f512a2b 100644
76963 --- a/net/sysctl_net.c
76964 +++ b/net/sysctl_net.c
76965 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
76966 struct ctl_table *table)
76967 {
76968 /* Allow network administrator to have same access as root. */
76969 - if (capable(CAP_NET_ADMIN)) {
76970 + if (capable_nolog(CAP_NET_ADMIN)) {
76971 int mode = (table->mode >> 6) & 7;
76972 return (mode << 6) | (mode << 3) | mode;
76973 }
76974 diff --git a/net/tipc/link.c b/net/tipc/link.c
76975 index b4b9b30..5b62131 100644
76976 --- a/net/tipc/link.c
76977 +++ b/net/tipc/link.c
76978 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
76979 struct tipc_msg fragm_hdr;
76980 struct sk_buff *buf, *buf_chain, *prev;
76981 u32 fragm_crs, fragm_rest, hsz, sect_rest;
76982 - const unchar *sect_crs;
76983 + const unchar __user *sect_crs;
76984 int curr_sect;
76985 u32 fragm_no;
76986
76987 @@ -1247,7 +1247,7 @@ again:
76988
76989 if (!sect_rest) {
76990 sect_rest = msg_sect[++curr_sect].iov_len;
76991 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
76992 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
76993 }
76994
76995 if (sect_rest < fragm_rest)
76996 @@ -1266,7 +1266,7 @@ error:
76997 }
76998 } else
76999 skb_copy_to_linear_data_offset(buf, fragm_crs,
77000 - sect_crs, sz);
77001 + (const void __force_kernel *)sect_crs, sz);
77002 sect_crs += sz;
77003 sect_rest -= sz;
77004 fragm_crs += sz;
77005 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
77006 index e3afe16..333ea83 100644
77007 --- a/net/tipc/msg.c
77008 +++ b/net/tipc/msg.c
77009 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
77010 msg_sect[cnt].iov_len);
77011 else
77012 skb_copy_to_linear_data_offset(*buf, pos,
77013 - msg_sect[cnt].iov_base,
77014 + (const void __force_kernel *)msg_sect[cnt].iov_base,
77015 msg_sect[cnt].iov_len);
77016 pos += msg_sect[cnt].iov_len;
77017 }
77018 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
77019 index b2964e9..fdf2e27 100644
77020 --- a/net/tipc/subscr.c
77021 +++ b/net/tipc/subscr.c
77022 @@ -101,7 +101,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
77023 {
77024 struct iovec msg_sect;
77025
77026 - msg_sect.iov_base = (void *)&sub->evt;
77027 + msg_sect.iov_base = (void __force_user *)&sub->evt;
77028 msg_sect.iov_len = sizeof(struct tipc_event);
77029
77030 sub->evt.event = htohl(event, sub->swap);
77031 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
77032 index d510353..26c8a32 100644
77033 --- a/net/unix/af_unix.c
77034 +++ b/net/unix/af_unix.c
77035 @@ -779,6 +779,12 @@ static struct sock *unix_find_other(struct net *net,
77036 err = -ECONNREFUSED;
77037 if (!S_ISSOCK(inode->i_mode))
77038 goto put_fail;
77039 +
77040 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
77041 + err = -EACCES;
77042 + goto put_fail;
77043 + }
77044 +
77045 u = unix_find_socket_byinode(inode);
77046 if (!u)
77047 goto put_fail;
77048 @@ -799,6 +805,13 @@ static struct sock *unix_find_other(struct net *net,
77049 if (u) {
77050 struct dentry *dentry;
77051 dentry = unix_sk(u)->path.dentry;
77052 +
77053 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
77054 + err = -EPERM;
77055 + sock_put(u);
77056 + goto fail;
77057 + }
77058 +
77059 if (dentry)
77060 touch_atime(&unix_sk(u)->path);
77061 } else
77062 @@ -881,11 +894,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
77063 err = security_path_mknod(&path, dentry, mode, 0);
77064 if (err)
77065 goto out_mknod_drop_write;
77066 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
77067 + err = -EACCES;
77068 + goto out_mknod_drop_write;
77069 + }
77070 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
77071 out_mknod_drop_write:
77072 mnt_drop_write(path.mnt);
77073 if (err)
77074 goto out_mknod_dput;
77075 +
77076 + gr_handle_create(dentry, path.mnt);
77077 +
77078 mutex_unlock(&path.dentry->d_inode->i_mutex);
77079 dput(path.dentry);
77080 path.dentry = dentry;
77081 diff --git a/net/wireless/core.h b/net/wireless/core.h
77082 index 3ac2dd0..fbe533e 100644
77083 --- a/net/wireless/core.h
77084 +++ b/net/wireless/core.h
77085 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
77086 struct mutex mtx;
77087
77088 /* rfkill support */
77089 - struct rfkill_ops rfkill_ops;
77090 + rfkill_ops_no_const rfkill_ops;
77091 struct rfkill *rfkill;
77092 struct work_struct rfkill_sync;
77093
77094 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
77095 index af648e0..6185d3a 100644
77096 --- a/net/wireless/wext-core.c
77097 +++ b/net/wireless/wext-core.c
77098 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77099 */
77100
77101 /* Support for very large requests */
77102 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
77103 - (user_length > descr->max_tokens)) {
77104 + if (user_length > descr->max_tokens) {
77105 /* Allow userspace to GET more than max so
77106 * we can support any size GET requests.
77107 * There is still a limit : -ENOMEM.
77108 @@ -787,22 +786,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77109 }
77110 }
77111
77112 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
77113 - /*
77114 - * If this is a GET, but not NOMAX, it means that the extra
77115 - * data is not bounded by userspace, but by max_tokens. Thus
77116 - * set the length to max_tokens. This matches the extra data
77117 - * allocation.
77118 - * The driver should fill it with the number of tokens it
77119 - * provided, and it may check iwp->length rather than having
77120 - * knowledge of max_tokens. If the driver doesn't change the
77121 - * iwp->length, this ioctl just copies back max_token tokens
77122 - * filled with zeroes. Hopefully the driver isn't claiming
77123 - * them to be valid data.
77124 - */
77125 - iwp->length = descr->max_tokens;
77126 - }
77127 -
77128 err = handler(dev, info, (union iwreq_data *) iwp, extra);
77129
77130 iwp->length += essid_compat;
77131 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
77132 index 7661576..80f7627 100644
77133 --- a/net/xfrm/xfrm_policy.c
77134 +++ b/net/xfrm/xfrm_policy.c
77135 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
77136 {
77137 policy->walk.dead = 1;
77138
77139 - atomic_inc(&policy->genid);
77140 + atomic_inc_unchecked(&policy->genid);
77141
77142 if (del_timer(&policy->timer))
77143 xfrm_pol_put(policy);
77144 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
77145 hlist_add_head(&policy->bydst, chain);
77146 xfrm_pol_hold(policy);
77147 net->xfrm.policy_count[dir]++;
77148 - atomic_inc(&flow_cache_genid);
77149 + atomic_inc_unchecked(&flow_cache_genid);
77150 if (delpol)
77151 __xfrm_policy_unlink(delpol, dir);
77152 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
77153 @@ -1530,7 +1530,7 @@ free_dst:
77154 goto out;
77155 }
77156
77157 -static int inline
77158 +static inline int
77159 xfrm_dst_alloc_copy(void **target, const void *src, int size)
77160 {
77161 if (!*target) {
77162 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
77163 return 0;
77164 }
77165
77166 -static int inline
77167 +static inline int
77168 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
77169 {
77170 #ifdef CONFIG_XFRM_SUB_POLICY
77171 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
77172 #endif
77173 }
77174
77175 -static int inline
77176 +static inline int
77177 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
77178 {
77179 #ifdef CONFIG_XFRM_SUB_POLICY
77180 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
77181
77182 xdst->num_pols = num_pols;
77183 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
77184 - xdst->policy_genid = atomic_read(&pols[0]->genid);
77185 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
77186
77187 return xdst;
77188 }
77189 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
77190 if (xdst->xfrm_genid != dst->xfrm->genid)
77191 return 0;
77192 if (xdst->num_pols > 0 &&
77193 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
77194 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
77195 return 0;
77196
77197 mtu = dst_mtu(dst->child);
77198 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
77199 sizeof(pol->xfrm_vec[i].saddr));
77200 pol->xfrm_vec[i].encap_family = mp->new_family;
77201 /* flush bundles */
77202 - atomic_inc(&pol->genid);
77203 + atomic_inc_unchecked(&pol->genid);
77204 }
77205 }
77206
77207 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
77208 index ff1720d..d428ee7 100644
77209 --- a/scripts/Makefile.build
77210 +++ b/scripts/Makefile.build
77211 @@ -111,7 +111,7 @@ endif
77212 endif
77213
77214 # Do not include host rules unless needed
77215 -ifneq ($(hostprogs-y)$(hostprogs-m),)
77216 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
77217 include scripts/Makefile.host
77218 endif
77219
77220 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
77221 index 686cb0d..9d653bf 100644
77222 --- a/scripts/Makefile.clean
77223 +++ b/scripts/Makefile.clean
77224 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
77225 __clean-files := $(extra-y) $(always) \
77226 $(targets) $(clean-files) \
77227 $(host-progs) \
77228 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
77229 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
77230 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
77231
77232 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
77233
77234 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
77235 index 1ac414f..a1c1451 100644
77236 --- a/scripts/Makefile.host
77237 +++ b/scripts/Makefile.host
77238 @@ -31,6 +31,7 @@
77239 # Note: Shared libraries consisting of C++ files are not supported
77240
77241 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
77242 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
77243
77244 # C code
77245 # Executables compiled from a single .c file
77246 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
77247 # Shared libaries (only .c supported)
77248 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
77249 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
77250 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
77251 # Remove .so files from "xxx-objs"
77252 host-cobjs := $(filter-out %.so,$(host-cobjs))
77253
77254 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
77255 index cb1f50c..cef2a7c 100644
77256 --- a/scripts/basic/fixdep.c
77257 +++ b/scripts/basic/fixdep.c
77258 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
77259 /*
77260 * Lookup a value in the configuration string.
77261 */
77262 -static int is_defined_config(const char *name, int len, unsigned int hash)
77263 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
77264 {
77265 struct item *aux;
77266
77267 @@ -211,10 +211,10 @@ static void clear_config(void)
77268 /*
77269 * Record the use of a CONFIG_* word.
77270 */
77271 -static void use_config(const char *m, int slen)
77272 +static void use_config(const char *m, unsigned int slen)
77273 {
77274 unsigned int hash = strhash(m, slen);
77275 - int c, i;
77276 + unsigned int c, i;
77277
77278 if (is_defined_config(m, slen, hash))
77279 return;
77280 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
77281
77282 static void parse_config_file(const char *map, size_t len)
77283 {
77284 - const int *end = (const int *) (map + len);
77285 + const unsigned int *end = (const unsigned int *) (map + len);
77286 /* start at +1, so that p can never be < map */
77287 - const int *m = (const int *) map + 1;
77288 + const unsigned int *m = (const unsigned int *) map + 1;
77289 const char *p, *q;
77290
77291 for (; m < end; m++) {
77292 @@ -406,7 +406,7 @@ static void print_deps(void)
77293 static void traps(void)
77294 {
77295 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
77296 - int *p = (int *)test;
77297 + unsigned int *p = (unsigned int *)test;
77298
77299 if (*p != INT_CONF) {
77300 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
77301 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
77302 new file mode 100644
77303 index 0000000..8729101
77304 --- /dev/null
77305 +++ b/scripts/gcc-plugin.sh
77306 @@ -0,0 +1,2 @@
77307 +#!/bin/sh
77308 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
77309 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
77310 index 44ddaa5..a3119bd 100644
77311 --- a/scripts/mod/file2alias.c
77312 +++ b/scripts/mod/file2alias.c
77313 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
77314 unsigned long size, unsigned long id_size,
77315 void *symval)
77316 {
77317 - int i;
77318 + unsigned int i;
77319
77320 if (size % id_size || size < id_size) {
77321 if (cross_build != 0)
77322 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
77323 /* USB is special because the bcdDevice can be matched against a numeric range */
77324 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
77325 static void do_usb_entry(struct usb_device_id *id,
77326 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
77327 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
77328 unsigned char range_lo, unsigned char range_hi,
77329 unsigned char max, struct module *mod)
77330 {
77331 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
77332 {
77333 unsigned int devlo, devhi;
77334 unsigned char chi, clo, max;
77335 - int ndigits;
77336 + unsigned int ndigits;
77337
77338 id->match_flags = TO_NATIVE(id->match_flags);
77339 id->idVendor = TO_NATIVE(id->idVendor);
77340 @@ -501,7 +501,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
77341 for (i = 0; i < count; i++) {
77342 const char *id = (char *)devs[i].id;
77343 char acpi_id[sizeof(devs[0].id)];
77344 - int j;
77345 + unsigned int j;
77346
77347 buf_printf(&mod->dev_table_buf,
77348 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77349 @@ -531,7 +531,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77350
77351 for (j = 0; j < PNP_MAX_DEVICES; j++) {
77352 const char *id = (char *)card->devs[j].id;
77353 - int i2, j2;
77354 + unsigned int i2, j2;
77355 int dup = 0;
77356
77357 if (!id[0])
77358 @@ -557,7 +557,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77359 /* add an individual alias for every device entry */
77360 if (!dup) {
77361 char acpi_id[sizeof(card->devs[0].id)];
77362 - int k;
77363 + unsigned int k;
77364
77365 buf_printf(&mod->dev_table_buf,
77366 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77367 @@ -882,7 +882,7 @@ static void dmi_ascii_filter(char *d, const char *s)
77368 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
77369 char *alias)
77370 {
77371 - int i, j;
77372 + unsigned int i, j;
77373
77374 sprintf(alias, "dmi*");
77375
77376 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
77377 index c4e7d15..4241aef 100644
77378 --- a/scripts/mod/modpost.c
77379 +++ b/scripts/mod/modpost.c
77380 @@ -922,6 +922,7 @@ enum mismatch {
77381 ANY_INIT_TO_ANY_EXIT,
77382 ANY_EXIT_TO_ANY_INIT,
77383 EXPORT_TO_INIT_EXIT,
77384 + DATA_TO_TEXT
77385 };
77386
77387 struct sectioncheck {
77388 @@ -1030,6 +1031,12 @@ const struct sectioncheck sectioncheck[] = {
77389 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
77390 .mismatch = EXPORT_TO_INIT_EXIT,
77391 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
77392 +},
77393 +/* Do not reference code from writable data */
77394 +{
77395 + .fromsec = { DATA_SECTIONS, NULL },
77396 + .tosec = { TEXT_SECTIONS, NULL },
77397 + .mismatch = DATA_TO_TEXT
77398 }
77399 };
77400
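The new sectioncheck entry above makes modpost report relocations that go from writable data sections into text, which is exactly the pattern the rest of this patch removes by constifying operations tables (see the security_operations hunks further down, which add __read_only). A small illustration of the two forms, with made-up names; the const variant is what keeps the code pointers out of writable data.

        /* What the DATA_TO_TEXT check is aimed at: a writable object in .data
         * that stores pointers into .text.  Corrupting such an object hands an
         * attacker control over the stored code pointers. */
        static int example_open(void)  { return 0; }
        static int example_close(void) { return 0; }

        struct example_ops {
                int (*open)(void);
                int (*close)(void);
        };

        /* Writable at runtime -- the .data-to-.text reference the new check
         * reports. */
        static struct example_ops writable_ops = {
                .open  = example_open,
                .close = example_close,
        };

        /* Constified -- typically placed in a read-only section, so the code
         * pointers cannot be overwritten; the __read_only annotation used later
         * in this patch serves the same purpose for objects the kernel must
         * still write during early init. */
        static const struct example_ops constified_ops = {
                .open  = example_open,
                .close = example_close,
        };

        int main(void)
        {
                return writable_ops.open() + constified_ops.close();
        }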
77401 @@ -1152,10 +1159,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
77402 continue;
77403 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
77404 continue;
77405 - if (sym->st_value == addr)
77406 - return sym;
77407 /* Find a symbol nearby - addr are maybe negative */
77408 d = sym->st_value - addr;
77409 + if (d == 0)
77410 + return sym;
77411 if (d < 0)
77412 d = addr - sym->st_value;
77413 if (d < distance) {
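The find_elf_symbol change above folds the exact-address test into the distance computation: d is calculated first and a zero distance returns immediately, rather than a separate equality check running before the nearby-symbol search. A simplified, self-contained version of that loop (a plain array stands in for the ELF symbol table):

        #include <stdio.h>
        #include <limits.h>

        struct sym {
                const char *name;
                long        value;   /* stands in for Elf_Sym.st_value */
        };

        /* Compute the signed distance first, treat an exact hit (d == 0) as the
         * immediate answer, otherwise keep the closest symbol seen so far. */
        static const struct sym *find_nearest(const struct sym *tab, int n, long addr)
        {
                const struct sym *best = NULL;
                long distance = LONG_MAX;

                for (int i = 0; i < n; i++) {
                        long d = tab[i].value - addr;

                        if (d == 0)
                                return &tab[i];
                        if (d < 0)
                                d = -d;
                        if (d < distance) {
                                distance = d;
                                best = &tab[i];
                        }
                }
                return best;
        }

        int main(void)
        {
                static const struct sym tab[] = {
                        { "foo", 0x100 }, { "bar", 0x180 }, { "baz", 0x200 },
                };
                const struct sym *s = find_nearest(tab, 3, 0x190);

                printf("nearest to 0x190: %s\n", s ? s->name : "(none)");
                return 0;
        }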
77414 @@ -1434,6 +1441,14 @@ static void report_sec_mismatch(const char *modname,
77415 tosym, prl_to, prl_to, tosym);
77416 free(prl_to);
77417 break;
77418 + case DATA_TO_TEXT:
77419 +/*
77420 + fprintf(stderr,
77421 + "The variable %s references\n"
77422 + "the %s %s%s%s\n",
77423 + fromsym, to, sec2annotation(tosec), tosym, to_p);
77424 +*/
77425 + break;
77426 }
77427 fprintf(stderr, "\n");
77428 }
77429 @@ -1668,7 +1683,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
77430 static void check_sec_ref(struct module *mod, const char *modname,
77431 struct elf_info *elf)
77432 {
77433 - int i;
77434 + unsigned int i;
77435 Elf_Shdr *sechdrs = elf->sechdrs;
77436
77437 /* Walk through all sections */
77438 @@ -1766,7 +1781,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
77439 va_end(ap);
77440 }
77441
77442 -void buf_write(struct buffer *buf, const char *s, int len)
77443 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
77444 {
77445 if (buf->size - buf->pos < len) {
77446 buf->size += len + SZ;
77447 @@ -1984,7 +1999,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
77448 if (fstat(fileno(file), &st) < 0)
77449 goto close_write;
77450
77451 - if (st.st_size != b->pos)
77452 + if (st.st_size != (off_t)b->pos)
77453 goto close_write;
77454
77455 tmp = NOFAIL(malloc(b->pos));
77456 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
77457 index 51207e4..f7d603d 100644
77458 --- a/scripts/mod/modpost.h
77459 +++ b/scripts/mod/modpost.h
77460 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
77461
77462 struct buffer {
77463 char *p;
77464 - int pos;
77465 - int size;
77466 + unsigned int pos;
77467 + unsigned int size;
77468 };
77469
77470 void __attribute__((format(printf, 2, 3)))
77471 buf_printf(struct buffer *buf, const char *fmt, ...);
77472
77473 void
77474 -buf_write(struct buffer *buf, const char *s, int len);
77475 +buf_write(struct buffer *buf, const char *s, unsigned int len);
77476
77477 struct module {
77478 struct module *next;
77479 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
77480 index 9dfcd6d..099068e 100644
77481 --- a/scripts/mod/sumversion.c
77482 +++ b/scripts/mod/sumversion.c
77483 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
77484 goto out;
77485 }
77486
77487 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
77488 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
77489 warn("writing sum in %s failed: %s\n",
77490 filename, strerror(errno));
77491 goto out;
77492 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
77493 index 5c11312..72742b5 100644
77494 --- a/scripts/pnmtologo.c
77495 +++ b/scripts/pnmtologo.c
77496 @@ -237,14 +237,14 @@ static void write_header(void)
77497 fprintf(out, " * Linux logo %s\n", logoname);
77498 fputs(" */\n\n", out);
77499 fputs("#include <linux/linux_logo.h>\n\n", out);
77500 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
77501 + fprintf(out, "static unsigned char %s_data[] = {\n",
77502 logoname);
77503 }
77504
77505 static void write_footer(void)
77506 {
77507 fputs("\n};\n\n", out);
77508 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
77509 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
77510 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
77511 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
77512 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
77513 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
77514 fputs("\n};\n\n", out);
77515
77516 /* write logo clut */
77517 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
77518 + fprintf(out, "static unsigned char %s_clut[] = {\n",
77519 logoname);
77520 write_hex_cnt = 0;
77521 for (i = 0; i < logo_clutsize; i++) {
77522 diff --git a/security/Kconfig b/security/Kconfig
77523 index ccc61f8..5effdb4 100644
77524 --- a/security/Kconfig
77525 +++ b/security/Kconfig
77526 @@ -4,6 +4,640 @@
77527
77528 menu "Security options"
77529
77530 +source grsecurity/Kconfig
77531 +
77532 +menu "PaX"
77533 +
77534 + config ARCH_TRACK_EXEC_LIMIT
77535 + bool
77536 +
77537 + config PAX_KERNEXEC_PLUGIN
77538 + bool
77539 +
77540 + config PAX_PER_CPU_PGD
77541 + bool
77542 +
77543 + config TASK_SIZE_MAX_SHIFT
77544 + int
77545 + depends on X86_64
77546 + default 47 if !PAX_PER_CPU_PGD
77547 + default 42 if PAX_PER_CPU_PGD
77548 +
77549 + config PAX_ENABLE_PAE
77550 + bool
77551 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
77552 +
77553 +config PAX
77554 + bool "Enable various PaX features"
77555 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
77556 + help
77557 + This allows you to enable various PaX features. PaX adds
77558 + intrusion prevention mechanisms to the kernel that reduce
77559 + the risks posed by exploitable memory corruption bugs.
77560 +
77561 +menu "PaX Control"
77562 + depends on PAX
77563 +
77564 +config PAX_SOFTMODE
77565 + bool 'Support soft mode'
77566 + help
77567 + Enabling this option will allow you to run PaX in soft mode, that
77568 + is, PaX features will not be enforced by default, only on executables
77569 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
77570 + support as they are the only way to mark executables for soft mode use.
77571 +
77572 + Soft mode can be activated by using the "pax_softmode=1" kernel command
77573 + line option on boot. Furthermore you can control various PaX features
77574 + at runtime via the entries in /proc/sys/kernel/pax.
77575 +
77576 +config PAX_EI_PAX
77577 + bool 'Use legacy ELF header marking'
77578 + help
77579 + Enabling this option will allow you to control PaX features on
77580 + a per executable basis via the 'chpax' utility available at
77581 + http://pax.grsecurity.net/. The control flags will be read from
77582 + an otherwise reserved part of the ELF header. This marking has
77583 + numerous drawbacks (no support for soft-mode, toolchain does not
77584 + know about the non-standard use of the ELF header) therefore it
77585 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
77586 + support.
77587 +
77588 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77589 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
77590 + option otherwise they will not get any protection.
77591 +
77592 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
77593 + support as well, they will override the legacy EI_PAX marks.
77594 +
77595 +config PAX_PT_PAX_FLAGS
77596 + bool 'Use ELF program header marking'
77597 + help
77598 + Enabling this option will allow you to control PaX features on
77599 + a per executable basis via the 'paxctl' utility available at
77600 + http://pax.grsecurity.net/. The control flags will be read from
77601 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
77602 + has the benefits of supporting both soft mode and being fully
77603 + integrated into the toolchain (the binutils patch is available
77604 + from http://pax.grsecurity.net).
77605 +
77606 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77607 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77608 + support otherwise they will not get any protection.
77609 +
77610 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77611 + must make sure that the marks are the same if a binary has both marks.
77612 +
77613 + Note that if you enable the legacy EI_PAX marking support as well,
77614 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
77615 +
77616 +config PAX_XATTR_PAX_FLAGS
77617 + bool 'Use filesystem extended attributes marking'
77618 + select CIFS_XATTR if CIFS
77619 + select EXT2_FS_XATTR if EXT2_FS
77620 + select EXT3_FS_XATTR if EXT3_FS
77621 + select EXT4_FS_XATTR if EXT4_FS
77622 + select JFFS2_FS_XATTR if JFFS2_FS
77623 + select REISERFS_FS_XATTR if REISERFS_FS
77624 + select SQUASHFS_XATTR if SQUASHFS
77625 + select TMPFS_XATTR if TMPFS
77626 + select UBIFS_FS_XATTR if UBIFS_FS
77627 + help
77628 + Enabling this option will allow you to control PaX features on
77629 + a per executable basis via the 'setfattr' utility. The control
77630 + flags will be read from the user.pax.flags extended attribute of
77631 + the file. This marking has the benefit of supporting binary-only
77632 + applications that self-check themselves (e.g., skype) and would
77633 + not tolerate chpax/paxctl changes. The main drawback is that
77634 + extended attributes are not supported by some filesystems (e.g.,
77635 + isofs, udf, vfat) so copying files through such filesystems will
77636 + lose the extended attributes and these PaX markings.
77637 +
77638 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77639 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77640 + support otherwise they will not get any protection.
77641 +
77642 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77643 + must make sure that the marks are the same if a binary has both marks.
77644 +
77645 + Note that if you enable the legacy EI_PAX marking support as well,
77646 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
77647 +
77648 +choice
77649 + prompt 'MAC system integration'
77650 + default PAX_HAVE_ACL_FLAGS
77651 + help
77652 + Mandatory Access Control systems have the option of controlling
77653 + PaX flags on a per executable basis, choose the method supported
77654 + by your particular system.
77655 +
77656 + - "none": if your MAC system does not interact with PaX,
77657 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
77658 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
77659 +
77660 + NOTE: this option is for developers/integrators only.
77661 +
77662 + config PAX_NO_ACL_FLAGS
77663 + bool 'none'
77664 +
77665 + config PAX_HAVE_ACL_FLAGS
77666 + bool 'direct'
77667 +
77668 + config PAX_HOOK_ACL_FLAGS
77669 + bool 'hook'
77670 +endchoice
77671 +
77672 +endmenu
77673 +
77674 +menu "Non-executable pages"
77675 + depends on PAX
77676 +
77677 +config PAX_NOEXEC
77678 + bool "Enforce non-executable pages"
77679 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
77680 + help
77681 + By design some architectures do not allow for protecting memory
77682 + pages against execution or even if they do, Linux does not make
77683 + use of this feature. In practice this means that if a page is
77684 + readable (such as the stack or heap) it is also executable.
77685 +
77686 + There is a well known exploit technique that makes use of this
77687 + fact and a common programming mistake where an attacker can
77688 + introduce code of his choice somewhere in the attacked program's
77689 + memory (typically the stack or the heap) and then execute it.
77690 +
77691 + If the attacked program was running with different (typically
77692 + higher) privileges than that of the attacker, then he can elevate
77693 + his own privilege level (e.g. get a root shell, write to files to
77694 + which he does not have write access, etc).
77695 +
77696 + Enabling this option will let you choose from various features
77697 + that prevent the injection and execution of 'foreign' code in
77698 + a program.
77699 +
77700 + This will also break programs that rely on the old behaviour and
77701 + expect that dynamically allocated memory via the malloc() family
77702 + of functions is executable (which it is not). Notable examples
77703 + are the XFree86 4.x server, the java runtime and wine.
77704 +
77705 +config PAX_PAGEEXEC
77706 + bool "Paging based non-executable pages"
77707 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
77708 + select S390_SWITCH_AMODE if S390
77709 + select S390_EXEC_PROTECT if S390
77710 + select ARCH_TRACK_EXEC_LIMIT if X86_32
77711 + help
77712 + This implementation is based on the paging feature of the CPU.
77713 + On i386 without hardware non-executable bit support there is a
77714 + variable but usually low performance impact, however on Intel's
77715 + P4 core based CPUs it is very high so you should not enable this
77716 + for kernels meant to be used on such CPUs.
77717 +
77718 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
77719 + with hardware non-executable bit support there is no performance
77720 + impact, on ppc the impact is negligible.
77721 +
77722 + Note that several architectures require various emulations due to
77723 + badly designed userland ABIs; this will cause a performance impact
77724 + that will disappear as soon as userland is fixed. For example, ppc
77725 + userland MUST have been built with secure-plt by a recent toolchain.
77726 +
77727 +config PAX_SEGMEXEC
77728 + bool "Segmentation based non-executable pages"
77729 + depends on PAX_NOEXEC && X86_32
77730 + help
77731 + This implementation is based on the segmentation feature of the
77732 + CPU and has a very small performance impact, however applications
77733 + will be limited to a 1.5 GB address space instead of the normal
77734 + 3 GB.
77735 +
77736 +config PAX_EMUTRAMP
77737 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
77738 + default y if PARISC
77739 + help
77740 + There are some programs and libraries that for one reason or
77741 + another attempt to execute special small code snippets from
77742 + non-executable memory pages. Most notable examples are the
77743 + signal handler return code generated by the kernel itself and
77744 + the GCC trampolines.
77745 +
77746 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
77747 + such programs will no longer work under your kernel.
77748 +
77749 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
77750 + utilities to enable trampoline emulation for the affected programs
77751 + yet still have the protection provided by the non-executable pages.
77752 +
77753 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
77754 + your system will not even boot.
77755 +
77756 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
77757 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
77758 + for the affected files.
77759 +
77760 + NOTE: enabling this feature *may* open up a loophole in the
77761 + protection provided by non-executable pages that an attacker
77762 + could abuse. Therefore the best solution is to not have any
77763 + files on your system that would require this option. This can
77764 + be achieved by not using libc5 (which relies on the kernel
77765 + signal handler return code) and not using or rewriting programs
77766 + that make use of the nested function implementation of GCC.
77767 + Skilled users can just fix GCC itself so that it implements
77768 + nested function calls in a way that does not interfere with PaX.
77769 +
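One concrete source of the "GCC trampolines" mentioned in the help text above is taking the address of a nested function (a GCC extension) that refers to locals of its enclosing function: GCC materializes a small stub on the stack, and executing that stub is what PAGEEXEC/SEGMEXEC would block without EMUTRAMP. A minimal example, compilable with gcc only:

        #include <stdio.h>

        static void for_each(int n, void (*fn)(int))
        {
                for (int i = 0; i < n; i++)
                        fn(i);
        }

        int main(void)
        {
                int sum = 0;

                /* GCC extension: nested function capturing a local of main().
                 * Passing its address forces GCC to emit an executable
                 * trampoline on the stack -- the thing EMUTRAMP emulates when
                 * executable stacks are forbidden. */
                void add(int i) { sum += i; }

                for_each(5, add);
                printf("sum = %d\n", sum);      /* prints 10 */
                return 0;
        }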
77770 +config PAX_EMUSIGRT
77771 + bool "Automatically emulate sigreturn trampolines"
77772 + depends on PAX_EMUTRAMP && PARISC
77773 + default y
77774 + help
77775 + Enabling this option will have the kernel automatically detect
77776 + and emulate signal return trampolines executing on the stack
77777 + that would otherwise lead to task termination.
77778 +
77779 + This solution is intended as a temporary one for users with
77780 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
77781 + Modula-3 runtime, etc) or executables linked to such, basically
77782 + everything that does not specify its own SA_RESTORER function in
77783 + normal executable memory like glibc 2.1+ does.
77784 +
77785 + On parisc you MUST enable this option, otherwise your system will
77786 + not even boot.
77787 +
77788 + NOTE: this feature cannot be disabled on a per executable basis
77789 + and since it *does* open up a loophole in the protection provided
77790 + by non-executable pages, the best solution is to not have any
77791 + files on your system that would require this option.
77792 +
77793 +config PAX_MPROTECT
77794 + bool "Restrict mprotect()"
77795 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
77796 + help
77797 + Enabling this option will prevent programs from
77798 + - changing the executable status of memory pages that were
77799 + not originally created as executable,
77800 + - making read-only executable pages writable again,
77801 + - creating executable pages from anonymous memory,
77802 + - making read-only-after-relocations (RELRO) data pages writable again.
77803 +
77804 + You should say Y here to complete the protection provided by
77805 + the enforcement of non-executable pages.
77806 +
77807 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
77808 + this feature on a per file basis.
77809 +
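The restrictions listed above are what a typical JIT allocation pattern runs into. The userspace sketch below (hypothetical, not from this patch) maps anonymous writable memory and then asks for it to become executable; under PAX_MPROTECT that second step is refused, which is also the behaviour the MPROTECT_COMPAT option below trades for the older demotion handling on legacy userland.

        #include <stdio.h>
        #include <string.h>
        #include <errno.h>
        #include <sys/mman.h>

        /* JIT-style pattern PAX_MPROTECT is designed to refuse: allocate
         * anonymous writable memory, emit code into it, then ask for
         * PROT_EXEC on the same pages. */
        int main(void)
        {
                size_t len = 4096;
                void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                if (buf == MAP_FAILED) {
                        perror("mmap");
                        return 1;
                }

                /* ... a JIT would write machine code into buf here ... */

                if (mprotect(buf, len, PROT_READ | PROT_EXEC) != 0)
                        /* Expected outcome on a PAX_MPROTECT kernel. */
                        printf("mprotect to r-x refused: %s\n", strerror(errno));
                else
                        printf("mprotect to r-x allowed (MPROTECT off or exempted)\n");

                munmap(buf, len);
                return 0;
        }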
77810 +config PAX_MPROTECT_COMPAT
77811 + bool "Use legacy/compat protection demoting (read help)"
77812 + depends on PAX_MPROTECT
77813 + default n
77814 + help
77815 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
77816 + by sending the proper error code to the application. For some broken
77817 + userland, this can cause problems with Python or other applications. The
77818 + current implementation however allows for applications like clamav to
77819 + detect if JIT compilation/execution is allowed and to fall back gracefully
77820 + to an interpreter-based mode if it does not. While we encourage everyone
77821 + to use the current implementation as-is and push upstream to fix broken
77822 + userland (note that the RWX logging option can assist with this), in some
77823 + environments this may not be possible. Having to disable MPROTECT
77824 + completely on certain binaries reduces the security benefit of PaX,
77825 + so this option is provided for those environments to revert to the old
77826 + behavior.
77827 +
77828 +config PAX_ELFRELOCS
77829 + bool "Allow ELF text relocations (read help)"
77830 + depends on PAX_MPROTECT
77831 + default n
77832 + help
77833 + Non-executable pages and mprotect() restrictions are effective
77834 + in preventing the introduction of new executable code into an
77835 + attacked task's address space. There remain only two venues
77836 + for this kind of attack: if the attacker can execute already
77837 + existing code in the attacked task then he can either have it
77838 + create and mmap() a file containing his code or have it mmap()
77839 + an already existing ELF library that does not have position
77840 + independent code in it and use mprotect() on it to make it
77841 + writable and copy his code there. While protecting against
77842 + the former approach is beyond PaX, the latter can be prevented
77843 + by having only PIC ELF libraries on one's system (which do not
77844 + need to relocate their code). If you are sure this is your case,
77845 + as is the case with all modern Linux distributions, then leave
77846 + this option disabled. You should say 'n' here.
77847 +
77848 +config PAX_ETEXECRELOCS
77849 + bool "Allow ELF ET_EXEC text relocations"
77850 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
77851 + select PAX_ELFRELOCS
77852 + default y
77853 + help
77854 + On some architectures there are incorrectly created applications
77855 + that require text relocations and would not work without enabling
77856 + this option. If you are an alpha, ia64 or parisc user, you should
77857 + enable this option and disable it once you have made sure that
77858 + none of your applications need it.
77859 +
77860 +config PAX_EMUPLT
77861 + bool "Automatically emulate ELF PLT"
77862 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
77863 + default y
77864 + help
77865 + Enabling this option will have the kernel automatically detect
77866 + and emulate the Procedure Linkage Table entries in ELF files.
77867 + On some architectures such entries are in writable memory, and
77868 + become non-executable leading to task termination. Therefore
77869 + it is mandatory that you enable this option on alpha, parisc,
77870 + sparc and sparc64, otherwise your system would not even boot.
77871 +
77872 + NOTE: this feature *does* open up a loophole in the protection
77873 + provided by the non-executable pages, therefore the proper
77874 + solution is to modify the toolchain to produce a PLT that does
77875 + not need to be writable.
77876 +
77877 +config PAX_DLRESOLVE
77878 + bool 'Emulate old glibc resolver stub'
77879 + depends on PAX_EMUPLT && SPARC
77880 + default n
77881 + help
77882 + This option is needed if userland has an old glibc (before 2.4)
77883 + that puts a 'save' instruction into the runtime generated resolver
77884 + stub that needs special emulation.
77885 +
77886 +config PAX_KERNEXEC
77887 + bool "Enforce non-executable kernel pages"
77888 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
77889 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
77890 + select PAX_KERNEXEC_PLUGIN if X86_64
77891 + help
77892 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
77893 + that is, enabling this option will make it harder to inject
77894 + and execute 'foreign' code in kernel memory itself.
77895 +
77896 + Note that on x86_64 kernels there is a known regression when
77897 + this feature and KVM/VMX are both enabled in the host kernel.
77898 +
77899 +choice
77900 + prompt "Return Address Instrumentation Method"
77901 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
77902 + depends on PAX_KERNEXEC_PLUGIN
77903 + help
77904 + Select the method used to instrument function pointer dereferences.
77905 + Note that binary modules cannot be instrumented by this approach.
77906 +
77907 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
77908 + bool "bts"
77909 + help
77910 + This method is compatible with binary only modules but has
77911 + a higher runtime overhead.
77912 +
77913 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
77914 + bool "or"
77915 + depends on !PARAVIRT
77916 + help
77917 + This method is incompatible with binary only modules but has
77918 + a lower runtime overhead.
77919 +endchoice
77920 +
77921 +config PAX_KERNEXEC_PLUGIN_METHOD
77922 + string
77923 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
77924 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
77925 + default ""
77926 +
77927 +config PAX_KERNEXEC_MODULE_TEXT
77928 + int "Minimum amount of memory reserved for module code"
77929 + default "4"
77930 + depends on PAX_KERNEXEC && X86_32 && MODULES
77931 + help
77932 + Due to implementation details the kernel must reserve a fixed
77933 + amount of memory for module code at compile time that cannot be
77934 + changed at runtime. Here you can specify the minimum amount
77935 + in MB that will be reserved. Due to the same implementation
77936 + details this size will always be rounded up to the next 2/4 MB
77937 + boundary (depends on PAE) so the actually available memory for
77938 + module code will usually be more than this minimum.
77939 +
77940 + The default 4 MB should be enough for most users but if you have
77941 + an excessive number of modules (e.g., most distribution configs
77942 + compile many drivers as modules) or use huge modules such as
77943 + nvidia's kernel driver, you will need to adjust this amount.
77944 + A good rule of thumb is to look at your currently loaded kernel
77945 + modules and add up their sizes.
77946 +
77947 +endmenu
77948 +
77949 +menu "Address Space Layout Randomization"
77950 + depends on PAX
77951 +
77952 +config PAX_ASLR
77953 + bool "Address Space Layout Randomization"
77954 + help
77955 + Many if not most exploit techniques rely on the knowledge of
77956 + certain addresses in the attacked program. The following options
77957 + will allow the kernel to apply a certain amount of randomization
77958 + to specific parts of the program thereby forcing an attacker to
77959 + guess them in most cases. Any failed guess will most likely crash
77960 + the attacked program which allows the kernel to detect such attempts
77961 + and react on them. PaX itself provides no reaction mechanisms,
77962 + instead it is strongly encouraged that you make use of Nergal's
77963 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
77964 + (http://www.grsecurity.net/) built-in crash detection features or
77965 + develop one yourself.
77966 +
77967 + By saying Y here you can choose to randomize the following areas:
77968 + - top of the task's kernel stack
77969 + - top of the task's userland stack
77970 + - base address for mmap() requests that do not specify one
77971 + (this includes all libraries)
77972 + - base address of the main executable
77973 +
77974 + It is strongly recommended to say Y here as address space layout
77975 + randomization has negligible impact on performance yet it provides
77976 + a very effective protection.
77977 +
77978 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
77979 + this feature on a per file basis.
77980 +
77981 +config PAX_RANDKSTACK
77982 + bool "Randomize kernel stack base"
77983 + depends on X86_TSC && X86
77984 + help
77985 + By saying Y here the kernel will randomize every task's kernel
77986 + stack on every system call. This will not only force an attacker
77987 + to guess it but also prevent him from making use of possible
77988 + leaked information about it.
77989 +
77990 + Since the kernel stack is a rather scarce resource, randomization
77991 + may cause unexpected stack overflows, therefore you should very
77992 + carefully test your system. Note that once enabled in the kernel
77993 + configuration, this feature cannot be disabled on a per file basis.
77994 +
77995 +config PAX_RANDUSTACK
77996 + bool "Randomize user stack base"
77997 + depends on PAX_ASLR
77998 + help
77999 + By saying Y here the kernel will randomize every task's userland
78000 + stack. The randomization is done in two steps where the second
78001 + one may apply a large shift to the top of the stack and
78002 + cause problems for programs that want to use lots of memory (more
78003 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
78004 + For this reason the second step can be controlled by 'chpax' or
78005 + 'paxctl' on a per file basis.
78006 +
78007 +config PAX_RANDMMAP
78008 + bool "Randomize mmap() base"
78009 + depends on PAX_ASLR
78010 + help
78011 + By saying Y here the kernel will use a randomized base address for
78012 + mmap() requests that do not specify one themselves. As a result
78013 + all dynamically loaded libraries will appear at random addresses
78014 + and therefore be harder to exploit by a technique where an attacker
78015 + attempts to execute library code for his purposes (e.g. spawn a
78016 + shell from an exploited program that is running at an elevated
78017 + privilege level).
78018 +
78019 + Furthermore, if a program is relinked as a dynamic ELF file, its
78020 + base address will be randomized as well, completing the full
78021 + randomization of the address space layout. Attacking such programs
78022 + becomes a guess game. You can find an example of doing this at
78023 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
78024 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
78025 +
78026 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
78027 + feature on a per file basis.
78028 +
78029 +endmenu
78030 +
78031 +menu "Miscellaneous hardening features"
78032 +
78033 +config PAX_MEMORY_SANITIZE
78034 + bool "Sanitize all freed memory"
78035 + depends on !HIBERNATION
78036 + help
78037 + By saying Y here the kernel will erase memory pages as soon as they
78038 + are freed. This in turn reduces the lifetime of data stored in the
78039 + pages, making it less likely that sensitive information such as
78040 + passwords, cryptographic secrets, etc stay in memory for too long.
78041 +
78042 + This is especially useful for programs whose runtime is short; long
78043 + lived processes and the kernel itself also benefit from this as long as
78044 + they operate on whole memory pages and ensure timely freeing of pages
78045 + that may hold sensitive information.
78046 +
78047 + The tradeoff is performance impact: on a single CPU system kernel
78048 + compilation sees a 3% slowdown; other systems and workloads may vary
78049 + and you are advised to test this feature on your expected workload
78050 + before deploying it.
78051 +
78052 + Note that this feature does not protect data stored in live pages,
78053 + e.g., process memory swapped to disk may stay there for a long time.
78054 +
78055 +config PAX_MEMORY_STACKLEAK
78056 + bool "Sanitize kernel stack"
78057 + depends on X86
78058 + help
78059 + By saying Y here the kernel will erase the kernel stack before it
78060 + returns from a system call. This in turn reduces the information
78061 + that a kernel stack leak bug can reveal.
78062 +
78063 + Note that such a bug can still leak information that was put on
78064 + the stack by the current system call (the one eventually triggering
78065 + the bug) but traces of earlier system calls on the kernel stack
78066 + cannot leak anymore.
78067 +
78068 + The tradeoff is performance impact: on a single CPU system kernel
78069 + compilation sees a 1% slowdown; other systems and workloads may vary
78070 + and you are advised to test this feature on your expected workload
78071 + before deploying it.
78072 +
78073 + Note: full support for this feature requires gcc with plugin support
78074 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
78075 + versions means that functions with large enough stack frames may
78076 + leave uninitialized memory behind that may be exposed to a later
78077 + syscall leaking the stack.
78078 +
78079 +config PAX_MEMORY_UDEREF
78080 + bool "Prevent invalid userland pointer dereference"
78081 + depends on X86 && !UML_X86 && !XEN
78082 + select PAX_PER_CPU_PGD if X86_64
78083 + help
78084 + By saying Y here the kernel will be prevented from dereferencing
78085 + userland pointers in contexts where the kernel expects only kernel
78086 + pointers. This is both a useful runtime debugging feature and a
78087 + security measure that prevents exploiting a class of kernel bugs.
78088 +
78089 + The tradeoff is that some virtualization solutions may experience
78090 + a huge slowdown and therefore you should not enable this feature
78091 + for kernels meant to run in such environments. Whether a given VM
78092 + solution is affected or not is best determined by simply trying it
78093 + out, the performance impact will be obvious right on boot as this
78094 + mechanism engages from very early on. A good rule of thumb is that
78095 + VMs running on CPUs without hardware virtualization support (i.e.,
78096 + the majority of IA-32 CPUs) will likely experience the slowdown.
78097 +
78098 +config PAX_REFCOUNT
78099 + bool "Prevent various kernel object reference counter overflows"
78100 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
78101 + help
78102 + By saying Y here the kernel will detect and prevent overflowing
78103 + various (but not all) kinds of object reference counters. Such
78104 + overflows can normally occur due to bugs only and are often, if
78105 + not always, exploitable.
78106 +
78107 + The tradeoff is that data structures protected by an overflowed
78108 + refcount will never be freed and therefore will leak memory. Note
78109 + that this leak also happens even without this protection but in
78110 + that case the overflow can eventually trigger the freeing of the
78111 + data structure while it is still being used elsewhere, resulting
78112 + in the exploitable situation that this feature prevents.
78113 +
78114 + Since this has a negligible performance impact, you should enable
78115 + this feature.
78116 +
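The overflow described above is easiest to see in plain arithmetic: kernel reference counts are 32-bit atomic_t values, and once an attacker can drive one past INT_MAX it wraps, after which an ordinary put can walk it back to zero and free an object that is still referenced. A userspace sketch of just the wrap, with an ordinary integer standing in for atomic_t:

        #include <stdio.h>
        #include <limits.h>

        int main(void)
        {
                /* Stand-in for a kernel atomic_t reference count. */
                unsigned int refcount = INT_MAX;   /* 0x7fffffff: about to overflow */

                refcount += 1;                     /* one more "get" wraps it */

                /* On two's-complement targets this prints -2147483648: the
                 * counter is now nonsense, and further gets/puts can bring it
                 * back to zero while real references still exist -- the
                 * use-after-free setup PAX_REFCOUNT detects and blocks, at the
                 * cost of leaking the object instead. */
                printf("refcount after overflow: %d\n", (int)refcount);
                return 0;
        }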
78117 +config PAX_USERCOPY
78118 + bool "Harden heap object copies between kernel and userland"
78119 + depends on X86 || PPC || SPARC || ARM
78120 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
78121 + help
78122 + By saying Y here the kernel will enforce the size of heap objects
78123 + when they are copied in either direction between the kernel and
78124 + userland, even if only a part of the heap object is copied.
78125 +
78126 + Specifically, this checking prevents information leaking from the
78127 + kernel heap during kernel to userland copies (if the kernel heap
78128 + object is otherwise fully initialized) and prevents kernel heap
78129 + overflows during userland to kernel copies.
78130 +
78131 + Note that the current implementation provides the strictest bounds
78132 + checks for the SLUB allocator.
78133 +
78134 + Enabling this option also enables per-slab cache protection against
78135 + data in a given cache being copied into or out of it via userland
78136 + accessors. Though the whitelist of regions will be reduced over
78137 + time, it notably protects important data structures like task structs.
78138 +
78139 + If frame pointers are enabled on x86, this option will also restrict
78140 + copies into and out of the kernel stack to local variables within a
78141 + single frame.
78142 +
78143 + Since this has a negligible performance impact, you should enable
78144 + this feature.
78145 +
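The heap-object size enforcement described above targets copies like the hypothetical handler below, where a user-supplied length is passed straight to copy_to_user() on a fixed-size kmalloc() object. None of the names come from this patch; the sketch only shows the shape of the bug the check catches.

        #include <linux/types.h>
        #include <linux/slab.h>
        #include <linux/uaccess.h>

        struct conf_blob {
                u32  version;
                char name[32];
        };

        /* BUG on purpose: "len" comes from userland and is never clamped to
         * sizeof(*blob), so copy_to_user() can read past the heap object and
         * leak adjacent slab memory.  With PAX_USERCOPY the copy helpers
         * compare the length against the size of the containing slab object
         * and refuse the oversized copy. */
        static long leaky_read(void __user *ubuf, size_t len)
        {
                struct conf_blob *blob = kmalloc(sizeof(*blob), GFP_KERNEL);
                long ret = 0;

                if (!blob)
                        return -ENOMEM;

                if (copy_to_user(ubuf, blob, len))
                        ret = -EFAULT;

                kfree(blob);
                return ret;
        }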
78146 +config PAX_SIZE_OVERFLOW
78147 + bool "Prevent various integer overflows in function size parameters"
78148 + depends on X86
78149 + help
78150 + By saying Y here the kernel recomputes expressions of function
78151 + arguments marked by a size_overflow attribute with double integer
78152 + precision (DImode/TImode for 32/64 bit integer types).
78153 +
78154 + The recomputed argument is checked against INT_MAX and an event
78155 + is logged on overflow and the triggering process is killed.
78156 +
78157 + Homepage:
78158 + http://www.grsecurity.net/~ephox/overflow_plugin/
78159 +
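The double-precision recomputation described above guards size expressions like the one below, where a 32-bit multiplication wraps before the result reaches an allocator or copy routine; recomputed in 64 bits, the same expression exceeds INT_MAX, which is the condition the plugin's inserted check reports before killing the offender. Values and names here are purely illustrative.

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t count = 0x01000001;              /* attacker-influenced */
                uint32_t size  = 0x100;                   /* element size        */

                uint32_t narrow = count * size;           /* wraps: 0x100        */
                uint64_t wide   = (uint64_t)count * size; /* 0x100000100         */

                printf("32-bit result: %#x\n", narrow);
                printf("64-bit result: %#llx (overflow detected: %s)\n",
                       (unsigned long long)wide,
                       wide > 0x7fffffffULL ? "yes" : "no");
                return 0;
        }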
78160 +endmenu
78161 +
78162 +endmenu
78163 +
78164 config KEYS
78165 bool "Enable access key retention support"
78166 help
78167 @@ -169,7 +803,7 @@ config INTEL_TXT
78168 config LSM_MMAP_MIN_ADDR
78169 int "Low address space for LSM to protect from user allocation"
78170 depends on SECURITY && SECURITY_SELINUX
78171 - default 32768 if ARM
78172 + default 32768 if ALPHA || ARM || PARISC || SPARC32
78173 default 65536
78174 help
78175 This is the portion of low virtual memory which should be protected
78176 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
78177 index ad05d39..afffccb 100644
78178 --- a/security/apparmor/lsm.c
78179 +++ b/security/apparmor/lsm.c
78180 @@ -622,7 +622,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
78181 return error;
78182 }
78183
78184 -static struct security_operations apparmor_ops = {
78185 +static struct security_operations apparmor_ops __read_only = {
78186 .name = "apparmor",
78187
78188 .ptrace_access_check = apparmor_ptrace_access_check,
78189 diff --git a/security/commoncap.c b/security/commoncap.c
78190 index 71a166a..851bb3e 100644
78191 --- a/security/commoncap.c
78192 +++ b/security/commoncap.c
78193 @@ -576,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
78194 {
78195 const struct cred *cred = current_cred();
78196
78197 + if (gr_acl_enable_at_secure())
78198 + return 1;
78199 +
78200 if (cred->uid != 0) {
78201 if (bprm->cap_effective)
78202 return 1;
78203 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
78204 index 3ccf7ac..d73ad64 100644
78205 --- a/security/integrity/ima/ima.h
78206 +++ b/security/integrity/ima/ima.h
78207 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
78208 extern spinlock_t ima_queue_lock;
78209
78210 struct ima_h_table {
78211 - atomic_long_t len; /* number of stored measurements in the list */
78212 - atomic_long_t violations;
78213 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
78214 + atomic_long_unchecked_t violations;
78215 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
78216 };
78217 extern struct ima_h_table ima_htable;
78218 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
78219 index 88a2788..581ab92 100644
78220 --- a/security/integrity/ima/ima_api.c
78221 +++ b/security/integrity/ima/ima_api.c
78222 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
78223 int result;
78224
78225 /* can overflow, only indicator */
78226 - atomic_long_inc(&ima_htable.violations);
78227 + atomic_long_inc_unchecked(&ima_htable.violations);
78228
78229 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
78230 if (!entry) {
78231 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
78232 index e1aa2b4..52027bf 100644
78233 --- a/security/integrity/ima/ima_fs.c
78234 +++ b/security/integrity/ima/ima_fs.c
78235 @@ -28,12 +28,12 @@
78236 static int valid_policy = 1;
78237 #define TMPBUFLEN 12
78238 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
78239 - loff_t *ppos, atomic_long_t *val)
78240 + loff_t *ppos, atomic_long_unchecked_t *val)
78241 {
78242 char tmpbuf[TMPBUFLEN];
78243 ssize_t len;
78244
78245 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
78246 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
78247 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
78248 }
78249
78250 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
78251 index 55a6271..ad829c3 100644
78252 --- a/security/integrity/ima/ima_queue.c
78253 +++ b/security/integrity/ima/ima_queue.c
78254 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
78255 INIT_LIST_HEAD(&qe->later);
78256 list_add_tail_rcu(&qe->later, &ima_measurements);
78257
78258 - atomic_long_inc(&ima_htable.len);
78259 + atomic_long_inc_unchecked(&ima_htable.len);
78260 key = ima_hash_key(entry->digest);
78261 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
78262 return 0;
78263 diff --git a/security/keys/compat.c b/security/keys/compat.c
78264 index 4c48e13..7abdac9 100644
78265 --- a/security/keys/compat.c
78266 +++ b/security/keys/compat.c
78267 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
78268 if (ret == 0)
78269 goto no_payload_free;
78270
78271 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78272 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78273
78274 if (iov != iovstack)
78275 kfree(iov);
78276 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
78277 index fb767c6..b9c49c0 100644
78278 --- a/security/keys/keyctl.c
78279 +++ b/security/keys/keyctl.c
78280 @@ -935,7 +935,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
78281 /*
78282 * Copy the iovec data from userspace
78283 */
78284 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78285 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
78286 unsigned ioc)
78287 {
78288 for (; ioc > 0; ioc--) {
78289 @@ -957,7 +957,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78290 * If successful, 0 will be returned.
78291 */
78292 long keyctl_instantiate_key_common(key_serial_t id,
78293 - const struct iovec *payload_iov,
78294 + const struct iovec __user *payload_iov,
78295 unsigned ioc,
78296 size_t plen,
78297 key_serial_t ringid)
78298 @@ -1052,7 +1052,7 @@ long keyctl_instantiate_key(key_serial_t id,
78299 [0].iov_len = plen
78300 };
78301
78302 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
78303 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
78304 }
78305
78306 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
78307 @@ -1085,7 +1085,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
78308 if (ret == 0)
78309 goto no_payload_free;
78310
78311 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78312 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78313
78314 if (iov != iovstack)
78315 kfree(iov);
78316 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
78317 index d605f75..2bc6be9 100644
78318 --- a/security/keys/keyring.c
78319 +++ b/security/keys/keyring.c
78320 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
78321 ret = -EFAULT;
78322
78323 for (loop = 0; loop < klist->nkeys; loop++) {
78324 + key_serial_t serial;
78325 key = klist->keys[loop];
78326 + serial = key->serial;
78327
78328 tmp = sizeof(key_serial_t);
78329 if (tmp > buflen)
78330 tmp = buflen;
78331
78332 - if (copy_to_user(buffer,
78333 - &key->serial,
78334 - tmp) != 0)
78335 + if (copy_to_user(buffer, &serial, tmp))
78336 goto error;
78337
78338 buflen -= tmp;
78339 diff --git a/security/min_addr.c b/security/min_addr.c
78340 index f728728..6457a0c 100644
78341 --- a/security/min_addr.c
78342 +++ b/security/min_addr.c
78343 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
78344 */
78345 static void update_mmap_min_addr(void)
78346 {
78347 +#ifndef SPARC
78348 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
78349 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
78350 mmap_min_addr = dac_mmap_min_addr;
78351 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
78352 #else
78353 mmap_min_addr = dac_mmap_min_addr;
78354 #endif
78355 +#endif
78356 }
78357
78358 /*
78359 diff --git a/security/security.c b/security/security.c
78360 index bf619ff..8179030 100644
78361 --- a/security/security.c
78362 +++ b/security/security.c
78363 @@ -20,6 +20,7 @@
78364 #include <linux/ima.h>
78365 #include <linux/evm.h>
78366 #include <linux/fsnotify.h>
78367 +#include <linux/mm.h>
78368 #include <net/flow.h>
78369
78370 #define MAX_LSM_EVM_XATTR 2
78371 @@ -28,8 +29,8 @@
78372 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
78373 CONFIG_DEFAULT_SECURITY;
78374
78375 -static struct security_operations *security_ops;
78376 -static struct security_operations default_security_ops = {
78377 +static struct security_operations *security_ops __read_only;
78378 +static struct security_operations default_security_ops __read_only = {
78379 .name = "default",
78380 };
78381
78382 @@ -70,7 +71,9 @@ int __init security_init(void)
78383
78384 void reset_security_ops(void)
78385 {
78386 + pax_open_kernel();
78387 security_ops = &default_security_ops;
78388 + pax_close_kernel();
78389 }
78390
78391 /* Save user chosen LSM */
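With security_ops and default_security_ops moved into __read_only storage, the one legitimate runtime store left in reset_security_ops() is bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift the write protection and restore it right afterwards. The fragment below restates that pattern in isolation; __read_only and the open/close helpers are supplied elsewhere in this patch, and the function name is made up.

        #include <linux/security.h>

        static struct security_operations *active_ops __read_only;

        /* Any late write to a __read_only object follows the same shape as the
         * reset_security_ops() hunk above: open, single store, close. */
        static void switch_ops(struct security_operations *new_ops)
        {
                pax_open_kernel();
                active_ops = new_ops;
                pax_close_kernel();
        }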
78392 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
78393 index d85b793..a164832 100644
78394 --- a/security/selinux/hooks.c
78395 +++ b/security/selinux/hooks.c
78396 @@ -95,8 +95,6 @@
78397
78398 #define NUM_SEL_MNT_OPTS 5
78399
78400 -extern struct security_operations *security_ops;
78401 -
78402 /* SECMARK reference count */
78403 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
78404
78405 @@ -5520,7 +5518,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
78406
78407 #endif
78408
78409 -static struct security_operations selinux_ops = {
78410 +static struct security_operations selinux_ops __read_only = {
78411 .name = "selinux",
78412
78413 .ptrace_access_check = selinux_ptrace_access_check,
78414 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
78415 index c220f31..89fab3f 100644
78416 --- a/security/selinux/include/xfrm.h
78417 +++ b/security/selinux/include/xfrm.h
78418 @@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
78419
78420 static inline void selinux_xfrm_notify_policyload(void)
78421 {
78422 - atomic_inc(&flow_cache_genid);
78423 + atomic_inc_unchecked(&flow_cache_genid);
78424 }
78425 #else
78426 static inline int selinux_xfrm_enabled(void)
78427 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
78428 index 45c32f0..0038be2 100644
78429 --- a/security/smack/smack_lsm.c
78430 +++ b/security/smack/smack_lsm.c
78431 @@ -3500,7 +3500,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
78432 return 0;
78433 }
78434
78435 -struct security_operations smack_ops = {
78436 +struct security_operations smack_ops __read_only = {
78437 .name = "smack",
78438
78439 .ptrace_access_check = smack_ptrace_access_check,
78440 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
78441 index 620d37c..e2ad89b 100644
78442 --- a/security/tomoyo/tomoyo.c
78443 +++ b/security/tomoyo/tomoyo.c
78444 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
78445 * tomoyo_security_ops is a "struct security_operations" which is used for
78446 * registering TOMOYO.
78447 */
78448 -static struct security_operations tomoyo_security_ops = {
78449 +static struct security_operations tomoyo_security_ops __read_only = {
78450 .name = "tomoyo",
78451 .cred_alloc_blank = tomoyo_cred_alloc_blank,
78452 .cred_prepare = tomoyo_cred_prepare,
78453 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
78454 index 51d6709..1f3dbe2 100644
78455 --- a/security/yama/Kconfig
78456 +++ b/security/yama/Kconfig
78457 @@ -1,6 +1,6 @@
78458 config SECURITY_YAMA
78459 bool "Yama support"
78460 - depends on SECURITY
78461 + depends on SECURITY && !GRKERNSEC
78462 select SECURITYFS
78463 select SECURITY_PATH
78464 default n
78465 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
78466 index 270790d..c67dfcb 100644
78467 --- a/sound/aoa/codecs/onyx.c
78468 +++ b/sound/aoa/codecs/onyx.c
78469 @@ -54,7 +54,7 @@ struct onyx {
78470 spdif_locked:1,
78471 analog_locked:1,
78472 original_mute:2;
78473 - int open_count;
78474 + local_t open_count;
78475 struct codec_info *codec_info;
78476
78477 /* mutex serializes concurrent access to the device
78478 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
78479 struct onyx *onyx = cii->codec_data;
78480
78481 mutex_lock(&onyx->mutex);
78482 - onyx->open_count++;
78483 + local_inc(&onyx->open_count);
78484 mutex_unlock(&onyx->mutex);
78485
78486 return 0;
78487 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
78488 struct onyx *onyx = cii->codec_data;
78489
78490 mutex_lock(&onyx->mutex);
78491 - onyx->open_count--;
78492 - if (!onyx->open_count)
78493 + if (local_dec_and_test(&onyx->open_count))
78494 onyx->spdif_locked = onyx->analog_locked = 0;
78495 mutex_unlock(&onyx->mutex);
78496
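The onyx conversion above, repeated for the mts64 and portman drivers later in this patch, swaps a plain int open count for a local_t and the local_inc()/local_dec_and_test() helpers from <asm/local.h>, so the count is updated with dedicated per-CPU-atomic primitives instead of ordinary read-modify-write on an int. A condensed form of the pattern, with an illustrative device structure:

        #include <asm/local.h>

        struct example_codec {
                local_t open_count;
        };

        static void example_open(struct example_codec *c)
        {
                local_inc(&c->open_count);
        }

        static void example_close(struct example_codec *c)
        {
                /* true only when the last opener goes away */
                if (local_dec_and_test(&c->open_count))
                        ; /* release shared resources here */
        }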
78497 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
78498 index ffd2025..df062c9 100644
78499 --- a/sound/aoa/codecs/onyx.h
78500 +++ b/sound/aoa/codecs/onyx.h
78501 @@ -11,6 +11,7 @@
78502 #include <linux/i2c.h>
78503 #include <asm/pmac_low_i2c.h>
78504 #include <asm/prom.h>
78505 +#include <asm/local.h>
78506
78507 /* PCM3052 register definitions */
78508
78509 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
78510 index 08fde00..0bf641a 100644
78511 --- a/sound/core/oss/pcm_oss.c
78512 +++ b/sound/core/oss/pcm_oss.c
78513 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
78514 if (in_kernel) {
78515 mm_segment_t fs;
78516 fs = snd_enter_user();
78517 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78518 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78519 snd_leave_user(fs);
78520 } else {
78521 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78522 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78523 }
78524 if (ret != -EPIPE && ret != -ESTRPIPE)
78525 break;
78526 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
78527 if (in_kernel) {
78528 mm_segment_t fs;
78529 fs = snd_enter_user();
78530 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78531 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78532 snd_leave_user(fs);
78533 } else {
78534 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78535 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78536 }
78537 if (ret == -EPIPE) {
78538 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
78539 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
78540 struct snd_pcm_plugin_channel *channels;
78541 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
78542 if (!in_kernel) {
78543 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
78544 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
78545 return -EFAULT;
78546 buf = runtime->oss.buffer;
78547 }
78548 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
78549 }
78550 } else {
78551 tmp = snd_pcm_oss_write2(substream,
78552 - (const char __force *)buf,
78553 + (const char __force_kernel *)buf,
78554 runtime->oss.period_bytes, 0);
78555 if (tmp <= 0)
78556 goto err;
78557 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
78558 struct snd_pcm_runtime *runtime = substream->runtime;
78559 snd_pcm_sframes_t frames, frames1;
78560 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
78561 - char __user *final_dst = (char __force __user *)buf;
78562 + char __user *final_dst = (char __force_user *)buf;
78563 if (runtime->oss.plugin_first) {
78564 struct snd_pcm_plugin_channel *channels;
78565 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
78566 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
78567 xfer += tmp;
78568 runtime->oss.buffer_used -= tmp;
78569 } else {
78570 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
78571 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
78572 runtime->oss.period_bytes, 0);
78573 if (tmp <= 0)
78574 goto err;
78575 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
78576 size1);
78577 size1 /= runtime->channels; /* frames */
78578 fs = snd_enter_user();
78579 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
78580 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
78581 snd_leave_user(fs);
78582 }
78583 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
78584 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
78585 index 91cdf94..4085161 100644
78586 --- a/sound/core/pcm_compat.c
78587 +++ b/sound/core/pcm_compat.c
78588 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
78589 int err;
78590
78591 fs = snd_enter_user();
78592 - err = snd_pcm_delay(substream, &delay);
78593 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
78594 snd_leave_user(fs);
78595 if (err < 0)
78596 return err;
78597 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
78598 index 3fe99e6..26952e4 100644
78599 --- a/sound/core/pcm_native.c
78600 +++ b/sound/core/pcm_native.c
78601 @@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
78602 switch (substream->stream) {
78603 case SNDRV_PCM_STREAM_PLAYBACK:
78604 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
78605 - (void __user *)arg);
78606 + (void __force_user *)arg);
78607 break;
78608 case SNDRV_PCM_STREAM_CAPTURE:
78609 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
78610 - (void __user *)arg);
78611 + (void __force_user *)arg);
78612 break;
78613 default:
78614 result = -EINVAL;
78615 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
78616 index 5cf8d65..912a79c 100644
78617 --- a/sound/core/seq/seq_device.c
78618 +++ b/sound/core/seq/seq_device.c
78619 @@ -64,7 +64,7 @@ struct ops_list {
78620 int argsize; /* argument size */
78621
78622 /* operators */
78623 - struct snd_seq_dev_ops ops;
78624 + struct snd_seq_dev_ops *ops;
78625
78626 /* registred devices */
78627 struct list_head dev_list; /* list of devices */
78628 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
78629
78630 mutex_lock(&ops->reg_mutex);
78631 /* copy driver operators */
78632 - ops->ops = *entry;
78633 + ops->ops = entry;
78634 ops->driver |= DRIVER_LOADED;
78635 ops->argsize = argsize;
78636
78637 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
78638 dev->name, ops->id, ops->argsize, dev->argsize);
78639 return -EINVAL;
78640 }
78641 - if (ops->ops.init_device(dev) >= 0) {
78642 + if (ops->ops->init_device(dev) >= 0) {
78643 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
78644 ops->num_init_devices++;
78645 } else {
78646 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
78647 dev->name, ops->id, ops->argsize, dev->argsize);
78648 return -EINVAL;
78649 }
78650 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
78651 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
78652 dev->status = SNDRV_SEQ_DEVICE_FREE;
78653 dev->driver_data = NULL;
78654 ops->num_init_devices--;
78655 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
78656 index 621e60e..f4543f5 100644
78657 --- a/sound/drivers/mts64.c
78658 +++ b/sound/drivers/mts64.c
78659 @@ -29,6 +29,7 @@
78660 #include <sound/initval.h>
78661 #include <sound/rawmidi.h>
78662 #include <sound/control.h>
78663 +#include <asm/local.h>
78664
78665 #define CARD_NAME "Miditerminal 4140"
78666 #define DRIVER_NAME "MTS64"
78667 @@ -67,7 +68,7 @@ struct mts64 {
78668 struct pardevice *pardev;
78669 int pardev_claimed;
78670
78671 - int open_count;
78672 + local_t open_count;
78673 int current_midi_output_port;
78674 int current_midi_input_port;
78675 u8 mode[MTS64_NUM_INPUT_PORTS];
78676 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78677 {
78678 struct mts64 *mts = substream->rmidi->private_data;
78679
78680 - if (mts->open_count == 0) {
78681 + if (local_read(&mts->open_count) == 0) {
78682 /* We don't need a spinlock here, because this is just called
78683 if the device has not been opened before.
78684 So there aren't any IRQs from the device */
78685 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78686
78687 msleep(50);
78688 }
78689 - ++(mts->open_count);
78690 + local_inc(&mts->open_count);
78691
78692 return 0;
78693 }
78694 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78695 struct mts64 *mts = substream->rmidi->private_data;
78696 unsigned long flags;
78697
78698 - --(mts->open_count);
78699 - if (mts->open_count == 0) {
78700 + if (local_dec_return(&mts->open_count) == 0) {
78701 /* We need the spinlock_irqsave here because we can still
78702 have IRQs at this point */
78703 spin_lock_irqsave(&mts->lock, flags);
78704 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78705
78706 msleep(500);
78707
78708 - } else if (mts->open_count < 0)
78709 - mts->open_count = 0;
78710 + } else if (local_read(&mts->open_count) < 0)
78711 + local_set(&mts->open_count, 0);
78712
78713 return 0;
78714 }
78715 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
78716 index b953fb4..1999c01 100644
78717 --- a/sound/drivers/opl4/opl4_lib.c
78718 +++ b/sound/drivers/opl4/opl4_lib.c
78719 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
78720 MODULE_DESCRIPTION("OPL4 driver");
78721 MODULE_LICENSE("GPL");
78722
78723 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
78724 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
78725 {
78726 int timeout = 10;
78727 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
78728 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
78729 index 3e32bd3..46fc152 100644
78730 --- a/sound/drivers/portman2x4.c
78731 +++ b/sound/drivers/portman2x4.c
78732 @@ -48,6 +48,7 @@
78733 #include <sound/initval.h>
78734 #include <sound/rawmidi.h>
78735 #include <sound/control.h>
78736 +#include <asm/local.h>
78737
78738 #define CARD_NAME "Portman 2x4"
78739 #define DRIVER_NAME "portman"
78740 @@ -85,7 +86,7 @@ struct portman {
78741 struct pardevice *pardev;
78742 int pardev_claimed;
78743
78744 - int open_count;
78745 + local_t open_count;
78746 int mode[PORTMAN_NUM_INPUT_PORTS];
78747 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
78748 };
78749 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
78750 index 87657dd..a8268d4 100644
78751 --- a/sound/firewire/amdtp.c
78752 +++ b/sound/firewire/amdtp.c
78753 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
78754 ptr = s->pcm_buffer_pointer + data_blocks;
78755 if (ptr >= pcm->runtime->buffer_size)
78756 ptr -= pcm->runtime->buffer_size;
78757 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
78758 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
78759
78760 s->pcm_period_pointer += data_blocks;
78761 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
78762 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
78763 */
78764 void amdtp_out_stream_update(struct amdtp_out_stream *s)
78765 {
78766 - ACCESS_ONCE(s->source_node_id_field) =
78767 + ACCESS_ONCE_RW(s->source_node_id_field) =
78768 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
78769 }
78770 EXPORT_SYMBOL(amdtp_out_stream_update);
78771 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
78772 index 537a9cb..8e8c8e9 100644
78773 --- a/sound/firewire/amdtp.h
78774 +++ b/sound/firewire/amdtp.h
78775 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
78776 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
78777 struct snd_pcm_substream *pcm)
78778 {
78779 - ACCESS_ONCE(s->pcm) = pcm;
78780 + ACCESS_ONCE_RW(s->pcm) = pcm;
78781 }
78782
78783 /**
78784 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
78785 index d428ffe..751ef78 100644
78786 --- a/sound/firewire/isight.c
78787 +++ b/sound/firewire/isight.c
78788 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
78789 ptr += count;
78790 if (ptr >= runtime->buffer_size)
78791 ptr -= runtime->buffer_size;
78792 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
78793 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
78794
78795 isight->period_counter += count;
78796 if (isight->period_counter >= runtime->period_size) {
78797 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
78798 if (err < 0)
78799 return err;
78800
78801 - ACCESS_ONCE(isight->pcm_active) = true;
78802 + ACCESS_ONCE_RW(isight->pcm_active) = true;
78803
78804 return 0;
78805 }
78806 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
78807 {
78808 struct isight *isight = substream->private_data;
78809
78810 - ACCESS_ONCE(isight->pcm_active) = false;
78811 + ACCESS_ONCE_RW(isight->pcm_active) = false;
78812
78813 mutex_lock(&isight->mutex);
78814 isight_stop_streaming(isight);
78815 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
78816
78817 switch (cmd) {
78818 case SNDRV_PCM_TRIGGER_START:
78819 - ACCESS_ONCE(isight->pcm_running) = true;
78820 + ACCESS_ONCE_RW(isight->pcm_running) = true;
78821 break;
78822 case SNDRV_PCM_TRIGGER_STOP:
78823 - ACCESS_ONCE(isight->pcm_running) = false;
78824 + ACCESS_ONCE_RW(isight->pcm_running) = false;
78825 break;
78826 default:
78827 return -EINVAL;
78828 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
78829 index 7bd5e33..1fcab12 100644
78830 --- a/sound/isa/cmi8330.c
78831 +++ b/sound/isa/cmi8330.c
78832 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
78833
78834 struct snd_pcm *pcm;
78835 struct snd_cmi8330_stream {
78836 - struct snd_pcm_ops ops;
78837 + snd_pcm_ops_no_const ops;
78838 snd_pcm_open_callback_t open;
78839 void *private_data; /* sb or wss */
78840 } streams[2];
78841 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
78842 index 733b014..56ce96f 100644
78843 --- a/sound/oss/sb_audio.c
78844 +++ b/sound/oss/sb_audio.c
78845 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
78846 buf16 = (signed short *)(localbuf + localoffs);
78847 while (c)
78848 {
78849 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
78850 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
78851 if (copy_from_user(lbuf8,
78852 userbuf+useroffs + p,
78853 locallen))
78854 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
78855 index 09d4648..cf234c7 100644
78856 --- a/sound/oss/swarm_cs4297a.c
78857 +++ b/sound/oss/swarm_cs4297a.c
78858 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
78859 {
78860 struct cs4297a_state *s;
78861 u32 pwr, id;
78862 - mm_segment_t fs;
78863 int rval;
78864 #ifndef CONFIG_BCM_CS4297A_CSWARM
78865 u64 cfg;
78866 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
78867 if (!rval) {
78868 char *sb1250_duart_present;
78869
78870 +#if 0
78871 + mm_segment_t fs;
78872 fs = get_fs();
78873 set_fs(KERNEL_DS);
78874 -#if 0
78875 val = SOUND_MASK_LINE;
78876 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
78877 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
78878 val = initvol[i].vol;
78879 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
78880 }
78881 + set_fs(fs);
78882 // cs4297a_write_ac97(s, 0x18, 0x0808);
78883 #else
78884 // cs4297a_write_ac97(s, 0x5e, 0x180);
78885 cs4297a_write_ac97(s, 0x02, 0x0808);
78886 cs4297a_write_ac97(s, 0x18, 0x0808);
78887 #endif
78888 - set_fs(fs);
78889
78890 list_add(&s->list, &cs4297a_devs);
78891
78892 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
78893 index 56b4f74..7cfd41a 100644
78894 --- a/sound/pci/hda/hda_codec.h
78895 +++ b/sound/pci/hda/hda_codec.h
78896 @@ -611,7 +611,7 @@ struct hda_bus_ops {
78897 /* notify power-up/down from codec to controller */
78898 void (*pm_notify)(struct hda_bus *bus);
78899 #endif
78900 -};
78901 +} __no_const;
78902
78903 /* template to pass to the bus constructor */
78904 struct hda_bus_template {
78905 @@ -713,6 +713,7 @@ struct hda_codec_ops {
78906 #endif
78907 void (*reboot_notify)(struct hda_codec *codec);
78908 };
78909 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
78910
78911 /* record for amp information cache */
78912 struct hda_cache_head {
78913 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
78914 struct snd_pcm_substream *substream);
78915 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
78916 struct snd_pcm_substream *substream);
78917 -};
78918 +} __no_const;
78919
78920 /* PCM information for each substream */
78921 struct hda_pcm_stream {
78922 @@ -801,7 +802,7 @@ struct hda_codec {
78923 const char *modelname; /* model name for preset */
78924
78925 /* set by patch */
78926 - struct hda_codec_ops patch_ops;
78927 + hda_codec_ops_no_const patch_ops;
78928
78929 /* PCM to create, set by patch_ops.build_pcms callback */
78930 unsigned int num_pcms;
78931 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
78932 index 0da778a..bc38b84 100644
78933 --- a/sound/pci/ice1712/ice1712.h
78934 +++ b/sound/pci/ice1712/ice1712.h
78935 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
78936 unsigned int mask_flags; /* total mask bits */
78937 struct snd_akm4xxx_ops {
78938 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
78939 - } ops;
78940 + } __no_const ops;
78941 };
78942
78943 struct snd_ice1712_spdif {
78944 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
78945 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78946 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78947 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
78948 - } ops;
78949 + } __no_const ops;
78950 };
78951
78952
78953 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
78954 index a8159b81..5f006a5 100644
78955 --- a/sound/pci/ymfpci/ymfpci_main.c
78956 +++ b/sound/pci/ymfpci/ymfpci_main.c
78957 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
78958 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
78959 break;
78960 }
78961 - if (atomic_read(&chip->interrupt_sleep_count)) {
78962 - atomic_set(&chip->interrupt_sleep_count, 0);
78963 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
78964 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78965 wake_up(&chip->interrupt_sleep);
78966 }
78967 __end:
78968 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
78969 continue;
78970 init_waitqueue_entry(&wait, current);
78971 add_wait_queue(&chip->interrupt_sleep, &wait);
78972 - atomic_inc(&chip->interrupt_sleep_count);
78973 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
78974 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
78975 remove_wait_queue(&chip->interrupt_sleep, &wait);
78976 }
78977 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
78978 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
78979 spin_unlock(&chip->reg_lock);
78980
78981 - if (atomic_read(&chip->interrupt_sleep_count)) {
78982 - atomic_set(&chip->interrupt_sleep_count, 0);
78983 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
78984 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78985 wake_up(&chip->interrupt_sleep);
78986 }
78987 }
78988 @@ -2398,7 +2398,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
78989 spin_lock_init(&chip->reg_lock);
78990 spin_lock_init(&chip->voice_lock);
78991 init_waitqueue_head(&chip->interrupt_sleep);
78992 - atomic_set(&chip->interrupt_sleep_count, 0);
78993 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
78994 chip->card = card;
78995 chip->pci = pci;
78996 chip->irq = -1;
78997 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
78998 index 0ad8dca..7186339 100644
78999 --- a/sound/soc/soc-pcm.c
79000 +++ b/sound/soc/soc-pcm.c
79001 @@ -641,7 +641,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
79002 struct snd_soc_platform *platform = rtd->platform;
79003 struct snd_soc_dai *codec_dai = rtd->codec_dai;
79004 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
79005 - struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
79006 + snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
79007 struct snd_pcm *pcm;
79008 char new_name[64];
79009 int ret = 0, playback = 0, capture = 0;
79010 diff --git a/sound/usb/card.h b/sound/usb/card.h
79011 index da5fa1a..113cd02 100644
79012 --- a/sound/usb/card.h
79013 +++ b/sound/usb/card.h
79014 @@ -45,6 +45,7 @@ struct snd_urb_ops {
79015 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79016 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79017 };
79018 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
79019
79020 struct snd_usb_substream {
79021 struct snd_usb_stream *stream;
79022 @@ -94,7 +95,7 @@ struct snd_usb_substream {
79023 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
79024 spinlock_t lock;
79025
79026 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
79027 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
79028 int last_frame_number; /* stored frame number */
79029 int last_delay; /* stored delay */
79030 };
79031 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
79032 new file mode 100644
79033 index 0000000..ca64170
79034 --- /dev/null
79035 +++ b/tools/gcc/Makefile
79036 @@ -0,0 +1,26 @@
79037 +#CC := gcc
79038 +#PLUGIN_SOURCE_FILES := pax_plugin.c
79039 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
79040 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
79041 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
79042 +
79043 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
79044 +CFLAGS_size_overflow_plugin.o := -Wno-missing-initializer
79045 +
79046 +hostlibs-y := constify_plugin.so
79047 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
79048 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
79049 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
79050 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
79051 +hostlibs-y += colorize_plugin.so
79052 +hostlibs-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
79053 +
79054 +always := $(hostlibs-y)
79055 +
79056 +constify_plugin-objs := constify_plugin.o
79057 +stackleak_plugin-objs := stackleak_plugin.o
79058 +kallocstat_plugin-objs := kallocstat_plugin.o
79059 +kernexec_plugin-objs := kernexec_plugin.o
79060 +checker_plugin-objs := checker_plugin.o
79061 +colorize_plugin-objs := colorize_plugin.o
79062 +size_overflow_plugin-objs := size_overflow_plugin.o
79063 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
79064 new file mode 100644
79065 index 0000000..d41b5af
79066 --- /dev/null
79067 +++ b/tools/gcc/checker_plugin.c
79068 @@ -0,0 +1,171 @@
79069 +/*
79070 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79071 + * Licensed under the GPL v2
79072 + *
79073 + * Note: the choice of the license means that the compilation process is
79074 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79075 + * but for the kernel it doesn't matter since it doesn't link against
79076 + * any of the gcc libraries
79077 + *
79078 + * gcc plugin to implement various sparse (source code checker) features
79079 + *
79080 + * TODO:
79081 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
79082 + *
79083 + * BUGS:
79084 + * - none known
79085 + */
79086 +#include "gcc-plugin.h"
79087 +#include "config.h"
79088 +#include "system.h"
79089 +#include "coretypes.h"
79090 +#include "tree.h"
79091 +#include "tree-pass.h"
79092 +#include "flags.h"
79093 +#include "intl.h"
79094 +#include "toplev.h"
79095 +#include "plugin.h"
79096 +//#include "expr.h" where are you...
79097 +#include "diagnostic.h"
79098 +#include "plugin-version.h"
79099 +#include "tm.h"
79100 +#include "function.h"
79101 +#include "basic-block.h"
79102 +#include "gimple.h"
79103 +#include "rtl.h"
79104 +#include "emit-rtl.h"
79105 +#include "tree-flow.h"
79106 +#include "target.h"
79107 +
79108 +extern void c_register_addr_space (const char *str, addr_space_t as);
79109 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
79110 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
79111 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
79112 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
79113 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
79114 +
79115 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79116 +extern rtx emit_move_insn(rtx x, rtx y);
79117 +
79118 +int plugin_is_GPL_compatible;
79119 +
79120 +static struct plugin_info checker_plugin_info = {
79121 + .version = "201111150100",
79122 +};
79123 +
79124 +#define ADDR_SPACE_KERNEL 0
79125 +#define ADDR_SPACE_FORCE_KERNEL 1
79126 +#define ADDR_SPACE_USER 2
79127 +#define ADDR_SPACE_FORCE_USER 3
79128 +#define ADDR_SPACE_IOMEM 0
79129 +#define ADDR_SPACE_FORCE_IOMEM 0
79130 +#define ADDR_SPACE_PERCPU 0
79131 +#define ADDR_SPACE_FORCE_PERCPU 0
79132 +#define ADDR_SPACE_RCU 0
79133 +#define ADDR_SPACE_FORCE_RCU 0
79134 +
79135 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
79136 +{
79137 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
79138 +}
79139 +
79140 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
79141 +{
79142 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
79143 +}
79144 +
79145 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
79146 +{
79147 + return default_addr_space_valid_pointer_mode(mode, as);
79148 +}
79149 +
79150 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
79151 +{
79152 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
79153 +}
79154 +
79155 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
79156 +{
79157 + return default_addr_space_legitimize_address(x, oldx, mode, as);
79158 +}
79159 +
79160 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
79161 +{
79162 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
79163 + return true;
79164 +
79165 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
79166 + return true;
79167 +
79168 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
79169 + return true;
79170 +
79171 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
79172 + return true;
79173 +
79174 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
79175 + return true;
79176 +
79177 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
79178 + return true;
79179 +
79180 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
79181 + return true;
79182 +
79183 + return subset == superset;
79184 +}
79185 +
79186 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
79187 +{
79188 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
79189 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
79190 +
79191 + return op;
79192 +}
79193 +
79194 +static void register_checker_address_spaces(void *event_data, void *data)
79195 +{
79196 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
79197 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
79198 + c_register_addr_space("__user", ADDR_SPACE_USER);
79199 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
79200 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
79201 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
79202 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
79203 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
79204 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
79205 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
79206 +
79207 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
79208 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
79209 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
79210 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
79211 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
79212 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
79213 + targetm.addr_space.convert = checker_addr_space_convert;
79214 +}
79215 +
79216 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79217 +{
79218 + const char * const plugin_name = plugin_info->base_name;
79219 + const int argc = plugin_info->argc;
79220 + const struct plugin_argument * const argv = plugin_info->argv;
79221 + int i;
79222 +
79223 + if (!plugin_default_version_check(version, &gcc_version)) {
79224 + error(G_("incompatible gcc/plugin versions"));
79225 + return 1;
79226 + }
79227 +
79228 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
79229 +
79230 + for (i = 0; i < argc; ++i)
79231 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79232 +
79233 + if (TARGET_64BIT == 0)
79234 + return 0;
79235 +
79236 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
79237 +
79238 + return 0;
79239 +}
79240 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
79241 new file mode 100644
79242 index 0000000..ee950d0
79243 --- /dev/null
79244 +++ b/tools/gcc/colorize_plugin.c
79245 @@ -0,0 +1,147 @@
79246 +/*
79247 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
79248 + * Licensed under the GPL v2
79249 + *
79250 + * Note: the choice of the license means that the compilation process is
79251 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79252 + * but for the kernel it doesn't matter since it doesn't link against
79253 + * any of the gcc libraries
79254 + *
79255 + * gcc plugin to colorize diagnostic output
79256 + *
79257 + */
79258 +
79259 +#include "gcc-plugin.h"
79260 +#include "config.h"
79261 +#include "system.h"
79262 +#include "coretypes.h"
79263 +#include "tree.h"
79264 +#include "tree-pass.h"
79265 +#include "flags.h"
79266 +#include "intl.h"
79267 +#include "toplev.h"
79268 +#include "plugin.h"
79269 +#include "diagnostic.h"
79270 +#include "plugin-version.h"
79271 +#include "tm.h"
79272 +
79273 +int plugin_is_GPL_compatible;
79274 +
79275 +static struct plugin_info colorize_plugin_info = {
79276 + .version = "201203092200",
79277 +};
79278 +
79279 +#define GREEN "\033[32m\033[2m"
79280 +#define LIGHTGREEN "\033[32m\033[1m"
79281 +#define YELLOW "\033[33m\033[2m"
79282 +#define LIGHTYELLOW "\033[33m\033[1m"
79283 +#define RED "\033[31m\033[2m"
79284 +#define LIGHTRED "\033[31m\033[1m"
79285 +#define BLUE "\033[34m\033[2m"
79286 +#define LIGHTBLUE "\033[34m\033[1m"
79287 +#define BRIGHT "\033[m\033[1m"
79288 +#define NORMAL "\033[m"
79289 +
79290 +static diagnostic_starter_fn old_starter;
79291 +static diagnostic_finalizer_fn old_finalizer;
79292 +
79293 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79294 +{
79295 + const char *color;
79296 + char *newprefix;
79297 +
79298 + switch (diagnostic->kind) {
79299 + case DK_NOTE:
79300 + color = LIGHTBLUE;
79301 + break;
79302 +
79303 + case DK_PEDWARN:
79304 + case DK_WARNING:
79305 + color = LIGHTYELLOW;
79306 + break;
79307 +
79308 + case DK_ERROR:
79309 + case DK_FATAL:
79310 + case DK_ICE:
79311 + case DK_PERMERROR:
79312 + case DK_SORRY:
79313 + color = LIGHTRED;
79314 + break;
79315 +
79316 + default:
79317 + color = NORMAL;
79318 + }
79319 +
79320 + old_starter(context, diagnostic);
79321 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
79322 + return;
79323 + pp_destroy_prefix(context->printer);
79324 + pp_set_prefix(context->printer, newprefix);
79325 +}
79326 +
79327 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79328 +{
79329 + old_finalizer(context, diagnostic);
79330 +}
79331 +
79332 +static void colorize_arm(void)
79333 +{
79334 + old_starter = diagnostic_starter(global_dc);
79335 + old_finalizer = diagnostic_finalizer(global_dc);
79336 +
79337 + diagnostic_starter(global_dc) = start_colorize;
79338 + diagnostic_finalizer(global_dc) = finalize_colorize;
79339 +}
79340 +
79341 +static unsigned int execute_colorize_rearm(void)
79342 +{
79343 + if (diagnostic_starter(global_dc) == start_colorize)
79344 + return 0;
79345 +
79346 + colorize_arm();
79347 + return 0;
79348 +}
79349 +
79350 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
79351 + .pass = {
79352 + .type = SIMPLE_IPA_PASS,
79353 + .name = "colorize_rearm",
79354 + .gate = NULL,
79355 + .execute = execute_colorize_rearm,
79356 + .sub = NULL,
79357 + .next = NULL,
79358 + .static_pass_number = 0,
79359 + .tv_id = TV_NONE,
79360 + .properties_required = 0,
79361 + .properties_provided = 0,
79362 + .properties_destroyed = 0,
79363 + .todo_flags_start = 0,
79364 + .todo_flags_finish = 0
79365 + }
79366 +};
79367 +
79368 +static void colorize_start_unit(void *gcc_data, void *user_data)
79369 +{
79370 + colorize_arm();
79371 +}
79372 +
79373 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79374 +{
79375 + const char * const plugin_name = plugin_info->base_name;
79376 + struct register_pass_info colorize_rearm_pass_info = {
79377 + .pass = &pass_ipa_colorize_rearm.pass,
79378 + .reference_pass_name = "*free_lang_data",
79379 + .ref_pass_instance_number = 0,
79380 + .pos_op = PASS_POS_INSERT_AFTER
79381 + };
79382 +
79383 + if (!plugin_default_version_check(version, &gcc_version)) {
79384 + error(G_("incompatible gcc/plugin versions"));
79385 + return 1;
79386 + }
79387 +
79388 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
79389 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
79390 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
79391 + return 0;
79392 +}
79393 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
79394 new file mode 100644
79395 index 0000000..89b7f56
79396 --- /dev/null
79397 +++ b/tools/gcc/constify_plugin.c
79398 @@ -0,0 +1,328 @@
79399 +/*
79400 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
79401 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
79402 + * Licensed under the GPL v2, or (at your option) v3
79403 + *
79404 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
79405 + *
79406 + * Homepage:
79407 + * http://www.grsecurity.net/~ephox/const_plugin/
79408 + *
79409 + * Usage:
79410 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
79411 + * $ gcc -fplugin=constify_plugin.so test.c -O2
79412 + */
79413 +
79414 +#include "gcc-plugin.h"
79415 +#include "config.h"
79416 +#include "system.h"
79417 +#include "coretypes.h"
79418 +#include "tree.h"
79419 +#include "tree-pass.h"
79420 +#include "flags.h"
79421 +#include "intl.h"
79422 +#include "toplev.h"
79423 +#include "plugin.h"
79424 +#include "diagnostic.h"
79425 +#include "plugin-version.h"
79426 +#include "tm.h"
79427 +#include "function.h"
79428 +#include "basic-block.h"
79429 +#include "gimple.h"
79430 +#include "rtl.h"
79431 +#include "emit-rtl.h"
79432 +#include "tree-flow.h"
79433 +
79434 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
79435 +
79436 +int plugin_is_GPL_compatible;
79437 +
79438 +static struct plugin_info const_plugin_info = {
79439 + .version = "201205300030",
79440 + .help = "no-constify\tturn off constification\n",
79441 +};
79442 +
79443 +static void deconstify_tree(tree node);
79444 +
79445 +static void deconstify_type(tree type)
79446 +{
79447 + tree field;
79448 +
79449 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
79450 + tree type = TREE_TYPE(field);
79451 +
79452 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
79453 + continue;
79454 + if (!TYPE_READONLY(type))
79455 + continue;
79456 +
79457 + deconstify_tree(field);
79458 + }
79459 + TYPE_READONLY(type) = 0;
79460 + C_TYPE_FIELDS_READONLY(type) = 0;
79461 +}
79462 +
79463 +static void deconstify_tree(tree node)
79464 +{
79465 + tree old_type, new_type, field;
79466 +
79467 + old_type = TREE_TYPE(node);
79468 +
79469 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
79470 +
79471 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
79472 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
79473 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
79474 + DECL_FIELD_CONTEXT(field) = new_type;
79475 +
79476 + deconstify_type(new_type);
79477 +
79478 + TREE_READONLY(node) = 0;
79479 + TREE_TYPE(node) = new_type;
79480 +}
79481 +
79482 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79483 +{
79484 + tree type;
79485 +
79486 + *no_add_attrs = true;
79487 + if (TREE_CODE(*node) == FUNCTION_DECL) {
79488 + error("%qE attribute does not apply to functions", name);
79489 + return NULL_TREE;
79490 + }
79491 +
79492 + if (TREE_CODE(*node) == VAR_DECL) {
79493 + error("%qE attribute does not apply to variables", name);
79494 + return NULL_TREE;
79495 + }
79496 +
79497 + if (TYPE_P(*node)) {
79498 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
79499 + *no_add_attrs = false;
79500 + else
79501 + error("%qE attribute applies to struct and union types only", name);
79502 + return NULL_TREE;
79503 + }
79504 +
79505 + type = TREE_TYPE(*node);
79506 +
79507 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
79508 + error("%qE attribute applies to struct and union types only", name);
79509 + return NULL_TREE;
79510 + }
79511 +
79512 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
79513 + error("%qE attribute is already applied to the type", name);
79514 + return NULL_TREE;
79515 + }
79516 +
79517 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
79518 + error("%qE attribute used on type that is not constified", name);
79519 + return NULL_TREE;
79520 + }
79521 +
79522 + if (TREE_CODE(*node) == TYPE_DECL) {
79523 + deconstify_tree(*node);
79524 + return NULL_TREE;
79525 + }
79526 +
79527 + return NULL_TREE;
79528 +}
79529 +
79530 +static void constify_type(tree type)
79531 +{
79532 + TYPE_READONLY(type) = 1;
79533 + C_TYPE_FIELDS_READONLY(type) = 1;
79534 +}
79535 +
79536 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79537 +{
79538 + *no_add_attrs = true;
79539 + if (!TYPE_P(*node)) {
79540 + error("%qE attribute applies to types only", name);
79541 + return NULL_TREE;
79542 + }
79543 +
79544 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
79545 + error("%qE attribute applies to struct and union types only", name);
79546 + return NULL_TREE;
79547 + }
79548 +
79549 + *no_add_attrs = false;
79550 + constify_type(*node);
79551 + return NULL_TREE;
79552 +}
79553 +
79554 +static struct attribute_spec no_const_attr = {
79555 + .name = "no_const",
79556 + .min_length = 0,
79557 + .max_length = 0,
79558 + .decl_required = false,
79559 + .type_required = false,
79560 + .function_type_required = false,
79561 + .handler = handle_no_const_attribute,
79562 +#if BUILDING_GCC_VERSION >= 4007
79563 + .affects_type_identity = true
79564 +#endif
79565 +};
79566 +
79567 +static struct attribute_spec do_const_attr = {
79568 + .name = "do_const",
79569 + .min_length = 0,
79570 + .max_length = 0,
79571 + .decl_required = false,
79572 + .type_required = false,
79573 + .function_type_required = false,
79574 + .handler = handle_do_const_attribute,
79575 +#if BUILDING_GCC_VERSION >= 4007
79576 + .affects_type_identity = true
79577 +#endif
79578 +};
79579 +
79580 +static void register_attributes(void *event_data, void *data)
79581 +{
79582 + register_attribute(&no_const_attr);
79583 + register_attribute(&do_const_attr);
79584 +}
79585 +
79586 +static bool is_fptr(tree field)
79587 +{
79588 + tree ptr = TREE_TYPE(field);
79589 +
79590 + if (TREE_CODE(ptr) != POINTER_TYPE)
79591 + return false;
79592 +
79593 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
79594 +}
79595 +
79596 +static bool walk_struct(tree node)
79597 +{
79598 + tree field;
79599 +
79600 + if (TYPE_FIELDS(node) == NULL_TREE)
79601 + return false;
79602 +
79603 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
79604 + gcc_assert(!TYPE_READONLY(node));
79605 + deconstify_type(node);
79606 + return false;
79607 + }
79608 +
79609 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
79610 + tree type = TREE_TYPE(field);
79611 + enum tree_code code = TREE_CODE(type);
79612 + if (code == RECORD_TYPE || code == UNION_TYPE) {
79613 + if (!(walk_struct(type)))
79614 + return false;
79615 + } else if (!is_fptr(field) && !TREE_READONLY(field))
79616 + return false;
79617 + }
79618 + return true;
79619 +}
79620 +
79621 +static void finish_type(void *event_data, void *data)
79622 +{
79623 + tree type = (tree)event_data;
79624 +
79625 + if (type == NULL_TREE)
79626 + return;
79627 +
79628 + if (TYPE_READONLY(type))
79629 + return;
79630 +
79631 + if (walk_struct(type))
79632 + constify_type(type);
79633 +}
79634 +
79635 +static unsigned int check_local_variables(void);
79636 +
79637 +struct gimple_opt_pass pass_local_variable = {
79638 + {
79639 + .type = GIMPLE_PASS,
79640 + .name = "check_local_variables",
79641 + .gate = NULL,
79642 + .execute = check_local_variables,
79643 + .sub = NULL,
79644 + .next = NULL,
79645 + .static_pass_number = 0,
79646 + .tv_id = TV_NONE,
79647 + .properties_required = 0,
79648 + .properties_provided = 0,
79649 + .properties_destroyed = 0,
79650 + .todo_flags_start = 0,
79651 + .todo_flags_finish = 0
79652 + }
79653 +};
79654 +
79655 +static unsigned int check_local_variables(void)
79656 +{
79657 + tree var;
79658 + referenced_var_iterator rvi;
79659 +
79660 +#if BUILDING_GCC_VERSION == 4005
79661 + FOR_EACH_REFERENCED_VAR(var, rvi) {
79662 +#else
79663 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
79664 +#endif
79665 + tree type = TREE_TYPE(var);
79666 +
79667 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
79668 + continue;
79669 +
79670 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
79671 + continue;
79672 +
79673 + if (!TYPE_READONLY(type))
79674 + continue;
79675 +
79676 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
79677 +// continue;
79678 +
79679 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
79680 +// continue;
79681 +
79682 + if (walk_struct(type)) {
79683 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
79684 + return 1;
79685 + }
79686 + }
79687 + return 0;
79688 +}
79689 +
79690 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79691 +{
79692 + const char * const plugin_name = plugin_info->base_name;
79693 + const int argc = plugin_info->argc;
79694 + const struct plugin_argument * const argv = plugin_info->argv;
79695 + int i;
79696 + bool constify = true;
79697 +
79698 + struct register_pass_info local_variable_pass_info = {
79699 + .pass = &pass_local_variable.pass,
79700 + .reference_pass_name = "*referenced_vars",
79701 + .ref_pass_instance_number = 0,
79702 + .pos_op = PASS_POS_INSERT_AFTER
79703 + };
79704 +
79705 + if (!plugin_default_version_check(version, &gcc_version)) {
79706 + error(G_("incompatible gcc/plugin versions"));
79707 + return 1;
79708 + }
79709 +
79710 + for (i = 0; i < argc; ++i) {
79711 + if (!(strcmp(argv[i].key, "no-constify"))) {
79712 + constify = false;
79713 + continue;
79714 + }
79715 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79716 + }
79717 +
79718 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
79719 + if (constify) {
79720 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
79721 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
79722 + }
79723 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
79724 +
79725 + return 0;
79726 +}
79727 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
79728 new file mode 100644
79729 index 0000000..a5eabce
79730 --- /dev/null
79731 +++ b/tools/gcc/kallocstat_plugin.c
79732 @@ -0,0 +1,167 @@
79733 +/*
79734 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79735 + * Licensed under the GPL v2
79736 + *
79737 + * Note: the choice of the license means that the compilation process is
79738 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79739 + * but for the kernel it doesn't matter since it doesn't link against
79740 + * any of the gcc libraries
79741 + *
79742 + * gcc plugin to find the distribution of k*alloc sizes
79743 + *
79744 + * TODO:
79745 + *
79746 + * BUGS:
79747 + * - none known
79748 + */
79749 +#include "gcc-plugin.h"
79750 +#include "config.h"
79751 +#include "system.h"
79752 +#include "coretypes.h"
79753 +#include "tree.h"
79754 +#include "tree-pass.h"
79755 +#include "flags.h"
79756 +#include "intl.h"
79757 +#include "toplev.h"
79758 +#include "plugin.h"
79759 +//#include "expr.h" where are you...
79760 +#include "diagnostic.h"
79761 +#include "plugin-version.h"
79762 +#include "tm.h"
79763 +#include "function.h"
79764 +#include "basic-block.h"
79765 +#include "gimple.h"
79766 +#include "rtl.h"
79767 +#include "emit-rtl.h"
79768 +
79769 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79770 +
79771 +int plugin_is_GPL_compatible;
79772 +
79773 +static const char * const kalloc_functions[] = {
79774 + "__kmalloc",
79775 + "kmalloc",
79776 + "kmalloc_large",
79777 + "kmalloc_node",
79778 + "kmalloc_order",
79779 + "kmalloc_order_trace",
79780 + "kmalloc_slab",
79781 + "kzalloc",
79782 + "kzalloc_node",
79783 +};
79784 +
79785 +static struct plugin_info kallocstat_plugin_info = {
79786 + .version = "201111150100",
79787 +};
79788 +
79789 +static unsigned int execute_kallocstat(void);
79790 +
79791 +static struct gimple_opt_pass kallocstat_pass = {
79792 + .pass = {
79793 + .type = GIMPLE_PASS,
79794 + .name = "kallocstat",
79795 + .gate = NULL,
79796 + .execute = execute_kallocstat,
79797 + .sub = NULL,
79798 + .next = NULL,
79799 + .static_pass_number = 0,
79800 + .tv_id = TV_NONE,
79801 + .properties_required = 0,
79802 + .properties_provided = 0,
79803 + .properties_destroyed = 0,
79804 + .todo_flags_start = 0,
79805 + .todo_flags_finish = 0
79806 + }
79807 +};
79808 +
79809 +static bool is_kalloc(const char *fnname)
79810 +{
79811 + size_t i;
79812 +
79813 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
79814 + if (!strcmp(fnname, kalloc_functions[i]))
79815 + return true;
79816 + return false;
79817 +}
79818 +
79819 +static unsigned int execute_kallocstat(void)
79820 +{
79821 + basic_block bb;
79822 +
79823 + // 1. loop through BBs and GIMPLE statements
79824 + FOR_EACH_BB(bb) {
79825 + gimple_stmt_iterator gsi;
79826 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79827 + // gimple match:
79828 + tree fndecl, size;
79829 + gimple call_stmt;
79830 + const char *fnname;
79831 +
79832 + // is it a call
79833 + call_stmt = gsi_stmt(gsi);
79834 + if (!is_gimple_call(call_stmt))
79835 + continue;
79836 + fndecl = gimple_call_fndecl(call_stmt);
79837 + if (fndecl == NULL_TREE)
79838 + continue;
79839 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
79840 + continue;
79841 +
79842 + // is it a call to k*alloc
79843 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
79844 + if (!is_kalloc(fnname))
79845 + continue;
79846 +
79847 + // is the size arg the result of a simple const assignment
79848 + size = gimple_call_arg(call_stmt, 0);
79849 + while (true) {
79850 + gimple def_stmt;
79851 + expanded_location xloc;
79852 + size_t size_val;
79853 +
79854 + if (TREE_CODE(size) != SSA_NAME)
79855 + break;
79856 + def_stmt = SSA_NAME_DEF_STMT(size);
79857 + if (!def_stmt || !is_gimple_assign(def_stmt))
79858 + break;
79859 + if (gimple_num_ops(def_stmt) != 2)
79860 + break;
79861 + size = gimple_assign_rhs1(def_stmt);
79862 + if (!TREE_CONSTANT(size))
79863 + continue;
79864 + xloc = expand_location(gimple_location(def_stmt));
79865 + if (!xloc.file)
79866 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
79867 + size_val = TREE_INT_CST_LOW(size);
79868 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
79869 + break;
79870 + }
79871 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
79872 +//debug_tree(gimple_call_fn(call_stmt));
79873 +//print_node(stderr, "pax", fndecl, 4);
79874 + }
79875 + }
79876 +
79877 + return 0;
79878 +}
79879 +
79880 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79881 +{
79882 + const char * const plugin_name = plugin_info->base_name;
79883 + struct register_pass_info kallocstat_pass_info = {
79884 + .pass = &kallocstat_pass.pass,
79885 + .reference_pass_name = "ssa",
79886 + .ref_pass_instance_number = 0,
79887 + .pos_op = PASS_POS_INSERT_AFTER
79888 + };
79889 +
79890 + if (!plugin_default_version_check(version, &gcc_version)) {
79891 + error(G_("incompatible gcc/plugin versions"));
79892 + return 1;
79893 + }
79894 +
79895 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
79896 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
79897 +
79898 + return 0;
79899 +}
79900 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
79901 new file mode 100644
79902 index 0000000..d8a8da2
79903 --- /dev/null
79904 +++ b/tools/gcc/kernexec_plugin.c
79905 @@ -0,0 +1,427 @@
79906 +/*
79907 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79908 + * Licensed under the GPL v2
79909 + *
79910 + * Note: the choice of the license means that the compilation process is
79911 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79912 + * but for the kernel it doesn't matter since it doesn't link against
79913 + * any of the gcc libraries
79914 + *
79915 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
79916 + *
79917 + * TODO:
79918 + *
79919 + * BUGS:
79920 + * - none known
79921 + */
79922 +#include "gcc-plugin.h"
79923 +#include "config.h"
79924 +#include "system.h"
79925 +#include "coretypes.h"
79926 +#include "tree.h"
79927 +#include "tree-pass.h"
79928 +#include "flags.h"
79929 +#include "intl.h"
79930 +#include "toplev.h"
79931 +#include "plugin.h"
79932 +//#include "expr.h" where are you...
79933 +#include "diagnostic.h"
79934 +#include "plugin-version.h"
79935 +#include "tm.h"
79936 +#include "function.h"
79937 +#include "basic-block.h"
79938 +#include "gimple.h"
79939 +#include "rtl.h"
79940 +#include "emit-rtl.h"
79941 +#include "tree-flow.h"
79942 +
79943 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79944 +extern rtx emit_move_insn(rtx x, rtx y);
79945 +
79946 +int plugin_is_GPL_compatible;
79947 +
79948 +static struct plugin_info kernexec_plugin_info = {
79949 + .version = "201111291120",
79950 + .help = "method=[bts|or]\tinstrumentation method\n"
79951 +};
79952 +
79953 +static unsigned int execute_kernexec_reload(void);
79954 +static unsigned int execute_kernexec_fptr(void);
79955 +static unsigned int execute_kernexec_retaddr(void);
79956 +static bool kernexec_cmodel_check(void);
79957 +
79958 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
79959 +static void (*kernexec_instrument_retaddr)(rtx);
79960 +
79961 +static struct gimple_opt_pass kernexec_reload_pass = {
79962 + .pass = {
79963 + .type = GIMPLE_PASS,
79964 + .name = "kernexec_reload",
79965 + .gate = kernexec_cmodel_check,
79966 + .execute = execute_kernexec_reload,
79967 + .sub = NULL,
79968 + .next = NULL,
79969 + .static_pass_number = 0,
79970 + .tv_id = TV_NONE,
79971 + .properties_required = 0,
79972 + .properties_provided = 0,
79973 + .properties_destroyed = 0,
79974 + .todo_flags_start = 0,
79975 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
79976 + }
79977 +};
79978 +
79979 +static struct gimple_opt_pass kernexec_fptr_pass = {
79980 + .pass = {
79981 + .type = GIMPLE_PASS,
79982 + .name = "kernexec_fptr",
79983 + .gate = kernexec_cmodel_check,
79984 + .execute = execute_kernexec_fptr,
79985 + .sub = NULL,
79986 + .next = NULL,
79987 + .static_pass_number = 0,
79988 + .tv_id = TV_NONE,
79989 + .properties_required = 0,
79990 + .properties_provided = 0,
79991 + .properties_destroyed = 0,
79992 + .todo_flags_start = 0,
79993 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
79994 + }
79995 +};
79996 +
79997 +static struct rtl_opt_pass kernexec_retaddr_pass = {
79998 + .pass = {
79999 + .type = RTL_PASS,
80000 + .name = "kernexec_retaddr",
80001 + .gate = kernexec_cmodel_check,
80002 + .execute = execute_kernexec_retaddr,
80003 + .sub = NULL,
80004 + .next = NULL,
80005 + .static_pass_number = 0,
80006 + .tv_id = TV_NONE,
80007 + .properties_required = 0,
80008 + .properties_provided = 0,
80009 + .properties_destroyed = 0,
80010 + .todo_flags_start = 0,
80011 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
80012 + }
80013 +};
80014 +
80015 +static bool kernexec_cmodel_check(void)
80016 +{
80017 + tree section;
80018 +
80019 + if (ix86_cmodel != CM_KERNEL)
80020 + return false;
80021 +
80022 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
80023 + if (!section || !TREE_VALUE(section))
80024 + return true;
80025 +
80026 + section = TREE_VALUE(TREE_VALUE(section));
80027 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
80028 + return true;
80029 +
80030 + return false;
80031 +}
80032 +
80033 +/*
80034 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
80035 + */
80036 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
80037 +{
80038 + gimple asm_movabs_stmt;
80039 +
80040 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
80041 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
80042 + gimple_asm_set_volatile(asm_movabs_stmt, true);
80043 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
80044 + update_stmt(asm_movabs_stmt);
80045 +}
80046 +
80047 +/*
80048 + * find all asm() stmts that clobber r10 and add a reload of r10
80049 + */
80050 +static unsigned int execute_kernexec_reload(void)
80051 +{
80052 + basic_block bb;
80053 +
80054 + // 1. loop through BBs and GIMPLE statements
80055 + FOR_EACH_BB(bb) {
80056 + gimple_stmt_iterator gsi;
80057 +
80058 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80059 + // gimple match: __asm__ ("" : : : "r10");
80060 + gimple asm_stmt;
80061 + size_t nclobbers;
80062 +
80063 + // is it an asm ...
80064 + asm_stmt = gsi_stmt(gsi);
80065 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
80066 + continue;
80067 +
80068 + // ... clobbering r10
80069 + nclobbers = gimple_asm_nclobbers(asm_stmt);
80070 + while (nclobbers--) {
80071 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
80072 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
80073 + continue;
80074 + kernexec_reload_fptr_mask(&gsi);
80075 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
80076 + break;
80077 + }
80078 + }
80079 + }
80080 +
80081 + return 0;
80082 +}
80083 +
80084 +/*
80085 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
80086 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
80087 + */
80088 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
80089 +{
80090 + gimple assign_intptr, assign_new_fptr, call_stmt;
80091 + tree intptr, old_fptr, new_fptr, kernexec_mask;
80092 +
80093 + call_stmt = gsi_stmt(*gsi);
80094 + old_fptr = gimple_call_fn(call_stmt);
80095 +
80096 + // create temporary unsigned long variable used for bitops and cast fptr to it
80097 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
80098 + add_referenced_var(intptr);
80099 + mark_sym_for_renaming(intptr);
80100 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
80101 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80102 + update_stmt(assign_intptr);
80103 +
80104 + // apply logical or to temporary unsigned long and bitmask
80105 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
80106 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
80107 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
80108 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80109 + update_stmt(assign_intptr);
80110 +
80111 + // cast temporary unsigned long back to a temporary fptr variable
80112 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
80113 + add_referenced_var(new_fptr);
80114 + mark_sym_for_renaming(new_fptr);
80115 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
80116 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
80117 + update_stmt(assign_new_fptr);
80118 +
80119 + // replace call stmt fn with the new fptr
80120 + gimple_call_set_fn(call_stmt, new_fptr);
80121 + update_stmt(call_stmt);
80122 +}
80123 +
80124 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
80125 +{
80126 + gimple asm_or_stmt, call_stmt;
80127 + tree old_fptr, new_fptr, input, output;
80128 + VEC(tree, gc) *inputs = NULL;
80129 + VEC(tree, gc) *outputs = NULL;
80130 +
80131 + call_stmt = gsi_stmt(*gsi);
80132 + old_fptr = gimple_call_fn(call_stmt);
80133 +
80134 + // create temporary fptr variable
80135 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
80136 + add_referenced_var(new_fptr);
80137 + mark_sym_for_renaming(new_fptr);
80138 +
80139 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
80140 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
80141 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
80142 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
80143 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
80144 + VEC_safe_push(tree, gc, inputs, input);
80145 + VEC_safe_push(tree, gc, outputs, output);
80146 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
80147 + gimple_asm_set_volatile(asm_or_stmt, true);
80148 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
80149 + update_stmt(asm_or_stmt);
80150 +
80151 + // replace call stmt fn with the new fptr
80152 + gimple_call_set_fn(call_stmt, new_fptr);
80153 + update_stmt(call_stmt);
80154 +}
80155 +
80156 +/*
80157 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
80158 + */
80159 +static unsigned int execute_kernexec_fptr(void)
80160 +{
80161 + basic_block bb;
80162 +
80163 + // 1. loop through BBs and GIMPLE statements
80164 + FOR_EACH_BB(bb) {
80165 + gimple_stmt_iterator gsi;
80166 +
80167 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80168 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
80169 + tree fn;
80170 + gimple call_stmt;
80171 +
80172 + // is it a call ...
80173 + call_stmt = gsi_stmt(gsi);
80174 + if (!is_gimple_call(call_stmt))
80175 + continue;
80176 + fn = gimple_call_fn(call_stmt);
80177 + if (TREE_CODE(fn) == ADDR_EXPR)
80178 + continue;
80179 + if (TREE_CODE(fn) != SSA_NAME)
80180 + gcc_unreachable();
80181 +
80182 + // ... through a function pointer
80183 + fn = SSA_NAME_VAR(fn);
80184 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
80185 + continue;
80186 + fn = TREE_TYPE(fn);
80187 + if (TREE_CODE(fn) != POINTER_TYPE)
80188 + continue;
80189 + fn = TREE_TYPE(fn);
80190 + if (TREE_CODE(fn) != FUNCTION_TYPE)
80191 + continue;
80192 +
80193 + kernexec_instrument_fptr(&gsi);
80194 +
80195 +//debug_tree(gimple_call_fn(call_stmt));
80196 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80197 + }
80198 + }
80199 +
80200 + return 0;
80201 +}
80202 +
80203 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
80204 +static void kernexec_instrument_retaddr_bts(rtx insn)
80205 +{
80206 + rtx btsq;
80207 + rtvec argvec, constraintvec, labelvec;
80208 + int line;
80209 +
80210 + // create asm volatile("btsq $63,(%%rsp)":::)
80211 + argvec = rtvec_alloc(0);
80212 + constraintvec = rtvec_alloc(0);
80213 + labelvec = rtvec_alloc(0);
80214 + line = expand_location(RTL_LOCATION(insn)).line;
80215 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80216 + MEM_VOLATILE_P(btsq) = 1;
80217 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
80218 + emit_insn_before(btsq, insn);
80219 +}
80220 +
80221 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
80222 +static void kernexec_instrument_retaddr_or(rtx insn)
80223 +{
80224 + rtx orq;
80225 + rtvec argvec, constraintvec, labelvec;
80226 + int line;
80227 +
80228 + // create asm volatile("orq %%r10,(%%rsp)":::)
80229 + argvec = rtvec_alloc(0);
80230 + constraintvec = rtvec_alloc(0);
80231 + labelvec = rtvec_alloc(0);
80232 + line = expand_location(RTL_LOCATION(insn)).line;
80233 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80234 + MEM_VOLATILE_P(orq) = 1;
80235 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
80236 + emit_insn_before(orq, insn);
80237 +}
80238 +
80239 +/*
80240 + * find all asm level function returns and forcibly set the highest bit of the return address
80241 + */
80242 +static unsigned int execute_kernexec_retaddr(void)
80243 +{
80244 + rtx insn;
80245 +
80246 + // 1. find function returns
80247 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
80248 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
80249 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
80250 + rtx body;
80251 +
80252 + // is it a retn
80253 + if (!JUMP_P(insn))
80254 + continue;
80255 + body = PATTERN(insn);
80256 + if (GET_CODE(body) == PARALLEL)
80257 + body = XVECEXP(body, 0, 0);
80258 + if (GET_CODE(body) != RETURN)
80259 + continue;
80260 + kernexec_instrument_retaddr(insn);
80261 + }
80262 +
80263 +// print_simple_rtl(stderr, get_insns());
80264 +// print_rtl(stderr, get_insns());
80265 +
80266 + return 0;
80267 +}
80268 +
80269 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80270 +{
80271 + const char * const plugin_name = plugin_info->base_name;
80272 + const int argc = plugin_info->argc;
80273 + const struct plugin_argument * const argv = plugin_info->argv;
80274 + int i;
80275 + struct register_pass_info kernexec_reload_pass_info = {
80276 + .pass = &kernexec_reload_pass.pass,
80277 + .reference_pass_name = "ssa",
80278 + .ref_pass_instance_number = 0,
80279 + .pos_op = PASS_POS_INSERT_AFTER
80280 + };
80281 + struct register_pass_info kernexec_fptr_pass_info = {
80282 + .pass = &kernexec_fptr_pass.pass,
80283 + .reference_pass_name = "ssa",
80284 + .ref_pass_instance_number = 0,
80285 + .pos_op = PASS_POS_INSERT_AFTER
80286 + };
80287 + struct register_pass_info kernexec_retaddr_pass_info = {
80288 + .pass = &kernexec_retaddr_pass.pass,
80289 + .reference_pass_name = "pro_and_epilogue",
80290 + .ref_pass_instance_number = 0,
80291 + .pos_op = PASS_POS_INSERT_AFTER
80292 + };
80293 +
80294 + if (!plugin_default_version_check(version, &gcc_version)) {
80295 + error(G_("incompatible gcc/plugin versions"));
80296 + return 1;
80297 + }
80298 +
80299 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
80300 +
80301 + if (TARGET_64BIT == 0)
80302 + return 0;
80303 +
80304 + for (i = 0; i < argc; ++i) {
80305 + if (!strcmp(argv[i].key, "method")) {
80306 + if (!argv[i].value) {
80307 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80308 + continue;
80309 + }
80310 + if (!strcmp(argv[i].value, "bts")) {
80311 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
80312 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
80313 + } else if (!strcmp(argv[i].value, "or")) {
80314 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
80315 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
80316 + fix_register("r10", 1, 1);
80317 + } else
80318 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80319 + continue;
80320 + }
80321 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80322 + }
80323 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
80324 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
80325 +
80326 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
80327 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
80328 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
80329 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
80330 +
80331 + return 0;
80332 +}
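Usage sketch for the option handling above (assumption: the plugin is built as kernexec_plugin.so, making plugin_info->base_name "kernexec_plugin"; substitute whatever name the build actually produces):
/*
 *   gcc -fplugin=./kernexec_plugin.so \
 *       -fplugin-arg-kernexec_plugin-method=or ...    # or: ...-method=bts
 *
 * Omitting the "method" argument, or passing a value other than "bts"/"or",
 * hits the error() calls above and leaves both instrumentation hooks NULL,
 * which the final sanity check then reports.
 */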
80333 diff --git a/tools/gcc/size_overflow_hash.h b/tools/gcc/size_overflow_hash.h
80334 new file mode 100644
80335 index 0000000..41de68c
80336 --- /dev/null
80337 +++ b/tools/gcc/size_overflow_hash.h
80338 @@ -0,0 +1,13146 @@
80339 +struct size_overflow_hash _000001_hash = {
80340 + .next = NULL,
80341 + .name = "alloc_dr",
80342 + .file = "drivers/base/devres.c",
80343 + .param2 = 1,
80344 +};
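An editorial gloss on the generated entries that follow (the struct definition and the code that consults this table belong to the size_overflow gcc plugin elsewhere in the patch and are not part of this hunk):
/*
 * Each entry marks one function whose size-like argument(s) the size_overflow
 * plugin should instrument:
 *   .name   - function name as seen by the compiler
 *   .file   - where it is declared/defined (disambiguates duplicates such as
 *             the several "setkey" entries further down)
 *   .paramN - 1 when the N-th parameter is a size value to track
 *   .next   - hash-collision chain (see e.g. _000132_hash, which links back
 *             to _000102_hash)
 *
 * A lookup would plausibly walk the chain and compare names, roughly
 * (hypothetical sketch, not the plugin's actual code):
 *
 *   for (entry = bucket; entry; entry = entry->next)
 *       if (!strcmp(entry->name, fnname) && !strcmp(entry->file, fnfile))
 *           return entry;
 */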
80345 +struct size_overflow_hash _000002_hash = {
80346 + .next = NULL,
80347 + .name = "__copy_from_user",
80348 + .file = "arch/x86/include/asm/uaccess_32.h",
80349 + .param3 = 1,
80350 +};
80351 +struct size_overflow_hash _000003_hash = {
80352 + .next = NULL,
80353 + .name = "copy_from_user",
80354 + .file = "arch/x86/include/asm/uaccess_32.h",
80355 + .param3 = 1,
80356 +};
80357 +struct size_overflow_hash _000004_hash = {
80358 + .next = NULL,
80359 + .name = "__copy_from_user_inatomic",
80360 + .file = "arch/x86/include/asm/uaccess_32.h",
80361 + .param3 = 1,
80362 +};
80363 +struct size_overflow_hash _000005_hash = {
80364 + .next = NULL,
80365 + .name = "__copy_from_user_nocache",
80366 + .file = "arch/x86/include/asm/uaccess_32.h",
80367 + .param3 = 1,
80368 +};
80369 +struct size_overflow_hash _000006_hash = {
80370 + .next = NULL,
80371 + .name = "__copy_to_user_inatomic",
80372 + .file = "arch/x86/include/asm/uaccess_32.h",
80373 + .param3 = 1,
80374 +};
80375 +struct size_overflow_hash _000007_hash = {
80376 + .next = NULL,
80377 + .name = "do_xip_mapping_read",
80378 + .file = "mm/filemap_xip.c",
80379 + .param5 = 1,
80380 +};
80381 +struct size_overflow_hash _000008_hash = {
80382 + .next = NULL,
80383 + .name = "hugetlbfs_read",
80384 + .file = "fs/hugetlbfs/inode.c",
80385 + .param3 = 1,
80386 +};
80387 +struct size_overflow_hash _000009_hash = {
80388 + .next = NULL,
80389 + .name = "kcalloc",
80390 + .file = "include/linux/slab.h",
80391 + .param1 = 1,
80392 + .param2 = 1,
80393 +};
80394 +struct size_overflow_hash _000011_hash = {
80395 + .next = NULL,
80396 + .name = "kmalloc",
80397 + .file = "include/linux/slub_def.h",
80398 + .param1 = 1,
80399 +};
80400 +struct size_overflow_hash _000012_hash = {
80401 + .next = NULL,
80402 + .name = "kmalloc_slab",
80403 + .file = "include/linux/slub_def.h",
80404 + .param1 = 1,
80405 +};
80406 +struct size_overflow_hash _000013_hash = {
80407 + .next = NULL,
80408 + .name = "kmemdup",
80409 + .file = "include/linux/string.h",
80410 + .param2 = 1,
80411 +};
80412 +struct size_overflow_hash _000014_hash = {
80413 + .next = NULL,
80414 + .name = "__krealloc",
80415 + .file = "include/linux/slab.h",
80416 + .param2 = 1,
80417 +};
80418 +struct size_overflow_hash _000015_hash = {
80419 + .next = NULL,
80420 + .name = "memdup_user",
80421 + .file = "include/linux/string.h",
80422 + .param2 = 1,
80423 +};
80424 +struct size_overflow_hash _000016_hash = {
80425 + .next = NULL,
80426 + .name = "module_alloc",
80427 + .file = "include/linux/moduleloader.h",
80428 + .param1 = 1,
80429 +};
80430 +struct size_overflow_hash _000017_hash = {
80431 + .next = NULL,
80432 + .name = "read_default_ldt",
80433 + .file = "arch/x86/kernel/ldt.c",
80434 + .param2 = 1,
80435 +};
80436 +struct size_overflow_hash _000018_hash = {
80437 + .next = NULL,
80438 + .name = "read_kcore",
80439 + .file = "fs/proc/kcore.c",
80440 + .param3 = 1,
80441 +};
80442 +struct size_overflow_hash _000019_hash = {
80443 + .next = NULL,
80444 + .name = "read_ldt",
80445 + .file = "arch/x86/kernel/ldt.c",
80446 + .param2 = 1,
80447 +};
80448 +struct size_overflow_hash _000020_hash = {
80449 + .next = NULL,
80450 + .name = "read_zero",
80451 + .file = "drivers/char/mem.c",
80452 + .param3 = 1,
80453 +};
80454 +struct size_overflow_hash _000021_hash = {
80455 + .next = NULL,
80456 + .name = "__vmalloc_node",
80457 + .file = "mm/vmalloc.c",
80458 + .param1 = 1,
80459 +};
80460 +struct size_overflow_hash _000022_hash = {
80461 + .next = NULL,
80462 + .name = "vm_map_ram",
80463 + .file = "include/linux/vmalloc.h",
80464 + .param2 = 1,
80465 +};
80466 +struct size_overflow_hash _000023_hash = {
80467 + .next = NULL,
80468 + .name = "aa_simple_write_to_buffer",
80469 + .file = "security/apparmor/apparmorfs.c",
80470 + .param4 = 1,
80471 +};
80472 +struct size_overflow_hash _000024_hash = {
80473 + .next = NULL,
80474 + .name = "ablkcipher_copy_iv",
80475 + .file = "crypto/ablkcipher.c",
80476 + .param3 = 1,
80477 +};
80478 +struct size_overflow_hash _000025_hash = {
80479 + .next = NULL,
80480 + .name = "ablkcipher_next_slow",
80481 + .file = "crypto/ablkcipher.c",
80482 + .param4 = 1,
80483 +};
80484 +struct size_overflow_hash _000026_hash = {
80485 + .next = NULL,
80486 + .name = "acpi_os_allocate",
80487 + .file = "include/acpi/platform/aclinux.h",
80488 + .param1 = 1,
80489 +};
80490 +struct size_overflow_hash _000027_hash = {
80491 + .next = NULL,
80492 + .name = "acpi_system_write_wakeup_device",
80493 + .file = "drivers/acpi/proc.c",
80494 + .param3 = 1,
80495 +};
80496 +struct size_overflow_hash _000028_hash = {
80497 + .next = NULL,
80498 + .name = "ahash_setkey_unaligned",
80499 + .file = "crypto/ahash.c",
80500 + .param3 = 1,
80501 +};
80502 +struct size_overflow_hash _000029_hash = {
80503 + .next = NULL,
80504 + .name = "alloc_fdmem",
80505 + .file = "fs/file.c",
80506 + .param1 = 1,
80507 +};
80508 +struct size_overflow_hash _000030_hash = {
80509 + .next = NULL,
80510 + .name = "audit_unpack_string",
80511 + .file = "kernel/auditfilter.c",
80512 + .param3 = 1,
80513 +};
80514 +struct size_overflow_hash _000031_hash = {
80515 + .next = NULL,
80516 + .name = "bio_alloc_map_data",
80517 + .file = "fs/bio.c",
80518 + .param2 = 1,
80519 +};
80520 +struct size_overflow_hash _000032_hash = {
80521 + .next = NULL,
80522 + .name = "bio_kmalloc",
80523 + .file = "include/linux/bio.h",
80524 + .param2 = 1,
80525 +};
80526 +struct size_overflow_hash _000033_hash = {
80527 + .next = NULL,
80528 + .name = "blkcipher_copy_iv",
80529 + .file = "crypto/blkcipher.c",
80530 + .param3 = 1,
80531 +};
80532 +struct size_overflow_hash _000034_hash = {
80533 + .next = NULL,
80534 + .name = "blkcipher_next_slow",
80535 + .file = "crypto/blkcipher.c",
80536 + .param4 = 1,
80537 +};
80538 +struct size_overflow_hash _000035_hash = {
80539 + .next = NULL,
80540 + .name = "cgroup_write_string",
80541 + .file = "kernel/cgroup.c",
80542 + .param5 = 1,
80543 +};
80544 +struct size_overflow_hash _000036_hash = {
80545 + .next = NULL,
80546 + .name = "cgroup_write_X64",
80547 + .file = "kernel/cgroup.c",
80548 + .param5 = 1,
80549 +};
80550 +struct size_overflow_hash _000037_hash = {
80551 + .next = NULL,
80552 + .name = "clear_refs_write",
80553 + .file = "fs/proc/task_mmu.c",
80554 + .param3 = 1,
80555 +};
80556 +struct size_overflow_hash _000038_hash = {
80557 + .next = NULL,
80558 + .name = "comm_write",
80559 + .file = "fs/proc/base.c",
80560 + .param3 = 1,
80561 +};
80562 +struct size_overflow_hash _000039_hash = {
80563 + .next = NULL,
80564 + .name = "copy_and_check",
80565 + .file = "kernel/module.c",
80566 + .param3 = 1,
80567 +};
80568 +struct size_overflow_hash _000040_hash = {
80569 + .next = NULL,
80570 + .name = "__copy_to_user",
80571 + .file = "arch/x86/include/asm/uaccess_32.h",
80572 + .param3 = 1,
80573 +};
80574 +struct size_overflow_hash _000041_hash = {
80575 + .next = NULL,
80576 + .name = "copy_vm86_regs_from_user",
80577 + .file = "arch/x86/kernel/vm86_32.c",
80578 + .param3 = 1,
80579 +};
80580 +struct size_overflow_hash _000042_hash = {
80581 + .next = NULL,
80582 + .name = "csum_partial_copy_fromiovecend",
80583 + .file = "include/linux/socket.h",
80584 + .param4 = 1,
80585 +};
80586 +struct size_overflow_hash _000043_hash = {
80587 + .next = NULL,
80588 + .name = "ddebug_proc_write",
80589 + .file = "lib/dynamic_debug.c",
80590 + .param3 = 1,
80591 +};
80592 +struct size_overflow_hash _000044_hash = {
80593 + .next = NULL,
80594 + .name = "devm_kzalloc",
80595 + .file = "include/linux/device.h",
80596 + .param2 = 1,
80597 +};
80598 +struct size_overflow_hash _000045_hash = {
80599 + .next = NULL,
80600 + .name = "devres_alloc",
80601 + .file = "include/linux/device.h",
80602 + .param2 = 1,
80603 +};
80604 +struct size_overflow_hash _000046_hash = {
80605 + .next = NULL,
80606 + .name = "do_ip_setsockopt",
80607 + .file = "net/ipv4/ip_sockglue.c",
80608 + .param5 = 1,
80609 +};
80610 +struct size_overflow_hash _000047_hash = {
80611 + .next = NULL,
80612 + .name = "do_kimage_alloc",
80613 + .file = "kernel/kexec.c",
80614 + .param3 = 1,
80615 +};
80616 +struct size_overflow_hash _000048_hash = {
80617 + .next = NULL,
80618 + .name = "do_tty_write",
80619 + .file = "drivers/tty/tty_io.c",
80620 + .param5 = 1,
80621 +};
80622 +struct size_overflow_hash _000049_hash = {
80623 + .next = NULL,
80624 + .name = "fanotify_write",
80625 + .file = "fs/notify/fanotify/fanotify_user.c",
80626 + .param3 = 1,
80627 +};
80628 +struct size_overflow_hash _000050_hash = {
80629 + .next = NULL,
80630 + .name = "file_read_actor",
80631 + .file = "include/linux/fs.h",
80632 + .param4 = 1,
80633 +};
80634 +struct size_overflow_hash _000051_hash = {
80635 + .next = NULL,
80636 + .name = "fill_write_buffer",
80637 + .file = "fs/sysfs/file.c",
80638 + .param3 = 1,
80639 +};
80640 +struct size_overflow_hash _000052_hash = {
80641 + .next = NULL,
80642 + .name = "get_user_cpu_mask",
80643 + .file = "kernel/sched/core.c",
80644 + .param2 = 1,
80645 +};
80646 +struct size_overflow_hash _000053_hash = {
80647 + .next = NULL,
80648 + .name = "hashtab_create",
80649 + .file = "security/selinux/ss/hashtab.c",
80650 + .param3 = 1,
80651 +};
80652 +struct size_overflow_hash _000054_hash = {
80653 + .next = NULL,
80654 + .name = "heap_init",
80655 + .file = "include/linux/prio_heap.h",
80656 + .param2 = 1,
80657 +};
80658 +struct size_overflow_hash _000055_hash = {
80659 + .next = NULL,
80660 + .name = "hest_ghes_dev_register",
80661 + .file = "drivers/acpi/apei/hest.c",
80662 + .param1 = 1,
80663 +};
80664 +struct size_overflow_hash _000056_hash = {
80665 + .next = NULL,
80666 + .name = "ima_write_policy",
80667 + .file = "security/integrity/ima/ima_fs.c",
80668 + .param3 = 1,
80669 +};
80670 +struct size_overflow_hash _000057_hash = {
80671 + .next = NULL,
80672 + .name = "input_ff_create",
80673 + .file = "include/linux/input.h",
80674 + .param2 = 1,
80675 +};
80676 +struct size_overflow_hash _000058_hash = {
80677 + .next = NULL,
80678 + .name = "input_mt_init_slots",
80679 + .file = "include/linux/input/mt.h",
80680 + .param2 = 1,
80681 +};
80682 +struct size_overflow_hash _000059_hash = {
80683 + .next = NULL,
80684 + .name = "iov_iter_copy_from_user",
80685 + .file = "include/linux/fs.h",
80686 + .param4 = 1,
80687 +};
80688 +struct size_overflow_hash _000060_hash = {
80689 + .next = NULL,
80690 + .name = "iov_iter_copy_from_user_atomic",
80691 + .file = "include/linux/fs.h",
80692 + .param4 = 1,
80693 +};
80694 +struct size_overflow_hash _000061_hash = {
80695 + .next = NULL,
80696 + .name = "keyctl_instantiate_key_common",
80697 + .file = "security/keys/keyctl.c",
80698 + .param4 = 1,
80699 +};
80700 +struct size_overflow_hash _000062_hash = {
80701 + .next = NULL,
80702 + .name = "keyctl_update_key",
80703 + .file = "security/keys/keyctl.c",
80704 + .param3 = 1,
80705 +};
80706 +struct size_overflow_hash _000063_hash = {
80707 + .next = NULL,
80708 + .name = "__kfifo_alloc",
80709 + .file = "include/linux/kfifo.h",
80710 + .param2 = 1,
80711 + .param3 = 1,
80712 +};
80713 +struct size_overflow_hash _000065_hash = {
80714 + .next = NULL,
80715 + .name = "kfifo_copy_from_user",
80716 + .file = "kernel/kfifo.c",
80717 + .param3 = 1,
80718 +};
80719 +struct size_overflow_hash _000066_hash = {
80720 + .next = NULL,
80721 + .name = "kmalloc_node",
80722 + .file = "include/linux/slab.h",
80723 + .param1 = 1,
80724 +};
80725 +struct size_overflow_hash _000067_hash = {
80726 + .next = NULL,
80727 + .name = "kmalloc_parameter",
80728 + .file = "kernel/params.c",
80729 + .param1 = 1,
80730 +};
80731 +struct size_overflow_hash _000068_hash = {
80732 + .next = NULL,
80733 + .name = "kobj_map",
80734 + .file = "include/linux/kobj_map.h",
80735 + .param2 = 1,
80736 + .param3 = 1,
80737 +};
80738 +struct size_overflow_hash _000070_hash = {
80739 + .next = NULL,
80740 + .name = "krealloc",
80741 + .file = "include/linux/slab.h",
80742 + .param2 = 1,
80743 +};
80744 +struct size_overflow_hash _000071_hash = {
80745 + .next = NULL,
80746 + .name = "kvmalloc",
80747 + .file = "security/apparmor/lib.c",
80748 + .param1 = 1,
80749 +};
80750 +struct size_overflow_hash _000072_hash = {
80751 + .next = NULL,
80752 + .name = "kzalloc",
80753 + .file = "include/linux/slab.h",
80754 + .param1 = 1,
80755 +};
80756 +struct size_overflow_hash _000073_hash = {
80757 + .next = NULL,
80758 + .name = "listxattr",
80759 + .file = "fs/xattr.c",
80760 + .param3 = 1,
80761 +};
80762 +struct size_overflow_hash _000074_hash = {
80763 + .next = NULL,
80764 + .name = "mempool_kmalloc",
80765 + .file = "include/linux/mempool.h",
80766 + .param2 = 1,
80767 +};
80768 +struct size_overflow_hash _000075_hash = {
80769 + .next = NULL,
80770 + .name = "mem_rw",
80771 + .file = "fs/proc/base.c",
80772 + .param3 = 1,
80773 +};
80774 +struct size_overflow_hash _000076_hash = {
80775 + .next = NULL,
80776 + .name = "module_alloc_update_bounds",
80777 + .file = "kernel/module.c",
80778 + .param1 = 1,
80779 +};
80780 +struct size_overflow_hash _000077_hash = {
80781 + .next = NULL,
80782 + .name = "mpi_alloc_limb_space",
80783 + .file = "lib/mpi/mpiutil.c",
80784 + .param1 = 1,
80785 +};
80786 +struct size_overflow_hash _000078_hash = {
80787 + .next = NULL,
80788 + .name = "mpi_resize",
80789 + .file = "include/linux/mpi.h",
80790 + .param2 = 1,
80791 +};
80792 +struct size_overflow_hash _000079_hash = {
80793 + .next = NULL,
80794 + .name = "mtrr_write",
80795 + .file = "arch/x86/kernel/cpu/mtrr/if.c",
80796 + .param3 = 1,
80797 +};
80798 +struct size_overflow_hash _000080_hash = {
80799 + .next = NULL,
80800 + .name = "oom_adjust_write",
80801 + .file = "fs/proc/base.c",
80802 + .param3 = 1,
80803 +};
80804 +struct size_overflow_hash _000081_hash = {
80805 + .next = NULL,
80806 + .name = "oom_score_adj_write",
80807 + .file = "fs/proc/base.c",
80808 + .param3 = 1,
80809 +};
80810 +struct size_overflow_hash _000082_hash = {
80811 + .next = NULL,
80812 + .name = "pipe_iov_copy_from_user",
80813 + .file = "fs/pipe.c",
80814 + .param3 = 1,
80815 +};
80816 +struct size_overflow_hash _000083_hash = {
80817 + .next = NULL,
80818 + .name = "pipe_iov_copy_to_user",
80819 + .file = "fs/pipe.c",
80820 + .param3 = 1,
80821 +};
80822 +struct size_overflow_hash _000084_hash = {
80823 + .next = NULL,
80824 + .name = "pipe_set_size",
80825 + .file = "fs/pipe.c",
80826 + .param2 = 1,
80827 +};
80828 +struct size_overflow_hash _000085_hash = {
80829 + .next = NULL,
80830 + .name = "platform_device_add_data",
80831 + .file = "include/linux/platform_device.h",
80832 + .param3 = 1,
80833 +};
80834 +struct size_overflow_hash _000086_hash = {
80835 + .next = NULL,
80836 + .name = "platform_device_add_resources",
80837 + .file = "include/linux/platform_device.h",
80838 + .param3 = 1,
80839 +};
80840 +struct size_overflow_hash _000087_hash = {
80841 + .next = NULL,
80842 + .name = "pm_qos_power_write",
80843 + .file = "kernel/power/qos.c",
80844 + .param3 = 1,
80845 +};
80846 +struct size_overflow_hash _000088_hash = {
80847 + .next = NULL,
80848 + .name = "pnpbios_proc_write",
80849 + .file = "drivers/pnp/pnpbios/proc.c",
80850 + .param3 = 1,
80851 +};
80852 +struct size_overflow_hash _000089_hash = {
80853 + .next = NULL,
80854 + .name = "__probe_kernel_read",
80855 + .file = "include/linux/uaccess.h",
80856 + .param3 = 1,
80857 +};
80858 +struct size_overflow_hash _000090_hash = {
80859 + .next = NULL,
80860 + .name = "__probe_kernel_write",
80861 + .file = "include/linux/uaccess.h",
80862 + .param3 = 1,
80863 +};
80864 +struct size_overflow_hash _000091_hash = {
80865 + .next = NULL,
80866 + .name = "proc_coredump_filter_write",
80867 + .file = "fs/proc/base.c",
80868 + .param3 = 1,
80869 +};
80870 +struct size_overflow_hash _000092_hash = {
80871 + .next = NULL,
80872 + .name = "process_vm_rw_pages",
80873 + .file = "mm/process_vm_access.c",
80874 + .param5 = 1,
80875 + .param6 = 1,
80876 +};
80877 +struct size_overflow_hash _000094_hash = {
80878 + .next = NULL,
80879 + .name = "proc_loginuid_write",
80880 + .file = "fs/proc/base.c",
80881 + .param3 = 1,
80882 +};
80883 +struct size_overflow_hash _000095_hash = {
80884 + .next = NULL,
80885 + .name = "proc_pid_attr_write",
80886 + .file = "fs/proc/base.c",
80887 + .param3 = 1,
80888 +};
80889 +struct size_overflow_hash _000096_hash = {
80890 + .next = NULL,
80891 + .name = "pstore_mkfile",
80892 + .file = "fs/pstore/inode.c",
80893 + .param5 = 1,
80894 +};
80895 +struct size_overflow_hash _000097_hash = {
80896 + .next = NULL,
80897 + .name = "qdisc_class_hash_alloc",
80898 + .file = "net/sched/sch_api.c",
80899 + .param1 = 1,
80900 +};
80901 +struct size_overflow_hash _000098_hash = {
80902 + .next = NULL,
80903 + .name = "read",
80904 + .file = "fs/sysfs/bin.c",
80905 + .param3 = 1,
80906 +};
80907 +struct size_overflow_hash _000099_hash = {
80908 + .next = NULL,
80909 + .name = "regmap_access_read_file",
80910 + .file = "drivers/base/regmap/regmap-debugfs.c",
80911 + .param3 = 1,
80912 +};
80913 +struct size_overflow_hash _000100_hash = {
80914 + .next = NULL,
80915 + .name = "regmap_map_read_file",
80916 + .file = "drivers/base/regmap/regmap-debugfs.c",
80917 + .param3 = 1,
80918 +};
80919 +struct size_overflow_hash _000101_hash = {
80920 + .next = NULL,
80921 + .name = "_regmap_raw_write",
80922 + .file = "drivers/base/regmap/regmap.c",
80923 + .param4 = 1,
80924 +};
80925 +struct size_overflow_hash _000102_hash = {
80926 + .next = NULL,
80927 + .name = "regset_tls_set",
80928 + .file = "arch/x86/kernel/tls.c",
80929 + .param4 = 1,
80930 +};
80931 +struct size_overflow_hash _000103_hash = {
80932 + .next = NULL,
80933 + .name = "request_key_auth_new",
80934 + .file = "security/keys/request_key_auth.c",
80935 + .param3 = 1,
80936 +};
80937 +struct size_overflow_hash _000104_hash = {
80938 + .next = NULL,
80939 + .name = "restore_i387_fxsave",
80940 + .file = "arch/x86/kernel/i387.c",
80941 + .param2 = 1,
80942 +};
80943 +struct size_overflow_hash _000105_hash = {
80944 + .next = NULL,
80945 + .name = "rngapi_reset",
80946 + .file = "crypto/rng.c",
80947 + .param3 = 1,
80948 +};
80949 +struct size_overflow_hash _000106_hash = {
80950 + .next = NULL,
80951 + .name = "rw_copy_check_uvector",
80952 + .file = "include/linux/fs.h",
80953 + .param3 = 1,
80954 +};
80955 +struct size_overflow_hash _000107_hash = {
80956 + .next = NULL,
80957 + .name = "sched_autogroup_write",
80958 + .file = "fs/proc/base.c",
80959 + .param3 = 1,
80960 +};
80961 +struct size_overflow_hash _000108_hash = {
80962 + .next = NULL,
80963 + .name = "security_context_to_sid_core",
80964 + .file = "security/selinux/ss/services.c",
80965 + .param2 = 1,
80966 +};
80967 +struct size_overflow_hash _000109_hash = {
80968 + .next = NULL,
80969 + .name = "sel_commit_bools_write",
80970 + .file = "security/selinux/selinuxfs.c",
80971 + .param3 = 1,
80972 +};
80973 +struct size_overflow_hash _000110_hash = {
80974 + .next = NULL,
80975 + .name = "sel_write_avc_cache_threshold",
80976 + .file = "security/selinux/selinuxfs.c",
80977 + .param3 = 1,
80978 +};
80979 +struct size_overflow_hash _000111_hash = {
80980 + .next = NULL,
80981 + .name = "sel_write_bool",
80982 + .file = "security/selinux/selinuxfs.c",
80983 + .param3 = 1,
80984 +};
80985 +struct size_overflow_hash _000112_hash = {
80986 + .next = NULL,
80987 + .name = "sel_write_checkreqprot",
80988 + .file = "security/selinux/selinuxfs.c",
80989 + .param3 = 1,
80990 +};
80991 +struct size_overflow_hash _000113_hash = {
80992 + .next = NULL,
80993 + .name = "sel_write_disable",
80994 + .file = "security/selinux/selinuxfs.c",
80995 + .param3 = 1,
80996 +};
80997 +struct size_overflow_hash _000114_hash = {
80998 + .next = NULL,
80999 + .name = "sel_write_enforce",
81000 + .file = "security/selinux/selinuxfs.c",
81001 + .param3 = 1,
81002 +};
81003 +struct size_overflow_hash _000115_hash = {
81004 + .next = NULL,
81005 + .name = "sel_write_load",
81006 + .file = "security/selinux/selinuxfs.c",
81007 + .param3 = 1,
81008 +};
81009 +struct size_overflow_hash _000116_hash = {
81010 + .next = NULL,
81011 + .name = "setkey_unaligned",
81012 + .file = "crypto/ablkcipher.c",
81013 + .param3 = 1,
81014 +};
81015 +struct size_overflow_hash _000117_hash = {
81016 + .next = NULL,
81017 + .name = "setkey_unaligned",
81018 + .file = "crypto/blkcipher.c",
81019 + .param3 = 1,
81020 +};
81021 +struct size_overflow_hash _000118_hash = {
81022 + .next = NULL,
81023 + .name = "setkey_unaligned",
81024 + .file = "crypto/aead.c",
81025 + .param3 = 1,
81026 +};
81027 +struct size_overflow_hash _000119_hash = {
81028 + .next = NULL,
81029 + .name = "setkey_unaligned",
81030 + .file = "crypto/cipher.c",
81031 + .param3 = 1,
81032 +};
81033 +struct size_overflow_hash _000120_hash = {
81034 + .next = NULL,
81035 + .name = "setxattr",
81036 + .file = "fs/xattr.c",
81037 + .param4 = 1,
81038 +};
81039 +struct size_overflow_hash _000121_hash = {
81040 + .next = NULL,
81041 + .name = "sg_kmalloc",
81042 + .file = "lib/scatterlist.c",
81043 + .param1 = 1,
81044 +};
81045 +struct size_overflow_hash _000122_hash = {
81046 + .next = NULL,
81047 + .name = "shash_setkey_unaligned",
81048 + .file = "crypto/shash.c",
81049 + .param3 = 1,
81050 +};
81051 +struct size_overflow_hash _000123_hash = {
81052 + .next = NULL,
81053 + .name = "shmem_xattr_set",
81054 + .file = "mm/shmem.c",
81055 + .param4 = 1,
81056 +};
81057 +struct size_overflow_hash _000124_hash = {
81058 + .next = NULL,
81059 + .name = "simple_transaction_get",
81060 + .file = "include/linux/fs.h",
81061 + .param3 = 1,
81062 +};
81063 +struct size_overflow_hash _000125_hash = {
81064 + .next = NULL,
81065 + .name = "simple_write_to_buffer",
81066 + .file = "include/linux/fs.h",
81067 + .param2 = 1,
81068 + .param5 = 1,
81069 +};
81070 +struct size_overflow_hash _000127_hash = {
81071 + .next = NULL,
81072 + .name = "smk_write_ambient",
81073 + .file = "security/smack/smackfs.c",
81074 + .param3 = 1,
81075 +};
81076 +struct size_overflow_hash _000128_hash = {
81077 + .next = NULL,
81078 + .name = "smk_write_cipso",
81079 + .file = "security/smack/smackfs.c",
81080 + .param3 = 1,
81081 +};
81082 +struct size_overflow_hash _000129_hash = {
81083 + .next = NULL,
81084 + .name = "smk_write_direct",
81085 + .file = "security/smack/smackfs.c",
81086 + .param3 = 1,
81087 +};
81088 +struct size_overflow_hash _000130_hash = {
81089 + .next = NULL,
81090 + .name = "smk_write_doi",
81091 + .file = "security/smack/smackfs.c",
81092 + .param3 = 1,
81093 +};
81094 +struct size_overflow_hash _000131_hash = {
81095 + .next = NULL,
81096 + .name = "smk_write_load_list",
81097 + .file = "security/smack/smackfs.c",
81098 + .param3 = 1,
81099 +};
81100 +struct size_overflow_hash _000132_hash = {
81101 + .next = &_000102_hash,
81102 + .name = "smk_write_logging",
81103 + .file = "security/smack/smackfs.c",
81104 + .param3 = 1,
81105 +};
81106 +struct size_overflow_hash _000133_hash = {
81107 + .next = NULL,
81108 + .name = "smk_write_netlbladdr",
81109 + .file = "security/smack/smackfs.c",
81110 + .param3 = 1,
81111 +};
81112 +struct size_overflow_hash _000134_hash = {
81113 + .next = NULL,
81114 + .name = "smk_write_onlycap",
81115 + .file = "security/smack/smackfs.c",
81116 + .param3 = 1,
81117 +};
81118 +struct size_overflow_hash _000135_hash = {
81119 + .next = NULL,
81120 + .name = "sys_add_key",
81121 + .file = "include/linux/syscalls.h",
81122 + .param4 = 1,
81123 +};
81124 +struct size_overflow_hash _000136_hash = {
81125 + .next = NULL,
81126 + .name = "sys_modify_ldt",
81127 + .file = "arch/x86/include/asm/syscalls.h",
81128 + .param3 = 1,
81129 +};
81130 +struct size_overflow_hash _000137_hash = {
81131 + .next = NULL,
81132 + .name = "sys_semtimedop",
81133 + .file = "include/linux/syscalls.h",
81134 + .param3 = 1,
81135 +};
81136 +struct size_overflow_hash _000138_hash = {
81137 + .next = NULL,
81138 + .name = "tomoyo_write_self",
81139 + .file = "security/tomoyo/securityfs_if.c",
81140 + .param3 = 1,
81141 +};
81142 +struct size_overflow_hash _000139_hash = {
81143 + .next = NULL,
81144 + .name = "tpm_write",
81145 + .file = "drivers/char/tpm/tpm.c",
81146 + .param3 = 1,
81147 +};
81148 +struct size_overflow_hash _000140_hash = {
81149 + .next = NULL,
81150 + .name = "tty_buffer_alloc",
81151 + .file = "drivers/tty/tty_buffer.c",
81152 + .param2 = 1,
81153 +};
81154 +struct size_overflow_hash _000141_hash = {
81155 + .next = NULL,
81156 + .name = "user_instantiate",
81157 + .file = "include/keys/user-type.h",
81158 + .param3 = 1,
81159 +};
81160 +struct size_overflow_hash _000142_hash = {
81161 + .next = NULL,
81162 + .name = "user_update",
81163 + .file = "include/keys/user-type.h",
81164 + .param3 = 1,
81165 +};
81166 +struct size_overflow_hash _000143_hash = {
81167 + .next = NULL,
81168 + .name = "vc_do_resize",
81169 + .file = "drivers/tty/vt/vt.c",
81170 + .param3 = 1,
81171 + .param4 = 1,
81172 +};
81173 +struct size_overflow_hash _000145_hash = {
81174 + .next = NULL,
81175 + .name = "vcs_write",
81176 + .file = "drivers/tty/vt/vc_screen.c",
81177 + .param3 = 1,
81178 +};
81179 +struct size_overflow_hash _000146_hash = {
81180 + .next = NULL,
81181 + .name = "vga_arb_write",
81182 + .file = "drivers/gpu/vga/vgaarb.c",
81183 + .param3 = 1,
81184 +};
81185 +struct size_overflow_hash _000147_hash = {
81186 + .next = NULL,
81187 + .name = "vga_switcheroo_debugfs_write",
81188 + .file = "drivers/gpu/vga/vga_switcheroo.c",
81189 + .param3 = 1,
81190 +};
81191 +struct size_overflow_hash _000148_hash = {
81192 + .next = NULL,
81193 + .name = "__vmalloc",
81194 + .file = "include/linux/vmalloc.h",
81195 + .param1 = 1,
81196 +};
81197 +struct size_overflow_hash _000149_hash = {
81198 + .next = NULL,
81199 + .name = "vmalloc_32",
81200 + .file = "include/linux/vmalloc.h",
81201 + .param1 = 1,
81202 +};
81203 +struct size_overflow_hash _000150_hash = {
81204 + .next = NULL,
81205 + .name = "vmalloc_32_user",
81206 + .file = "include/linux/vmalloc.h",
81207 + .param1 = 1,
81208 +};
81209 +struct size_overflow_hash _000151_hash = {
81210 + .next = NULL,
81211 + .name = "vmalloc_exec",
81212 + .file = "include/linux/vmalloc.h",
81213 + .param1 = 1,
81214 +};
81215 +struct size_overflow_hash _000152_hash = {
81216 + .next = NULL,
81217 + .name = "vmalloc_node",
81218 + .file = "include/linux/vmalloc.h",
81219 + .param1 = 1,
81220 +};
81221 +struct size_overflow_hash _000153_hash = {
81222 + .next = NULL,
81223 + .name = "__vmalloc_node_flags",
81224 + .file = "mm/vmalloc.c",
81225 + .param1 = 1,
81226 +};
81227 +struct size_overflow_hash _000154_hash = {
81228 + .next = NULL,
81229 + .name = "vmalloc_user",
81230 + .file = "include/linux/vmalloc.h",
81231 + .param1 = 1,
81232 +};
81233 +struct size_overflow_hash _000155_hash = {
81234 + .next = NULL,
81235 + .name = "write",
81236 + .file = "fs/sysfs/bin.c",
81237 + .param3 = 1,
81238 +};
81239 +struct size_overflow_hash _000156_hash = {
81240 + .next = NULL,
81241 + .name = "__xip_file_write",
81242 + .file = "mm/filemap_xip.c",
81243 + .param3 = 1,
81244 +};
81245 +struct size_overflow_hash _000157_hash = {
81246 + .next = NULL,
81247 + .name = "acpi_ex_allocate_name_string",
81248 + .file = "drivers/acpi/acpica/exnames.c",
81249 + .param2 = 1,
81250 +};
81251 +struct size_overflow_hash _000158_hash = {
81252 + .next = NULL,
81253 + .name = "acpi_os_allocate_zeroed",
81254 + .file = "include/acpi/platform/aclinux.h",
81255 + .param1 = 1,
81256 +};
81257 +struct size_overflow_hash _000159_hash = {
81258 + .next = NULL,
81259 + .name = "acpi_ut_initialize_buffer",
81260 + .file = "drivers/acpi/acpica/utalloc.c",
81261 + .param2 = 1,
81262 +};
81263 +struct size_overflow_hash _000160_hash = {
81264 + .next = NULL,
81265 + .name = "add_numbered_child",
81266 + .file = "drivers/mfd/twl-core.c",
81267 + .param5 = 1,
81268 +};
81269 +struct size_overflow_hash _000161_hash = {
81270 + .next = NULL,
81271 + .name = "___alloc_bootmem_nopanic",
81272 + .file = "mm/nobootmem.c",
81273 + .param1 = 1,
81274 +};
81275 +struct size_overflow_hash _000162_hash = {
81276 + .next = NULL,
81277 + .name = "alloc_large_system_hash",
81278 + .file = "include/linux/bootmem.h",
81279 + .param2 = 1,
81280 +};
81281 +struct size_overflow_hash _000163_hash = {
81282 + .next = NULL,
81283 + .name = "audit_init_entry",
81284 + .file = "kernel/auditfilter.c",
81285 + .param1 = 1,
81286 +};
81287 +struct size_overflow_hash _000164_hash = {
81288 + .next = NULL,
81289 + .name = "__bio_map_kern",
81290 + .file = "fs/bio.c",
81291 + .param2 = 1,
81292 + .param3 = 1,
81293 +};
81294 +struct size_overflow_hash _000166_hash = {
81295 + .next = NULL,
81296 + .name = "blk_register_region",
81297 + .file = "include/linux/genhd.h",
81298 + .param1 = 1,
81299 + .param2 = 1,
81300 +};
81301 +struct size_overflow_hash _000168_hash = {
81302 + .next = NULL,
81303 + .name = "cdev_add",
81304 + .file = "include/linux/cdev.h",
81305 + .param2 = 1,
81306 + .param3 = 1,
81307 +};
81308 +struct size_overflow_hash _000170_hash = {
81309 + .next = NULL,
81310 + .name = "copy_to_user",
81311 + .file = "arch/x86/include/asm/uaccess_32.h",
81312 + .param3 = 1,
81313 +};
81314 +struct size_overflow_hash _000171_hash = {
81315 + .next = NULL,
81316 + .name = "crypto_ahash_setkey",
81317 + .file = "include/crypto/hash.h",
81318 + .param3 = 1,
81319 +};
81320 +struct size_overflow_hash _000172_hash = {
81321 + .next = NULL,
81322 + .name = "crypto_alloc_instance2",
81323 + .file = "include/crypto/algapi.h",
81324 + .param3 = 1,
81325 +};
81326 +struct size_overflow_hash _000173_hash = {
81327 + .next = NULL,
81328 + .name = "crypto_shash_setkey",
81329 + .file = "include/crypto/hash.h",
81330 + .param3 = 1,
81331 +};
81332 +struct size_overflow_hash _000174_hash = {
81333 + .next = NULL,
81334 + .name = "dev_set_alias",
81335 + .file = "include/linux/netdevice.h",
81336 + .param3 = 1,
81337 +};
81338 +struct size_overflow_hash _000175_hash = {
81339 + .next = NULL,
81340 + .name = "do_readv_writev",
81341 + .file = "fs/read_write.c",
81342 + .param4 = 1,
81343 +};
81344 +struct size_overflow_hash _000176_hash = {
81345 + .next = NULL,
81346 + .name = "getxattr",
81347 + .file = "fs/xattr.c",
81348 + .param4 = 1,
81349 +};
81350 +struct size_overflow_hash _000177_hash = {
81351 + .next = NULL,
81352 + .name = "hugetlbfs_read_actor",
81353 + .file = "fs/hugetlbfs/inode.c",
81354 + .param2 = 1,
81355 + .param5 = 1,
81356 + .param4 = 1,
81357 +};
81358 +struct size_overflow_hash _000180_hash = {
81359 + .next = NULL,
81360 + .name = "keyctl_instantiate_key",
81361 + .file = "security/keys/keyctl.c",
81362 + .param3 = 1,
81363 +};
81364 +struct size_overflow_hash _000181_hash = {
81365 + .next = NULL,
81366 + .name = "keyctl_instantiate_key_iov",
81367 + .file = "security/keys/keyctl.c",
81368 + .param3 = 1,
81369 +};
81370 +struct size_overflow_hash _000182_hash = {
81371 + .next = NULL,
81372 + .name = "__kfifo_from_user",
81373 + .file = "include/linux/kfifo.h",
81374 + .param3 = 1,
81375 +};
81376 +struct size_overflow_hash _000183_hash = {
81377 + .next = NULL,
81378 + .name = "kimage_crash_alloc",
81379 + .file = "kernel/kexec.c",
81380 + .param3 = 1,
81381 +};
81382 +struct size_overflow_hash _000184_hash = {
81383 + .next = NULL,
81384 + .name = "kimage_normal_alloc",
81385 + .file = "kernel/kexec.c",
81386 + .param3 = 1,
81387 +};
81388 +struct size_overflow_hash _000185_hash = {
81389 + .next = NULL,
81390 + .name = "mpi_alloc",
81391 + .file = "include/linux/mpi.h",
81392 + .param1 = 1,
81393 +};
81394 +struct size_overflow_hash _000186_hash = {
81395 + .next = NULL,
81396 + .name = "mpi_set_bit",
81397 + .file = "include/linux/mpi.h",
81398 + .param2 = 1,
81399 +};
81400 +struct size_overflow_hash _000187_hash = {
81401 + .next = NULL,
81402 + .name = "mpi_set_highbit",
81403 + .file = "include/linux/mpi.h",
81404 + .param2 = 1,
81405 +};
81406 +struct size_overflow_hash _000188_hash = {
81407 + .next = NULL,
81408 + .name = "neigh_hash_alloc",
81409 + .file = "net/core/neighbour.c",
81410 + .param1 = 1,
81411 +};
81412 +struct size_overflow_hash _000189_hash = {
81413 + .next = NULL,
81414 + .name = "nl_pid_hash_zalloc",
81415 + .file = "net/netlink/af_netlink.c",
81416 + .param1 = 1,
81417 +};
81418 +struct size_overflow_hash _000190_hash = {
81419 + .next = NULL,
81420 + .name = "pci_add_cap_save_buffer",
81421 + .file = "drivers/pci/pci.c",
81422 + .param3 = 1,
81423 +};
81424 +struct size_overflow_hash _000191_hash = {
81425 + .next = NULL,
81426 + .name = "pcpu_mem_zalloc",
81427 + .file = "mm/percpu.c",
81428 + .param1 = 1,
81429 +};
81430 +struct size_overflow_hash _000192_hash = {
81431 + .next = NULL,
81432 + .name = "platform_create_bundle",
81433 + .file = "include/linux/platform_device.h",
81434 + .param4 = 1,
81435 + .param6 = 1,
81436 +};
81437 +struct size_overflow_hash _000194_hash = {
81438 + .next = NULL,
81439 + .name = "process_vm_rw",
81440 + .file = "mm/process_vm_access.c",
81441 + .param3 = 1,
81442 + .param5 = 1,
81443 +};
81444 +struct size_overflow_hash _000196_hash = {
81445 + .next = NULL,
81446 + .name = "process_vm_rw_single_vec",
81447 + .file = "mm/process_vm_access.c",
81448 + .param1 = 1,
81449 + .param2 = 1,
81450 +};
81451 +struct size_overflow_hash _000198_hash = {
81452 + .next = NULL,
81453 + .name = "profile_load",
81454 + .file = "security/apparmor/apparmorfs.c",
81455 + .param3 = 1,
81456 +};
81457 +struct size_overflow_hash _000199_hash = {
81458 + .next = NULL,
81459 + .name = "profile_remove",
81460 + .file = "security/apparmor/apparmorfs.c",
81461 + .param3 = 1,
81462 +};
81463 +struct size_overflow_hash _000200_hash = {
81464 + .next = NULL,
81465 + .name = "profile_replace",
81466 + .file = "security/apparmor/apparmorfs.c",
81467 + .param3 = 1,
81468 +};
81469 +struct size_overflow_hash _000201_hash = {
81470 + .next = NULL,
81471 + .name = "regcache_rbtree_insert_to_block",
81472 + .file = "drivers/base/regmap/regcache-rbtree.c",
81473 + .param5 = 1,
81474 +};
81475 +struct size_overflow_hash _000202_hash = {
81476 + .next = NULL,
81477 + .name = "regmap_raw_write",
81478 + .file = "include/linux/regmap.h",
81479 + .param4 = 1,
81480 +};
81481 +struct size_overflow_hash _000203_hash = {
81482 + .next = NULL,
81483 + .name = "relay_alloc_page_array",
81484 + .file = "kernel/relay.c",
81485 + .param1 = 1,
81486 +};
81487 +struct size_overflow_hash _000204_hash = {
81488 + .next = NULL,
81489 + .name = "RESIZE_IF_NEEDED",
81490 + .file = "lib/mpi/mpi-internal.h",
81491 + .param2 = 1,
81492 +};
81493 +struct size_overflow_hash _000205_hash = {
81494 + .next = NULL,
81495 + .name = "security_context_to_sid",
81496 + .file = "security/selinux/ss/services.c",
81497 + .param2 = 1,
81498 +};
81499 +struct size_overflow_hash _000206_hash = {
81500 + .next = NULL,
81501 + .name = "security_context_to_sid_default",
81502 + .file = "security/selinux/ss/services.c",
81503 + .param2 = 1,
81504 +};
81505 +struct size_overflow_hash _000207_hash = {
81506 + .next = NULL,
81507 + .name = "security_context_to_sid_force",
81508 + .file = "security/selinux/ss/services.c",
81509 + .param2 = 1,
81510 +};
81511 +struct size_overflow_hash _000208_hash = {
81512 + .next = NULL,
81513 + .name = "selinux_transaction_write",
81514 + .file = "security/selinux/selinuxfs.c",
81515 + .param3 = 1,
81516 +};
81517 +struct size_overflow_hash _000209_hash = {
81518 + .next = NULL,
81519 + .name = "sel_write_access",
81520 + .file = "security/selinux/selinuxfs.c",
81521 + .param3 = 1,
81522 +};
81523 +struct size_overflow_hash _000210_hash = {
81524 + .next = NULL,
81525 + .name = "sel_write_create",
81526 + .file = "security/selinux/selinuxfs.c",
81527 + .param3 = 1,
81528 +};
81529 +struct size_overflow_hash _000211_hash = {
81530 + .next = NULL,
81531 + .name = "sel_write_member",
81532 + .file = "security/selinux/selinuxfs.c",
81533 + .param3 = 1,
81534 +};
81535 +struct size_overflow_hash _000212_hash = {
81536 + .next = NULL,
81537 + .name = "sel_write_relabel",
81538 + .file = "security/selinux/selinuxfs.c",
81539 + .param3 = 1,
81540 +};
81541 +struct size_overflow_hash _000213_hash = {
81542 + .next = NULL,
81543 + .name = "sel_write_user",
81544 + .file = "security/selinux/selinuxfs.c",
81545 + .param3 = 1,
81546 +};
81547 +struct size_overflow_hash _000214_hash = {
81548 + .next = NULL,
81549 + .name = "setkey",
81550 + .file = "crypto/cipher.c",
81551 + .param3 = 1,
81552 +};
81553 +struct size_overflow_hash _000215_hash = {
81554 + .next = NULL,
81555 + .name = "setkey",
81556 + .file = "crypto/ablkcipher.c",
81557 + .param3 = 1,
81558 +};
81559 +struct size_overflow_hash _000216_hash = {
81560 + .next = NULL,
81561 + .name = "setkey",
81562 + .file = "crypto/aead.c",
81563 + .param3 = 1,
81564 +};
81565 +struct size_overflow_hash _000217_hash = {
81566 + .next = NULL,
81567 + .name = "setkey",
81568 + .file = "crypto/blkcipher.c",
81569 + .param3 = 1,
81570 +};
81571 +struct size_overflow_hash _000218_hash = {
81572 + .next = NULL,
81573 + .name = "smk_write_access",
81574 + .file = "security/smack/smackfs.c",
81575 + .param3 = 1,
81576 +};
81577 +struct size_overflow_hash _000219_hash = {
81578 + .next = NULL,
81579 + .name = "snapshot_write",
81580 + .file = "kernel/power/user.c",
81581 + .param3 = 1,
81582 +};
81583 +struct size_overflow_hash _000220_hash = {
81584 + .next = NULL,
81585 + .name = "spi_alloc_master",
81586 + .file = "include/linux/spi/spi.h",
81587 + .param2 = 1,
81588 +};
81589 +struct size_overflow_hash _000221_hash = {
81590 + .next = NULL,
81591 + .name = "spi_register_board_info",
81592 + .file = "include/linux/spi/spi.h",
81593 + .param2 = 1,
81594 +};
81595 +struct size_overflow_hash _000222_hash = {
81596 + .next = NULL,
81597 + .name = "sys_flistxattr",
81598 + .file = "include/linux/syscalls.h",
81599 + .param3 = 1,
81600 +};
81601 +struct size_overflow_hash _000223_hash = {
81602 + .next = NULL,
81603 + .name = "sys_fsetxattr",
81604 + .file = "include/linux/syscalls.h",
81605 + .param4 = 1,
81606 +};
81607 +struct size_overflow_hash _000224_hash = {
81608 + .next = NULL,
81609 + .name = "sysfs_write_file",
81610 + .file = "fs/sysfs/file.c",
81611 + .param3 = 1,
81612 +};
81613 +struct size_overflow_hash _000225_hash = {
81614 + .next = NULL,
81615 + .name = "sys_ipc",
81616 + .file = "include/linux/syscalls.h",
81617 + .param3 = 1,
81618 +};
81619 +struct size_overflow_hash _000226_hash = {
81620 + .next = NULL,
81621 + .name = "sys_keyctl",
81622 + .file = "include/linux/syscalls.h",
81623 + .param4 = 1,
81624 +};
81625 +struct size_overflow_hash _000227_hash = {
81626 + .next = NULL,
81627 + .name = "sys_listxattr",
81628 + .file = "include/linux/syscalls.h",
81629 + .param3 = 1,
81630 +};
81631 +struct size_overflow_hash _000228_hash = {
81632 + .next = NULL,
81633 + .name = "sys_llistxattr",
81634 + .file = "include/linux/syscalls.h",
81635 + .param3 = 1,
81636 +};
81637 +struct size_overflow_hash _000229_hash = {
81638 + .next = NULL,
81639 + .name = "sys_lsetxattr",
81640 + .file = "include/linux/syscalls.h",
81641 + .param4 = 1,
81642 +};
81643 +struct size_overflow_hash _000230_hash = {
81644 + .next = NULL,
81645 + .name = "sys_sched_setaffinity",
81646 + .file = "include/linux/syscalls.h",
81647 + .param2 = 1,
81648 +};
81649 +struct size_overflow_hash _000231_hash = {
81650 + .next = NULL,
81651 + .name = "sys_semop",
81652 + .file = "include/linux/syscalls.h",
81653 + .param3 = 1,
81654 +};
81655 +struct size_overflow_hash _000232_hash = {
81656 + .next = NULL,
81657 + .name = "sys_setxattr",
81658 + .file = "include/linux/syscalls.h",
81659 + .param4 = 1,
81660 +};
81661 +struct size_overflow_hash _000233_hash = {
81662 + .next = NULL,
81663 + .name = "tnode_alloc",
81664 + .file = "net/ipv4/fib_trie.c",
81665 + .param1 = 1,
81666 +};
81667 +struct size_overflow_hash _000234_hash = {
81668 + .next = NULL,
81669 + .name = "tomoyo_commit_ok",
81670 + .file = "security/tomoyo/memory.c",
81671 + .param2 = 1,
81672 +};
81673 +struct size_overflow_hash _000235_hash = {
81674 + .next = NULL,
81675 + .name = "tomoyo_scan_bprm",
81676 + .file = "security/tomoyo/condition.c",
81677 + .param2 = 1,
81678 + .param4 = 1,
81679 +};
81680 +struct size_overflow_hash _000237_hash = {
81681 + .next = NULL,
81682 + .name = "tty_write",
81683 + .file = "drivers/tty/tty_io.c",
81684 + .param3 = 1,
81685 +};
81686 +struct size_overflow_hash _000238_hash = {
81687 + .next = NULL,
81688 + .name = "vc_resize",
81689 + .file = "include/linux/vt_kern.h",
81690 + .param2 = 1,
81691 + .param3 = 1,
81692 +};
81693 +struct size_overflow_hash _000240_hash = {
81694 + .next = NULL,
81695 + .name = "vmalloc",
81696 + .file = "include/linux/vmalloc.h",
81697 + .param1 = 1,
81698 +};
81699 +struct size_overflow_hash _000241_hash = {
81700 + .next = NULL,
81701 + .name = "vzalloc",
81702 + .file = "include/linux/vmalloc.h",
81703 + .param1 = 1,
81704 +};
81705 +struct size_overflow_hash _000242_hash = {
81706 + .next = NULL,
81707 + .name = "vzalloc_node",
81708 + .file = "include/linux/vmalloc.h",
81709 + .param1 = 1,
81710 +};
81711 +struct size_overflow_hash _000243_hash = {
81712 + .next = NULL,
81713 + .name = "xfrm_hash_alloc",
81714 + .file = "net/xfrm/xfrm_hash.c",
81715 + .param1 = 1,
81716 +};
81717 +struct size_overflow_hash _000244_hash = {
81718 + .next = NULL,
81719 + .name = "acpi_ds_build_internal_package_obj",
81720 + .file = "drivers/acpi/acpica/dsobject.c",
81721 + .param3 = 1,
81722 +};
81723 +struct size_overflow_hash _000245_hash = {
81724 + .next = NULL,
81725 + .name = "acpi_system_read_event",
81726 + .file = "drivers/acpi/event.c",
81727 + .param3 = 1,
81728 +};
81729 +struct size_overflow_hash _000246_hash = {
81730 + .next = NULL,
81731 + .name = "acpi_ut_create_buffer_object",
81732 + .file = "drivers/acpi/acpica/utobject.c",
81733 + .param1 = 1,
81734 +};
81735 +struct size_overflow_hash _000247_hash = {
81736 + .next = NULL,
81737 + .name = "acpi_ut_create_package_object",
81738 + .file = "drivers/acpi/acpica/utobject.c",
81739 + .param1 = 1,
81740 +};
81741 +struct size_overflow_hash _000248_hash = {
81742 + .next = NULL,
81743 + .name = "acpi_ut_create_string_object",
81744 + .file = "drivers/acpi/acpica/utobject.c",
81745 + .param1 = 1,
81746 +};
81747 +struct size_overflow_hash _000249_hash = {
81748 + .next = NULL,
81749 + .name = "add_child",
81750 + .file = "drivers/mfd/twl-core.c",
81751 + .param4 = 1,
81752 +};
81753 +struct size_overflow_hash _000250_hash = {
81754 + .next = NULL,
81755 + .name = "___alloc_bootmem",
81756 + .file = "mm/nobootmem.c",
81757 + .param1 = 1,
81758 +};
81759 +struct size_overflow_hash _000251_hash = {
81760 + .next = NULL,
81761 + .name = "__alloc_bootmem_nopanic",
81762 + .file = "include/linux/bootmem.h",
81763 + .param1 = 1,
81764 +};
81765 +struct size_overflow_hash _000252_hash = {
81766 + .next = NULL,
81767 + .name = "async_setkey",
81768 + .file = "crypto/blkcipher.c",
81769 + .param3 = 1,
81770 +};
81771 +struct size_overflow_hash _000253_hash = {
81772 + .next = NULL,
81773 + .name = "bio_map_kern",
81774 + .file = "include/linux/bio.h",
81775 + .param3 = 1,
81776 +};
81777 +struct size_overflow_hash _000254_hash = {
81778 + .next = NULL,
81779 + .name = "copy_oldmem_page",
81780 + .file = "include/linux/crash_dump.h",
81781 + .param3 = 1,
81782 +};
81783 +struct size_overflow_hash _000255_hash = {
81784 + .next = NULL,
81785 + .name = "do_sigpending",
81786 + .file = "include/linux/signal.h",
81787 + .param2 = 1,
81788 +};
81789 +struct size_overflow_hash _000257_hash = {
81790 + .next = NULL,
81791 + .name = "keyctl_describe_key",
81792 + .file = "security/keys/keyctl.c",
81793 + .param3 = 1,
81794 +};
81795 +struct size_overflow_hash _000258_hash = {
81796 + .next = NULL,
81797 + .name = "keyctl_get_security",
81798 + .file = "security/keys/keyctl.c",
81799 + .param3 = 1,
81800 +};
81801 +struct size_overflow_hash _000259_hash = {
81802 + .next = NULL,
81803 + .name = "keyring_read",
81804 + .file = "security/keys/keyring.c",
81805 + .param3 = 1,
81806 +};
81807 +struct size_overflow_hash _000260_hash = {
81808 + .next = NULL,
81809 + .name = "kfifo_copy_to_user",
81810 + .file = "kernel/kfifo.c",
81811 + .param3 = 1,
81812 +};
81813 +struct size_overflow_hash _000261_hash = {
81814 + .next = NULL,
81815 + .name = "mousedev_read",
81816 + .file = "drivers/input/mousedev.c",
81817 + .param3 = 1,
81818 +};
81819 +struct size_overflow_hash _000262_hash = {
81820 + .next = NULL,
81821 + .name = "mpi_lshift_limbs",
81822 + .file = "lib/mpi/mpi-bit.c",
81823 + .param2 = 1,
81824 +};
81825 +struct size_overflow_hash _000263_hash = {
81826 + .next = NULL,
81827 + .name = "neigh_hash_grow",
81828 + .file = "net/core/neighbour.c",
81829 + .param2 = 1,
81830 +};
81831 +struct size_overflow_hash _000264_hash = {
81832 + .next = NULL,
81833 + .name = "posix_clock_register",
81834 + .file = "include/linux/posix-clock.h",
81835 + .param2 = 1,
81836 +};
81837 +struct size_overflow_hash _000265_hash = {
81838 + .next = NULL,
81839 + .name = "__proc_file_read",
81840 + .file = "fs/proc/generic.c",
81841 + .param3 = 1,
81842 +};
81843 +struct size_overflow_hash _000266_hash = {
81844 + .next = NULL,
81845 + .name = "read_profile",
81846 + .file = "kernel/profile.c",
81847 + .param3 = 1,
81848 +};
81849 +struct size_overflow_hash _000267_hash = {
81850 + .next = NULL,
81851 + .name = "read_vmcore",
81852 + .file = "fs/proc/vmcore.c",
81853 + .param3 = 1,
81854 +};
81855 +struct size_overflow_hash _000268_hash = {
81856 + .next = NULL,
81857 + .name = "redirected_tty_write",
81858 + .file = "drivers/tty/tty_io.c",
81859 + .param3 = 1,
81860 +};
81861 +struct size_overflow_hash _000269_hash = {
81862 + .next = NULL,
81863 + .name = "__register_chrdev",
81864 + .file = "include/linux/fs.h",
81865 + .param2 = 1,
81866 + .param3 = 1,
81867 +};
81868 +struct size_overflow_hash _000271_hash = {
81869 + .next = NULL,
81870 + .name = "request_key_auth_read",
81871 + .file = "security/keys/request_key_auth.c",
81872 + .param3 = 1,
81873 +};
81874 +struct size_overflow_hash _000272_hash = {
81875 + .next = NULL,
81876 + .name = "shash_async_setkey",
81877 + .file = "crypto/shash.c",
81878 + .param3 = 1,
81879 +};
81880 +struct size_overflow_hash _000273_hash = {
81881 + .next = NULL,
81882 + .name = "shash_compat_setkey",
81883 + .file = "crypto/shash.c",
81884 + .param3 = 1,
81885 +};
81886 +struct size_overflow_hash _000274_hash = {
81887 + .next = NULL,
81888 + .name = "simple_read_from_buffer",
81889 + .file = "include/linux/fs.h",
81890 + .param2 = 1,
81891 + .param5 = 1,
81892 +};
81893 +struct size_overflow_hash _000276_hash = {
81894 + .next = NULL,
81895 + .name = "store_ifalias",
81896 + .file = "net/core/net-sysfs.c",
81897 + .param4 = 1,
81898 +};
81899 +struct size_overflow_hash _000277_hash = {
81900 + .next = NULL,
81901 + .name = "subbuf_read_actor",
81902 + .file = "kernel/relay.c",
81903 + .param3 = 1,
81904 +};
81905 +struct size_overflow_hash _000278_hash = {
81906 + .next = NULL,
81907 + .name = "sys_fgetxattr",
81908 + .file = "include/linux/syscalls.h",
81909 + .param4 = 1,
81910 +};
81911 +struct size_overflow_hash _000279_hash = {
81912 + .next = NULL,
81913 + .name = "sys_getxattr",
81914 + .file = "include/linux/syscalls.h",
81915 + .param4 = 1,
81916 +};
81917 +struct size_overflow_hash _000280_hash = {
81918 + .next = NULL,
81919 + .name = "sys_kexec_load",
81920 + .file = "include/linux/syscalls.h",
81921 + .param2 = 1,
81922 +};
81923 +struct size_overflow_hash _000281_hash = {
81924 + .next = NULL,
81925 + .name = "sys_lgetxattr",
81926 + .file = "include/linux/syscalls.h",
81927 + .param4 = 1,
81928 +};
81929 +struct size_overflow_hash _000282_hash = {
81930 + .next = NULL,
81931 + .name = "sys_process_vm_readv",
81932 + .file = "include/linux/syscalls.h",
81933 + .param3 = 1,
81934 + .param5 = 1,
81935 +};
81936 +struct size_overflow_hash _000284_hash = {
81937 + .next = NULL,
81938 + .name = "sys_process_vm_writev",
81939 + .file = "include/linux/syscalls.h",
81940 + .param3 = 1,
81941 + .param5 = 1,
81942 +};
81943 +struct size_overflow_hash _000286_hash = {
81944 + .next = NULL,
81945 + .name = "sys_sched_getaffinity",
81946 + .file = "include/linux/syscalls.h",
81947 + .param2 = 1,
81948 +};
81949 +struct size_overflow_hash _000287_hash = {
81950 + .next = NULL,
81951 + .name = "tomoyo_read_self",
81952 + .file = "security/tomoyo/securityfs_if.c",
81953 + .param3 = 1,
81954 +};
81955 +struct size_overflow_hash _000288_hash = {
81956 + .next = NULL,
81957 + .name = "tpm_read",
81958 + .file = "drivers/char/tpm/tpm.c",
81959 + .param3 = 1,
81960 +};
81961 +struct size_overflow_hash _000289_hash = {
81962 + .next = NULL,
81963 + .name = "user_read",
81964 + .file = "include/keys/user-type.h",
81965 + .param3 = 1,
81966 +};
81967 +struct size_overflow_hash _000290_hash = {
81968 + .next = NULL,
81969 + .name = "vcs_read",
81970 + .file = "drivers/tty/vt/vc_screen.c",
81971 + .param3 = 1,
81972 +};
81973 +struct size_overflow_hash _000291_hash = {
81974 + .next = NULL,
81975 + .name = "vfs_readv",
81976 + .file = "include/linux/fs.h",
81977 + .param3 = 1,
81978 +};
81979 +struct size_overflow_hash _000292_hash = {
81980 + .next = NULL,
81981 + .name = "vfs_writev",
81982 + .file = "include/linux/fs.h",
81983 + .param3 = 1,
81984 +};
81985 +struct size_overflow_hash _000293_hash = {
81986 + .next = NULL,
81987 + .name = "vga_arb_read",
81988 + .file = "drivers/gpu/vga/vgaarb.c",
81989 + .param3 = 1,
81990 +};
81991 +struct size_overflow_hash _000294_hash = {
81992 + .next = NULL,
81993 + .name = "xz_dec_lzma2_create",
81994 + .file = "lib/xz/xz_dec_lzma2.c",
81995 + .param2 = 1,
81996 +};
81997 +struct size_overflow_hash _000295_hash = {
81998 + .next = NULL,
81999 + .name = "aat2870_reg_read_file",
82000 + .file = "drivers/mfd/aat2870-core.c",
82001 + .param3 = 1,
82002 +};
82003 +struct size_overflow_hash _000296_hash = {
82004 + .next = NULL,
82005 + .name = "__alloc_bootmem",
82006 + .file = "include/linux/bootmem.h",
82007 + .param1 = 1,
82008 +};
82009 +struct size_overflow_hash _000297_hash = {
82010 + .next = NULL,
82011 + .name = "__alloc_bootmem_low",
82012 + .file = "include/linux/bootmem.h",
82013 + .param1 = 1,
82014 +};
82015 +struct size_overflow_hash _000298_hash = {
82016 + .next = NULL,
82017 + .name = "__alloc_bootmem_node_nopanic",
82018 + .file = "include/linux/bootmem.h",
82019 + .param2 = 1,
82020 +};
82021 +struct size_overflow_hash _000299_hash = {
82022 + .next = NULL,
82023 + .name = "blk_rq_map_kern",
82024 + .file = "include/linux/blkdev.h",
82025 + .param4 = 1,
82026 +};
82027 +struct size_overflow_hash _000300_hash = {
82028 + .next = NULL,
82029 + .name = "cgroup_read_s64",
82030 + .file = "kernel/cgroup.c",
82031 + .param5 = 1,
82032 +};
82033 +struct size_overflow_hash _000301_hash = {
82034 + .next = NULL,
82035 + .name = "cgroup_read_u64",
82036 + .file = "kernel/cgroup.c",
82037 + .param5 = 1,
82038 +};
82039 +struct size_overflow_hash _000302_hash = {
82040 + .next = NULL,
82041 + .name = "cpuset_common_file_read",
82042 + .file = "kernel/cpuset.c",
82043 + .param5 = 1,
82044 +};
82045 +struct size_overflow_hash _000303_hash = {
82046 + .next = NULL,
82047 + .name = "filter_read",
82048 + .file = "lib/dma-debug.c",
82049 + .param3 = 1,
82050 +};
82051 +struct size_overflow_hash _000304_hash = {
82052 + .next = NULL,
82053 + .name = "ima_show_htable_value",
82054 + .file = "security/integrity/ima/ima_fs.c",
82055 + .param2 = 1,
82056 +};
82057 +struct size_overflow_hash _000305_hash = {
82058 + .next = NULL,
82059 + .name = "kernel_readv",
82060 + .file = "fs/splice.c",
82061 + .param3 = 1,
82062 +};
82063 +struct size_overflow_hash _000306_hash = {
82064 + .next = NULL,
82065 + .name = "__kfifo_to_user",
82066 + .file = "include/linux/kfifo.h",
82067 + .param3 = 1,
82068 +};
82069 +struct size_overflow_hash _000307_hash = {
82070 + .next = NULL,
82071 + .name = "__kfifo_to_user_r",
82072 + .file = "include/linux/kfifo.h",
82073 + .param3 = 1,
82074 +};
82075 +struct size_overflow_hash _000308_hash = {
82076 + .next = NULL,
82077 + .name = "mqueue_read_file",
82078 + .file = "ipc/mqueue.c",
82079 + .param3 = 1,
82080 +};
82081 +struct size_overflow_hash _000309_hash = {
82082 + .next = NULL,
82083 + .name = "oom_adjust_read",
82084 + .file = "fs/proc/base.c",
82085 + .param3 = 1,
82086 +};
82087 +struct size_overflow_hash _000310_hash = {
82088 + .next = NULL,
82089 + .name = "oom_score_adj_read",
82090 + .file = "fs/proc/base.c",
82091 + .param3 = 1,
82092 +};
82093 +struct size_overflow_hash _000311_hash = {
82094 + .next = NULL,
82095 + .name = "pm_qos_power_read",
82096 + .file = "kernel/power/qos.c",
82097 + .param3 = 1,
82098 +};
82099 +struct size_overflow_hash _000312_hash = {
82100 + .next = NULL,
82101 + .name = "proc_coredump_filter_read",
82102 + .file = "fs/proc/base.c",
82103 + .param3 = 1,
82104 +};
82105 +struct size_overflow_hash _000313_hash = {
82106 + .next = NULL,
82107 + .name = "proc_fdinfo_read",
82108 + .file = "fs/proc/base.c",
82109 + .param3 = 1,
82110 +};
82111 +struct size_overflow_hash _000314_hash = {
82112 + .next = NULL,
82113 + .name = "proc_info_read",
82114 + .file = "fs/proc/base.c",
82115 + .param3 = 1,
82116 +};
82117 +struct size_overflow_hash _000315_hash = {
82118 + .next = NULL,
82119 + .name = "proc_loginuid_read",
82120 + .file = "fs/proc/base.c",
82121 + .param3 = 1,
82122 +};
82123 +struct size_overflow_hash _000316_hash = {
82124 + .next = NULL,
82125 + .name = "proc_pid_attr_read",
82126 + .file = "fs/proc/base.c",
82127 + .param3 = 1,
82128 +};
82129 +struct size_overflow_hash _000317_hash = {
82130 + .next = NULL,
82131 + .name = "proc_sessionid_read",
82132 + .file = "fs/proc/base.c",
82133 + .param3 = 1,
82134 +};
82135 +struct size_overflow_hash _000318_hash = {
82136 + .next = NULL,
82137 + .name = "pstore_file_read",
82138 + .file = "fs/pstore/inode.c",
82139 + .param3 = 1,
82140 +};
82141 +struct size_overflow_hash _000319_hash = {
82142 + .next = NULL,
82143 + .name = "read_enabled_file_bool",
82144 + .file = "kernel/kprobes.c",
82145 + .param3 = 1,
82146 +};
82147 +struct size_overflow_hash _000320_hash = {
82148 + .next = NULL,
82149 + .name = "read_file_blob",
82150 + .file = "fs/debugfs/file.c",
82151 + .param3 = 1,
82152 +};
82153 +struct size_overflow_hash _000321_hash = {
82154 + .next = NULL,
82155 + .name = "read_file_bool",
82156 + .file = "fs/debugfs/file.c",
82157 + .param3 = 1,
82158 +};
82159 +struct size_overflow_hash _000322_hash = {
82160 + .next = NULL,
82161 + .name = "read_from_oldmem",
82162 + .file = "fs/proc/vmcore.c",
82163 + .param2 = 1,
82164 +};
82165 +struct size_overflow_hash _000323_hash = {
82166 + .next = NULL,
82167 + .name = "read_oldmem",
82168 + .file = "drivers/char/mem.c",
82169 + .param3 = 1,
82170 +};
82171 +struct size_overflow_hash _000324_hash = {
82172 + .next = NULL,
82173 + .name = "res_counter_read",
82174 + .file = "include/linux/res_counter.h",
82175 + .param4 = 1,
82176 +};
82177 +struct size_overflow_hash _000325_hash = {
82178 + .next = NULL,
82179 + .name = "sel_read_avc_cache_threshold",
82180 + .file = "security/selinux/selinuxfs.c",
82181 + .param3 = 1,
82182 +};
82183 +struct size_overflow_hash _000326_hash = {
82184 + .next = NULL,
82185 + .name = "sel_read_avc_hash_stats",
82186 + .file = "security/selinux/selinuxfs.c",
82187 + .param3 = 1,
82188 +};
82189 +struct size_overflow_hash _000327_hash = {
82190 + .next = NULL,
82191 + .name = "sel_read_bool",
82192 + .file = "security/selinux/selinuxfs.c",
82193 + .param3 = 1,
82194 +};
82195 +struct size_overflow_hash _000328_hash = {
82196 + .next = NULL,
82197 + .name = "sel_read_checkreqprot",
82198 + .file = "security/selinux/selinuxfs.c",
82199 + .param3 = 1,
82200 +};
82201 +struct size_overflow_hash _000329_hash = {
82202 + .next = NULL,
82203 + .name = "sel_read_class",
82204 + .file = "security/selinux/selinuxfs.c",
82205 + .param3 = 1,
82206 +};
82207 +struct size_overflow_hash _000330_hash = {
82208 + .next = NULL,
82209 + .name = "sel_read_enforce",
82210 + .file = "security/selinux/selinuxfs.c",
82211 + .param3 = 1,
82212 +};
82213 +struct size_overflow_hash _000331_hash = {
82214 + .next = NULL,
82215 + .name = "sel_read_handle_status",
82216 + .file = "security/selinux/selinuxfs.c",
82217 + .param3 = 1,
82218 +};
82219 +struct size_overflow_hash _000332_hash = {
82220 + .next = NULL,
82221 + .name = "sel_read_handle_unknown",
82222 + .file = "security/selinux/selinuxfs.c",
82223 + .param3 = 1,
82224 +};
82225 +struct size_overflow_hash _000333_hash = {
82226 + .next = NULL,
82227 + .name = "sel_read_initcon",
82228 + .file = "security/selinux/selinuxfs.c",
82229 + .param3 = 1,
82230 +};
82231 +struct size_overflow_hash _000334_hash = {
82232 + .next = NULL,
82233 + .name = "sel_read_mls",
82234 + .file = "security/selinux/selinuxfs.c",
82235 + .param3 = 1,
82236 +};
82237 +struct size_overflow_hash _000335_hash = {
82238 + .next = NULL,
82239 + .name = "sel_read_perm",
82240 + .file = "security/selinux/selinuxfs.c",
82241 + .param3 = 1,
82242 +};
82243 +struct size_overflow_hash _000336_hash = {
82244 + .next = NULL,
82245 + .name = "sel_read_policy",
82246 + .file = "security/selinux/selinuxfs.c",
82247 + .param3 = 1,
82248 +};
82249 +struct size_overflow_hash _000337_hash = {
82250 + .next = NULL,
82251 + .name = "sel_read_policycap",
82252 + .file = "security/selinux/selinuxfs.c",
82253 + .param3 = 1,
82254 +};
82255 +struct size_overflow_hash _000338_hash = {
82256 + .next = NULL,
82257 + .name = "sel_read_policyvers",
82258 + .file = "security/selinux/selinuxfs.c",
82259 + .param3 = 1,
82260 +};
82261 +struct size_overflow_hash _000339_hash = {
82262 + .next = NULL,
82263 + .name = "simple_attr_read",
82264 + .file = "include/linux/fs.h",
82265 + .param3 = 1,
82266 +};
82267 +struct size_overflow_hash _000340_hash = {
82268 + .next = NULL,
82269 + .name = "simple_transaction_read",
82270 + .file = "include/linux/fs.h",
82271 + .param3 = 1,
82272 +};
82273 +struct size_overflow_hash _000341_hash = {
82274 + .next = NULL,
82275 + .name = "smk_read_ambient",
82276 + .file = "security/smack/smackfs.c",
82277 + .param3 = 1,
82278 +};
82279 +struct size_overflow_hash _000342_hash = {
82280 + .next = NULL,
82281 + .name = "smk_read_direct",
82282 + .file = "security/smack/smackfs.c",
82283 + .param3 = 1,
82284 +};
82285 +struct size_overflow_hash _000343_hash = {
82286 + .next = NULL,
82287 + .name = "smk_read_doi",
82288 + .file = "security/smack/smackfs.c",
82289 + .param3 = 1,
82290 +};
82291 +struct size_overflow_hash _000344_hash = {
82292 + .next = NULL,
82293 + .name = "smk_read_logging",
82294 + .file = "security/smack/smackfs.c",
82295 + .param3 = 1,
82296 +};
82297 +struct size_overflow_hash _000345_hash = {
82298 + .next = NULL,
82299 + .name = "smk_read_onlycap",
82300 + .file = "security/smack/smackfs.c",
82301 + .param3 = 1,
82302 +};
82303 +struct size_overflow_hash _000346_hash = {
82304 + .next = NULL,
82305 + .name = "snapshot_read",
82306 + .file = "kernel/power/user.c",
82307 + .param3 = 1,
82308 +};
82309 +struct size_overflow_hash _000347_hash = {
82310 + .next = NULL,
82311 + .name = "supply_map_read_file",
82312 + .file = "drivers/regulator/core.c",
82313 + .param3 = 1,
82314 +};
82315 +struct size_overflow_hash _000348_hash = {
82316 + .next = NULL,
82317 + .name = "sysfs_read_file",
82318 + .file = "fs/sysfs/file.c",
82319 + .param3 = 1,
82320 +};
82321 +struct size_overflow_hash _000349_hash = {
82322 + .next = NULL,
82323 + .name = "sys_preadv",
82324 + .file = "include/linux/syscalls.h",
82325 + .param3 = 1,
82326 +};
82327 +struct size_overflow_hash _000350_hash = {
82328 + .next = NULL,
82329 + .name = "sys_pwritev",
82330 + .file = "include/linux/syscalls.h",
82331 + .param3 = 1,
82332 +};
82333 +struct size_overflow_hash _000351_hash = {
82334 + .next = NULL,
82335 + .name = "sys_readv",
82336 + .file = "include/linux/syscalls.h",
82337 + .param3 = 1,
82338 +};
82339 +struct size_overflow_hash _000352_hash = {
82340 + .next = NULL,
82341 + .name = "sys_rt_sigpending",
82342 + .file = "include/linux/syscalls.h",
82343 + .param2 = 1,
82344 +};
82345 +struct size_overflow_hash _000353_hash = {
82346 + .next = NULL,
82347 + .name = "sys_writev",
82348 + .file = "include/linux/syscalls.h",
82349 + .param3 = 1,
82350 +};
82351 +struct size_overflow_hash _000354_hash = {
82352 + .next = NULL,
82353 + .name = "ima_show_htable_violations",
82354 + .file = "security/integrity/ima/ima_fs.c",
82355 + .param3 = 1,
82356 +};
82357 +struct size_overflow_hash _000355_hash = {
82358 + .next = NULL,
82359 + .name = "ima_show_measurements_count",
82360 + .file = "security/integrity/ima/ima_fs.c",
82361 + .param3 = 1,
82362 +};
82363 +struct size_overflow_hash _000356_hash = {
82364 + .next = NULL,
82365 + .name = "alloc_cpu_rmap",
82366 + .file = "include/linux/cpu_rmap.h",
82367 + .param1 = 1,
82368 +};
82369 +struct size_overflow_hash _000357_hash = {
82370 + .next = NULL,
82371 + .name = "alloc_page_cgroup",
82372 + .file = "mm/page_cgroup.c",
82373 + .param1 = 1,
82374 +};
82375 +struct size_overflow_hash _000358_hash = {
82376 + .next = NULL,
82377 + .name = "alloc_sched_domains",
82378 + .file = "include/linux/sched.h",
82379 + .param1 = 1,
82380 +};
82381 +struct size_overflow_hash _000359_hash = {
82382 + .next = NULL,
82383 + .name = "compat_rw_copy_check_uvector",
82384 + .file = "include/linux/compat.h",
82385 + .param3 = 1,
82386 +};
82387 +struct size_overflow_hash _000360_hash = {
82388 + .next = NULL,
82389 + .name = "compat_sys_kexec_load",
82390 + .file = "include/linux/kexec.h",
82391 + .param2 = 1,
82392 +};
82393 +struct size_overflow_hash _000361_hash = {
82394 + .next = NULL,
82395 + .name = "compat_sys_semtimedop",
82396 + .file = "include/linux/compat.h",
82397 + .param3 = 1,
82398 +};
82399 +struct size_overflow_hash _000362_hash = {
82400 + .next = NULL,
82401 + .name = "copy_from_user",
82402 + .file = "arch/x86/include/asm/uaccess_64.h",
82403 + .param3 = 1,
82404 +};
82405 +struct size_overflow_hash _000363_hash = {
82406 + .next = NULL,
82407 + .name = "__copy_from_user",
82408 + .file = "arch/x86/include/asm/uaccess_64.h",
82409 + .param3 = 1,
82410 +};
82411 +struct size_overflow_hash _000364_hash = {
82412 + .next = NULL,
82413 + .name = "__copy_from_user_inatomic",
82414 + .file = "arch/x86/include/asm/uaccess_64.h",
82415 + .param3 = 1,
82416 +};
82417 +struct size_overflow_hash _000365_hash = {
82418 + .next = NULL,
82419 + .name = "__copy_from_user_nocache",
82420 + .file = "arch/x86/include/asm/uaccess_64.h",
82421 + .param3 = 1,
82422 +};
82423 +struct size_overflow_hash _000366_hash = {
82424 + .next = NULL,
82425 + .name = "__copy_in_user",
82426 + .file = "arch/x86/include/asm/uaccess_64.h",
82427 + .param3 = 1,
82428 +};
82429 +struct size_overflow_hash _000367_hash = {
82430 + .next = NULL,
82431 + .name = "copy_in_user",
82432 + .file = "arch/x86/include/asm/uaccess_64.h",
82433 + .param3 = 1,
82434 +};
82435 +struct size_overflow_hash _000368_hash = {
82436 + .next = NULL,
82437 + .name = "__copy_to_user",
82438 + .file = "arch/x86/include/asm/uaccess_64.h",
82439 + .param3 = 1,
82440 +};
82441 +struct size_overflow_hash _000369_hash = {
82442 + .next = NULL,
82443 + .name = "copy_to_user",
82444 + .file = "arch/x86/include/asm/uaccess_64.h",
82445 + .param3 = 1,
82446 +};
82447 +struct size_overflow_hash _000370_hash = {
82448 + .next = NULL,
82449 + .name = "__copy_to_user_inatomic",
82450 + .file = "arch/x86/include/asm/uaccess_64.h",
82451 + .param3 = 1,
82452 +};
82453 +struct size_overflow_hash _000371_hash = {
82454 + .next = NULL,
82455 + .name = "kmalloc_node",
82456 + .file = "include/linux/slub_def.h",
82457 + .param1 = 1,
82458 +};
82459 +struct size_overflow_hash _000372_hash = {
82460 + .next = NULL,
82461 + .name = "pcpu_alloc_bootmem",
82462 + .file = "arch/x86/kernel/setup_percpu.c",
82463 + .param2 = 1,
82464 +};
82465 +struct size_overflow_hash _000373_hash = {
82466 + .next = NULL,
82467 + .name = "sys32_rt_sigpending",
82468 + .file = "arch/x86/include/asm/sys_ia32.h",
82469 + .param2 = 1,
82470 +};
82471 +struct size_overflow_hash _000374_hash = {
82472 + .next = NULL,
82473 + .name = "tunables_read",
82474 + .file = "arch/x86/platform/uv/tlb_uv.c",
82475 + .param3 = 1,
82476 +};
82477 +struct size_overflow_hash _000375_hash = {
82478 + .next = NULL,
82479 + .name = "compat_do_readv_writev",
82480 + .file = "fs/compat.c",
82481 + .param4 = 1,
82482 +};
82483 +struct size_overflow_hash _000376_hash = {
82484 + .next = NULL,
82485 + .name = "compat_keyctl_instantiate_key_iov",
82486 + .file = "security/keys/compat.c",
82487 + .param3 = 1,
82488 +};
82489 +struct size_overflow_hash _000377_hash = {
82490 + .next = NULL,
82491 + .name = "compat_process_vm_rw",
82492 + .file = "mm/process_vm_access.c",
82493 + .param3 = 1,
82494 + .param5 = 1,
82495 +};
82496 +struct size_overflow_hash _000379_hash = {
82497 + .next = NULL,
82498 + .name = "do_pages_stat",
82499 + .file = "mm/migrate.c",
82500 + .param2 = 1,
82501 +};
82502 +struct size_overflow_hash _000380_hash = {
82503 + .next = NULL,
82504 + .name = "kzalloc_node",
82505 + .file = "include/linux/slab.h",
82506 + .param1 = 1,
82507 +};
82508 +struct size_overflow_hash _000381_hash = {
82509 + .next = NULL,
82510 + .name = "pcpu_fc_alloc",
82511 + .file = "arch/x86/kernel/setup_percpu.c",
82512 + .param2 = 1,
82513 +};
82514 +struct size_overflow_hash _000382_hash = {
82515 + .next = NULL,
82516 + .name = "ptc_proc_write",
82517 + .file = "arch/x86/platform/uv/tlb_uv.c",
82518 + .param3 = 1,
82519 +};
82520 +struct size_overflow_hash _000383_hash = {
82521 + .next = NULL,
82522 + .name = "tunables_write",
82523 + .file = "arch/x86/platform/uv/tlb_uv.c",
82524 + .param3 = 1,
82525 +};
82526 +struct size_overflow_hash _000384_hash = {
82527 + .next = NULL,
82528 + .name = "__alloc_bootmem_low_node",
82529 + .file = "include/linux/bootmem.h",
82530 + .param2 = 1,
82531 +};
82532 +struct size_overflow_hash _000385_hash = {
82533 + .next = NULL,
82534 + .name = "__alloc_bootmem_node",
82535 + .file = "include/linux/bootmem.h",
82536 + .param2 = 1,
82537 +};
82538 +struct size_overflow_hash _000386_hash = {
82539 + .next = NULL,
82540 + .name = "compat_readv",
82541 + .file = "fs/compat.c",
82542 + .param3 = 1,
82543 +};
82544 +struct size_overflow_hash _000387_hash = {
82545 + .next = NULL,
82546 + .name = "compat_sys_keyctl",
82547 + .file = "include/linux/compat.h",
82548 + .param4 = 1,
82549 +};
82550 +struct size_overflow_hash _000388_hash = {
82551 + .next = NULL,
82552 + .name = "compat_sys_process_vm_readv",
82553 + .file = "include/linux/compat.h",
82554 + .param3 = 1,
82555 + .param5 = 1,
82556 +};
82557 +struct size_overflow_hash _000390_hash = {
82558 + .next = NULL,
82559 + .name = "compat_sys_process_vm_writev",
82560 + .file = "include/linux/compat.h",
82561 + .param3 = 1,
82562 + .param5 = 1,
82563 +};
82564 +struct size_overflow_hash _000392_hash = {
82565 + .next = NULL,
82566 + .name = "compat_writev",
82567 + .file = "fs/compat.c",
82568 + .param3 = 1,
82569 +};
82570 +struct size_overflow_hash _000393_hash = {
82571 + .next = NULL,
82572 + .name = "sys_move_pages",
82573 + .file = "include/linux/syscalls.h",
82574 + .param2 = 1,
82575 +};
82576 +struct size_overflow_hash _000394_hash = {
82577 + .next = NULL,
82578 + .name = "__alloc_bootmem_node_high",
82579 + .file = "include/linux/bootmem.h",
82580 + .param2 = 1,
82581 +};
82582 +struct size_overflow_hash _000395_hash = {
82583 + .next = NULL,
82584 + .name = "compat_sys_move_pages",
82585 + .file = "include/linux/compat.h",
82586 + .param2 = 1,
82587 +};
82588 +struct size_overflow_hash _000396_hash = {
82589 + .next = NULL,
82590 + .name = "compat_sys_preadv",
82591 + .file = "include/linux/compat.h",
82592 + .param3 = 1,
82593 +};
82594 +struct size_overflow_hash _000397_hash = {
82595 + .next = NULL,
82596 + .name = "compat_sys_pwritev",
82597 + .file = "include/linux/compat.h",
82598 + .param3 = 1,
82599 +};
82600 +struct size_overflow_hash _000398_hash = {
82601 + .next = NULL,
82602 + .name = "compat_sys_readv",
82603 + .file = "include/linux/compat.h",
82604 + .param3 = 1,
82605 +};
82606 +struct size_overflow_hash _000399_hash = {
82607 + .next = NULL,
82608 + .name = "compat_sys_writev",
82609 + .file = "include/linux/compat.h",
82610 + .param3 = 1,
82611 +};
82612 +struct size_overflow_hash _000400_hash = {
82613 + .next = NULL,
82614 + .name = "sparse_early_usemaps_alloc_node",
82615 + .file = "mm/sparse.c",
82616 + .param4 = 1,
82617 +};
82618 +struct size_overflow_hash _000401_hash = {
82619 + .next = NULL,
82620 + .name = "__earlyonly_bootmem_alloc",
82621 + .file = "mm/sparse-vmemmap.c",
82622 + .param2 = 1,
82623 +};
82624 +struct size_overflow_hash _000402_hash = {
82625 + .next = NULL,
82626 + .name = "sparse_mem_maps_populate_node",
82627 + .file = "include/linux/mm.h",
82628 + .param4 = 1,
82629 +};
82630 +struct size_overflow_hash _000403_hash = {
82631 + .next = NULL,
82632 + .name = "vmemmap_alloc_block",
82633 + .file = "include/linux/mm.h",
82634 + .param1 = 1,
82635 +};
82636 +struct size_overflow_hash _000404_hash = {
82637 + .next = NULL,
82638 + .name = "sparse_early_mem_maps_alloc_node",
82639 + .file = "mm/sparse.c",
82640 + .param4 = 1,
82641 +};
82642 +struct size_overflow_hash _000405_hash = {
82643 + .next = NULL,
82644 + .name = "vmemmap_alloc_block_buf",
82645 + .file = "include/linux/mm.h",
82646 + .param1 = 1,
82647 +};
82648 +struct size_overflow_hash _000406_hash = {
82649 + .next = NULL,
82650 + .name = "acpi_battery_write_alarm",
82651 + .file = "drivers/acpi/battery.c",
82652 + .param3 = 1,
82653 +};
82654 +struct size_overflow_hash _000407_hash = {
82655 + .next = NULL,
82656 + .name = "acpi_battery_write_alarm",
82657 + .file = "drivers/acpi/sbs.c",
82658 + .param3 = 1,
82659 +};
82660 +struct size_overflow_hash _000408_hash = {
82661 + .next = NULL,
82662 + .name = "ad7879_spi_xfer",
82663 + .file = "drivers/input/touchscreen/ad7879-spi.c",
82664 + .param3 = 1,
82665 +};
82666 +struct size_overflow_hash _000409_hash = {
82667 + .next = NULL,
82668 + .name = "add_port",
82669 + .file = "drivers/char/virtio_console.c",
82670 + .param2 = 1,
82671 +};
82672 +struct size_overflow_hash _000410_hash = {
82673 + .next = NULL,
82674 + .name = "addtgt",
82675 + .file = "drivers/block/aoe/aoecmd.c",
82676 + .param3 = 1,
82677 +};
82678 +struct size_overflow_hash _000411_hash = {
82679 + .next = NULL,
82680 + .name = "adu_read",
82681 + .file = "drivers/usb/misc/adutux.c",
82682 + .param3 = 1,
82683 +};
82684 +struct size_overflow_hash _000412_hash = {
82685 + .next = NULL,
82686 + .name = "adu_write",
82687 + .file = "drivers/usb/misc/adutux.c",
82688 + .param3 = 1,
82689 +};
82690 +struct size_overflow_hash _000413_hash = {
82691 + .next = NULL,
82692 + .name = "aer_inject_write",
82693 + .file = "drivers/pci/pcie/aer/aer_inject.c",
82694 + .param3 = 1,
82695 +};
82696 +struct size_overflow_hash _000414_hash = {
82697 + .next = NULL,
82698 + .name = "aes_decrypt_fail_read",
82699 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82700 + .param3 = 1,
82701 +};
82702 +struct size_overflow_hash _000415_hash = {
82703 + .next = NULL,
82704 + .name = "aes_decrypt_interrupt_read",
82705 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82706 + .param3 = 1,
82707 +};
82708 +struct size_overflow_hash _000416_hash = {
82709 + .next = NULL,
82710 + .name = "aes_decrypt_packets_read",
82711 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82712 + .param3 = 1,
82713 +};
82714 +struct size_overflow_hash _000417_hash = {
82715 + .next = NULL,
82716 + .name = "aes_encrypt_fail_read",
82717 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82718 + .param3 = 1,
82719 +};
82720 +struct size_overflow_hash _000418_hash = {
82721 + .next = NULL,
82722 + .name = "aes_encrypt_interrupt_read",
82723 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82724 + .param3 = 1,
82725 +};
82726 +struct size_overflow_hash _000419_hash = {
82727 + .next = NULL,
82728 + .name = "aes_encrypt_packets_read",
82729 + .file = "drivers/net/wireless/wl1251/debugfs.c",
82730 + .param3 = 1,
82731 +};
82732 +struct size_overflow_hash _000420_hash = {
82733 + .next = NULL,
82734 + .name = "afs_alloc_flat_call",
82735 + .file = "fs/afs/rxrpc.c",
82736 + .param2 = 1,
82737 + .param3 = 1,
82738 +};
82739 +struct size_overflow_hash _000422_hash = {
82740 + .next = NULL,
82741 + .name = "afs_cell_alloc",
82742 + .file = "fs/afs/cell.c",
82743 + .param2 = 1,
82744 +};
82745 +struct size_overflow_hash _000423_hash = {
82746 + .next = NULL,
82747 + .name = "afs_proc_cells_write",
82748 + .file = "fs/afs/proc.c",
82749 + .param3 = 1,
82750 +};
82751 +struct size_overflow_hash _000424_hash = {
82752 + .next = NULL,
82753 + .name = "afs_proc_rootcell_write",
82754 + .file = "fs/afs/proc.c",
82755 + .param3 = 1,
82756 +};
82757 +struct size_overflow_hash _000425_hash = {
82758 + .next = NULL,
82759 + .name = "aggr_recv_addba_req_evt",
82760 + .file = "drivers/net/wireless/ath/ath6kl/txrx.c",
82761 + .param4 = 1,
82762 +};
82763 +struct size_overflow_hash _000426_hash = {
82764 + .next = NULL,
82765 + .name = "agp_3_5_isochronous_node_enable",
82766 + .file = "drivers/char/agp/isoch.c",
82767 + .param3 = 1,
82768 +};
82769 +struct size_overflow_hash _000427_hash = {
82770 + .next = NULL,
82771 + .name = "agp_alloc_page_array",
82772 + .file = "drivers/char/agp/generic.c",
82773 + .param1 = 1,
82774 +};
82775 +struct size_overflow_hash _000428_hash = {
82776 + .next = NULL,
82777 + .name = "alg_setkey",
82778 + .file = "crypto/af_alg.c",
82779 + .param3 = 1,
82780 +};
82781 +struct size_overflow_hash _000429_hash = {
82782 + .next = NULL,
82783 + .name = "alloc_buf",
82784 + .file = "drivers/char/virtio_console.c",
82785 + .param1 = 1,
82786 +};
82787 +struct size_overflow_hash _000430_hash = {
82788 + .next = NULL,
82789 + .name = "alloc_context",
82790 + .file = "drivers/md/dm-raid1.c",
82791 + .param1 = 1,
82792 +};
82793 +struct size_overflow_hash _000431_hash = {
82794 + .next = NULL,
82795 + .name = "alloc_context",
82796 + .file = "drivers/md/dm-stripe.c",
82797 + .param1 = 1,
82798 +};
82799 +struct size_overflow_hash _000432_hash = {
82800 + .next = NULL,
82801 + .name = "__alloc_dev_table",
82802 + .file = "fs/exofs/super.c",
82803 + .param2 = 1,
82804 +};
82805 +struct size_overflow_hash _000433_hash = {
82806 + .next = NULL,
82807 + .name = "alloc_ep_req",
82808 + .file = "drivers/usb/gadget/f_midi.c",
82809 + .param2 = 1,
82810 +};
82811 +struct size_overflow_hash _000434_hash = {
82812 + .next = NULL,
82813 + .name = "alloc_flex_gd",
82814 + .file = "fs/ext4/resize.c",
82815 + .param1 = 1,
82816 +};
82817 +struct size_overflow_hash _000435_hash = {
82818 + .next = NULL,
82819 + .name = "__alloc_objio_seg",
82820 + .file = "fs/nfs/objlayout/objio_osd.c",
82821 + .param1 = 1,
82822 +};
82823 +struct size_overflow_hash _000436_hash = {
82824 + .next = NULL,
82825 + .name = "alloc_one_pg_vec_page",
82826 + .file = "net/packet/af_packet.c",
82827 + .param1 = 1,
82828 +};
82829 +struct size_overflow_hash _000437_hash = {
82830 + .next = NULL,
82831 + .name = "alloc_ring",
82832 + .file = "drivers/net/ethernet/chelsio/cxgb3/sge.c",
82833 + .param2 = 1,
82834 + .param4 = 1,
82835 +};
82836 +struct size_overflow_hash _000438_hash = {
82837 + .next = NULL,
82838 + .name = "alloc_ring",
82839 + .file = "drivers/net/ethernet/chelsio/cxgb4vf/sge.c",
82840 + .param2 = 1,
82841 + .param4 = 1,
82842 +};
82843 +struct size_overflow_hash _000441_hash = {
82844 + .next = NULL,
82845 + .name = "alloc_ts_config",
82846 + .file = "include/linux/textsearch.h",
82847 + .param1 = 1,
82848 +};
82849 +struct size_overflow_hash _000442_hash = {
82850 + .next = NULL,
82851 + .name = "altera_drscan",
82852 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82853 + .param2 = 1,
82854 +};
82855 +struct size_overflow_hash _000443_hash = {
82856 + .next = NULL,
82857 + .name = "altera_irscan",
82858 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82859 + .param2 = 1,
82860 +};
82861 +struct size_overflow_hash _000444_hash = {
82862 + .next = &_000066_hash,
82863 + .name = "altera_set_dr_post",
82864 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82865 + .param2 = 1,
82866 +};
82867 +struct size_overflow_hash _000445_hash = {
82868 + .next = NULL,
82869 + .name = "altera_set_dr_pre",
82870 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82871 + .param2 = 1,
82872 +};
82873 +struct size_overflow_hash _000446_hash = {
82874 + .next = NULL,
82875 + .name = "altera_set_ir_post",
82876 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82877 + .param2 = 1,
82878 +};
82879 +struct size_overflow_hash _000447_hash = {
82880 + .next = NULL,
82881 + .name = "altera_set_ir_pre",
82882 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82883 + .param2 = 1,
82884 +};
82885 +struct size_overflow_hash _000448_hash = {
82886 + .next = NULL,
82887 + .name = "altera_swap_dr",
82888 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82889 + .param2 = 1,
82890 +};
82891 +struct size_overflow_hash _000449_hash = {
82892 + .next = NULL,
82893 + .name = "altera_swap_ir",
82894 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
82895 + .param2 = 1,
82896 +};
82897 +struct size_overflow_hash _000450_hash = {
82898 + .next = NULL,
82899 + .name = "aoedev_flush",
82900 + .file = "drivers/block/aoe/aoedev.c",
82901 + .param2 = 1,
82902 +};
82903 +struct size_overflow_hash _000451_hash = {
82904 + .next = NULL,
82905 + .name = "asd_store_update_bios",
82906 + .file = "drivers/scsi/aic94xx/aic94xx_init.c",
82907 + .param4 = 1,
82908 +};
82909 +struct size_overflow_hash _000452_hash = {
82910 + .next = NULL,
82911 + .name = "asix_read_cmd",
82912 + .file = "drivers/net/usb/asix.c",
82913 + .param5 = 1,
82914 +};
82915 +struct size_overflow_hash _000453_hash = {
82916 + .next = NULL,
82917 + .name = "asix_write_cmd",
82918 + .file = "drivers/net/usb/asix.c",
82919 + .param5 = 1,
82920 +};
82921 +struct size_overflow_hash _000454_hash = {
82922 + .next = NULL,
82923 + .name = "asn1_octets_decode",
82924 + .file = "net/ipv4/netfilter/nf_nat_snmp_basic.c",
82925 + .param2 = 1,
82926 +};
82927 +struct size_overflow_hash _000455_hash = {
82928 + .next = NULL,
82929 + .name = "asn1_oid_decode",
82930 + .file = "net/ipv4/netfilter/nf_nat_snmp_basic.c",
82931 + .param2 = 1,
82932 +};
82933 +struct size_overflow_hash _000456_hash = {
82934 + .next = NULL,
82935 + .name = "asn1_oid_decode",
82936 + .file = "fs/cifs/asn1.c",
82937 + .param2 = 1,
82938 +};
82939 +struct size_overflow_hash _000457_hash = {
82940 + .next = NULL,
82941 + .name = "ath6kl_add_bss_if_needed",
82942 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
82943 + .param6 = 1,
82944 +};
82945 +struct size_overflow_hash _000458_hash = {
82946 + .next = NULL,
82947 + .name = "ath6kl_debug_roam_tbl_event",
82948 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82949 + .param3 = 1,
82950 +};
82951 +struct size_overflow_hash _000459_hash = {
82952 + .next = NULL,
82953 + .name = "ath6kl_disconnect_timeout_read",
82954 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82955 + .param3 = 1,
82956 +};
82957 +struct size_overflow_hash _000460_hash = {
82958 + .next = NULL,
82959 + .name = "ath6kl_endpoint_stats_read",
82960 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82961 + .param3 = 1,
82962 +};
82963 +struct size_overflow_hash _000461_hash = {
82964 + .next = NULL,
82965 + .name = "ath6kl_fwlog_mask_read",
82966 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82967 + .param3 = 1,
82968 +};
82969 +struct size_overflow_hash _000462_hash = {
82970 + .next = NULL,
82971 + .name = "ath6kl_fwlog_read",
82972 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82973 + .param3 = 1,
82974 +};
82975 +struct size_overflow_hash _000463_hash = {
82976 + .next = NULL,
82977 + .name = "ath6kl_keepalive_read",
82978 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82979 + .param3 = 1,
82980 +};
82981 +struct size_overflow_hash _000464_hash = {
82982 + .next = NULL,
82983 + .name = "ath6kl_lrssi_roam_read",
82984 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82985 + .param3 = 1,
82986 +};
82987 +struct size_overflow_hash _000465_hash = {
82988 + .next = NULL,
82989 + .name = "ath6kl_regdump_read",
82990 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82991 + .param3 = 1,
82992 +};
82993 +struct size_overflow_hash _000466_hash = {
82994 + .next = NULL,
82995 + .name = "ath6kl_regread_read",
82996 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
82997 + .param3 = 1,
82998 +};
82999 +struct size_overflow_hash _000467_hash = {
83000 + .next = NULL,
83001 + .name = "ath6kl_regwrite_read",
83002 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
83003 + .param3 = 1,
83004 +};
83005 +struct size_overflow_hash _000468_hash = {
83006 + .next = NULL,
83007 + .name = "ath6kl_roam_table_read",
83008 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
83009 + .param3 = 1,
83010 +};
83011 +struct size_overflow_hash _000469_hash = {
83012 + .next = NULL,
83013 + .name = "ath6kl_send_go_probe_resp",
83014 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
83015 + .param3 = 1,
83016 +};
83017 +struct size_overflow_hash _000470_hash = {
83018 + .next = NULL,
83019 + .name = "ath6kl_set_ap_probe_resp_ies",
83020 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
83021 + .param3 = 1,
83022 +};
83023 +struct size_overflow_hash _000471_hash = {
83024 + .next = NULL,
83025 + .name = "ath6kl_set_assoc_req_ies",
83026 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
83027 + .param3 = 1,
83028 +};
83029 +struct size_overflow_hash _000472_hash = {
83030 + .next = NULL,
83031 + .name = "ath6kl_tm_rx_report_event",
83032 + .file = "drivers/net/wireless/ath/ath6kl/testmode.c",
83033 + .param3 = 1,
83034 +};
83035 +struct size_overflow_hash _000473_hash = {
83036 + .next = NULL,
83037 + .name = "ath6kl_wmi_send_action_cmd",
83038 + .file = "drivers/net/wireless/ath/ath6kl/wmi.c",
83039 + .param7 = 1,
83040 +};
83041 +struct size_overflow_hash _000474_hash = {
83042 + .next = NULL,
83043 + .name = "ath6kl_wmi_send_mgmt_cmd",
83044 + .file = "drivers/net/wireless/ath/ath6kl/wmi.c",
83045 + .param7 = 1,
83046 +};
83047 +struct size_overflow_hash _000475_hash = {
83048 + .next = NULL,
83049 + .name = "ath9k_debugfs_read_buf",
83050 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
83051 + .param3 = 1,
83052 +};
83053 +struct size_overflow_hash _000476_hash = {
83054 + .next = NULL,
83055 + .name = "atk_debugfs_ggrp_read",
83056 + .file = "drivers/hwmon/asus_atk0110.c",
83057 + .param3 = 1,
83058 +};
83059 +struct size_overflow_hash _000477_hash = {
83060 + .next = NULL,
83061 + .name = "atm_get_addr",
83062 + .file = "net/atm/addr.c",
83063 + .param3 = 1,
83064 +};
83065 +struct size_overflow_hash _000478_hash = {
83066 + .next = NULL,
83067 + .name = "attach_hdlc_protocol",
83068 + .file = "include/linux/hdlc.h",
83069 + .param3 = 1,
83070 +};
83071 +struct size_overflow_hash _000479_hash = {
83072 + .next = NULL,
83073 + .name = "av7110_vbi_write",
83074 + .file = "drivers/media/dvb/ttpci/av7110_v4l.c",
83075 + .param3 = 1,
83076 +};
83077 +struct size_overflow_hash _000480_hash = {
83078 + .next = NULL,
83079 + .name = "ax25_setsockopt",
83080 + .file = "net/ax25/af_ax25.c",
83081 + .param5 = 1,
83082 +};
83083 +struct size_overflow_hash _000481_hash = {
83084 + .next = NULL,
83085 + .name = "b43_debugfs_read",
83086 + .file = "drivers/net/wireless/b43/debugfs.c",
83087 + .param3 = 1,
83088 +};
83089 +struct size_overflow_hash _000482_hash = {
83090 + .next = NULL,
83091 + .name = "b43_debugfs_write",
83092 + .file = "drivers/net/wireless/b43/debugfs.c",
83093 + .param3 = 1,
83094 +};
83095 +struct size_overflow_hash _000483_hash = {
83096 + .next = NULL,
83097 + .name = "b43legacy_debugfs_read",
83098 + .file = "drivers/net/wireless/b43legacy/debugfs.c",
83099 + .param3 = 1,
83100 +};
83101 +struct size_overflow_hash _000484_hash = {
83102 + .next = NULL,
83103 + .name = "b43legacy_debugfs_write",
83104 + .file = "drivers/net/wireless/b43legacy/debugfs.c",
83105 + .param3 = 1,
83106 +};
83107 +struct size_overflow_hash _000485_hash = {
83108 + .next = NULL,
83109 + .name = "b43_nphy_load_samples",
83110 + .file = "drivers/net/wireless/b43/phy_n.c",
83111 + .param3 = 1,
83112 +};
83113 +struct size_overflow_hash _000486_hash = {
83114 + .next = NULL,
83115 + .name = "bch_alloc",
83116 + .file = "lib/bch.c",
83117 + .param1 = 1,
83118 +};
83119 +struct size_overflow_hash _000487_hash = {
83120 + .next = NULL,
83121 + .name = "bfad_debugfs_read",
83122 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
83123 + .param3 = 1,
83124 +};
83125 +struct size_overflow_hash _000488_hash = {
83126 + .next = NULL,
83127 + .name = "bfad_debugfs_read_regrd",
83128 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
83129 + .param3 = 1,
83130 +};
83131 +struct size_overflow_hash _000489_hash = {
83132 + .next = NULL,
83133 + .name = "bfad_debugfs_write_regrd",
83134 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
83135 + .param3 = 1,
83136 +};
83137 +struct size_overflow_hash _000490_hash = {
83138 + .next = NULL,
83139 + .name = "bfad_debugfs_write_regwr",
83140 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
83141 + .param3 = 1,
83142 +};
83143 +struct size_overflow_hash _000491_hash = {
83144 + .next = NULL,
83145 + .name = "bits_to_user",
83146 + .file = "drivers/input/evdev.c",
83147 + .param3 = 1,
83148 +};
83149 +struct size_overflow_hash _000492_hash = {
83150 + .next = NULL,
83151 + .name = "bl_pipe_downcall",
83152 + .file = "fs/nfs/blocklayout/blocklayoutdev.c",
83153 + .param3 = 1,
83154 +};
83155 +struct size_overflow_hash _000493_hash = {
83156 + .next = NULL,
83157 + .name = "bm_entry_read",
83158 + .file = "fs/binfmt_misc.c",
83159 + .param3 = 1,
83160 +};
83161 +struct size_overflow_hash _000494_hash = {
83162 + .next = NULL,
83163 + .name = "bm_realloc_pages",
83164 + .file = "drivers/block/drbd/drbd_bitmap.c",
83165 + .param2 = 1,
83166 +};
83167 +struct size_overflow_hash _000495_hash = {
83168 + .next = NULL,
83169 + .name = "bm_status_read",
83170 + .file = "fs/binfmt_misc.c",
83171 + .param3 = 1,
83172 +};
83173 +struct size_overflow_hash _000496_hash = {
83174 + .next = NULL,
83175 + .name = "bnad_debugfs_read",
83176 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
83177 + .param3 = 1,
83178 +};
83179 +struct size_overflow_hash _000497_hash = {
83180 + .next = NULL,
83181 + .name = "bnad_debugfs_read_regrd",
83182 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
83183 + .param3 = 1,
83184 +};
83185 +struct size_overflow_hash _000498_hash = {
83186 + .next = NULL,
83187 + .name = "bnad_debugfs_write_regrd",
83188 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
83189 + .param3 = 1,
83190 +};
83191 +struct size_overflow_hash _000499_hash = {
83192 + .next = NULL,
83193 + .name = "bnad_debugfs_write_regwr",
83194 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
83195 + .param3 = 1,
83196 +};
83197 +struct size_overflow_hash _000500_hash = {
83198 + .next = NULL,
83199 + .name = "bnx2fc_cmd_mgr_alloc",
83200 + .file = "drivers/scsi/bnx2fc/bnx2fc_io.c",
83201 + .param2 = 1,
83202 + .param3 = 1,
83203 +};
83204 +struct size_overflow_hash _000502_hash = {
83205 + .next = NULL,
83206 + .name = "btmrvl_curpsmode_read",
83207 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83208 + .param3 = 1,
83209 +};
83210 +struct size_overflow_hash _000503_hash = {
83211 + .next = NULL,
83212 + .name = "btmrvl_gpiogap_read",
83213 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83214 + .param3 = 1,
83215 +};
83216 +struct size_overflow_hash _000504_hash = {
83217 + .next = NULL,
83218 + .name = "btmrvl_gpiogap_write",
83219 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83220 + .param3 = 1,
83221 +};
83222 +struct size_overflow_hash _000505_hash = {
83223 + .next = NULL,
83224 + .name = "btmrvl_hscfgcmd_read",
83225 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83226 + .param3 = 1,
83227 +};
83228 +struct size_overflow_hash _000506_hash = {
83229 + .next = NULL,
83230 + .name = "btmrvl_hscfgcmd_write",
83231 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83232 + .param3 = 1,
83233 +};
83234 +struct size_overflow_hash _000507_hash = {
83235 + .next = &_000006_hash,
83236 + .name = "btmrvl_hscmd_read",
83237 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83238 + .param3 = 1,
83239 +};
83240 +struct size_overflow_hash _000508_hash = {
83241 + .next = NULL,
83242 + .name = "btmrvl_hscmd_write",
83243 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83244 + .param3 = 1,
83245 +};
83246 +struct size_overflow_hash _000509_hash = {
83247 + .next = NULL,
83248 + .name = "btmrvl_hsmode_read",
83249 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83250 + .param3 = 1,
83251 +};
83252 +struct size_overflow_hash _000510_hash = {
83253 + .next = NULL,
83254 + .name = "btmrvl_hsmode_write",
83255 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83256 + .param3 = 1,
83257 +};
83258 +struct size_overflow_hash _000511_hash = {
83259 + .next = NULL,
83260 + .name = "btmrvl_hsstate_read",
83261 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83262 + .param3 = 1,
83263 +};
83264 +struct size_overflow_hash _000512_hash = {
83265 + .next = NULL,
83266 + .name = "btmrvl_pscmd_read",
83267 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83268 + .param3 = 1,
83269 +};
83270 +struct size_overflow_hash _000513_hash = {
83271 + .next = NULL,
83272 + .name = "btmrvl_pscmd_write",
83273 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83274 + .param3 = 1,
83275 +};
83276 +struct size_overflow_hash _000514_hash = {
83277 + .next = NULL,
83278 + .name = "btmrvl_psmode_read",
83279 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83280 + .param3 = 1,
83281 +};
83282 +struct size_overflow_hash _000515_hash = {
83283 + .next = NULL,
83284 + .name = "btmrvl_psmode_write",
83285 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83286 + .param3 = 1,
83287 +};
83288 +struct size_overflow_hash _000516_hash = {
83289 + .next = NULL,
83290 + .name = "btmrvl_psstate_read",
83291 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83292 + .param3 = 1,
83293 +};
83294 +struct size_overflow_hash _000517_hash = {
83295 + .next = NULL,
83296 + .name = "btmrvl_txdnldready_read",
83297 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
83298 + .param3 = 1,
83299 +};
83300 +struct size_overflow_hash _000518_hash = {
83301 + .next = NULL,
83302 + .name = "btrfs_alloc_delayed_item",
83303 + .file = "fs/btrfs/delayed-inode.c",
83304 + .param1 = 1,
83305 +};
83306 +struct size_overflow_hash _000519_hash = {
83307 + .next = NULL,
83308 + .name = "btrfs_copy_from_user",
83309 + .file = "fs/btrfs/file.c",
83310 + .param3 = 1,
83311 +};
83312 +struct size_overflow_hash _000520_hash = {
83313 + .next = NULL,
83314 + .name = "__btrfs_map_block",
83315 + .file = "fs/btrfs/volumes.c",
83316 + .param3 = 1,
83317 +};
83318 +struct size_overflow_hash _000521_hash = {
83319 + .next = NULL,
83320 + .name = "__c4iw_init_resource_fifo",
83321 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
83322 + .param3 = 1,
83323 +};
83324 +struct size_overflow_hash _000522_hash = {
83325 + .next = NULL,
83326 + .name = "cache_do_downcall",
83327 + .file = "net/sunrpc/cache.c",
83328 + .param3 = 1,
83329 +};
83330 +struct size_overflow_hash _000523_hash = {
83331 + .next = NULL,
83332 + .name = "cachefiles_daemon_write",
83333 + .file = "fs/cachefiles/daemon.c",
83334 + .param3 = 1,
83335 +};
83336 +struct size_overflow_hash _000524_hash = {
83337 + .next = NULL,
83338 + .name = "cache_read",
83339 + .file = "net/sunrpc/cache.c",
83340 + .param3 = 1,
83341 +};
83342 +struct size_overflow_hash _000525_hash = {
83343 + .next = NULL,
83344 + .name = "ca_extend",
83345 + .file = "drivers/md/persistent-data/dm-space-map-checker.c",
83346 + .param2 = 1,
83347 +};
83348 +struct size_overflow_hash _000526_hash = {
83349 + .next = NULL,
83350 + .name = "calc_hmac",
83351 + .file = "security/keys/encrypted-keys/encrypted.c",
83352 + .param3 = 1,
83353 +};
83354 +struct size_overflow_hash _000527_hash = {
83355 + .next = NULL,
83356 + .name = "capi_write",
83357 + .file = "drivers/isdn/capi/capi.c",
83358 + .param3 = 1,
83359 +};
83360 +struct size_overflow_hash _000528_hash = {
83361 + .next = NULL,
83362 + .name = "carl9170_cmd_buf",
83363 + .file = "drivers/net/wireless/ath/carl9170/cmd.c",
83364 + .param3 = 1,
83365 +};
83366 +struct size_overflow_hash _000529_hash = {
83367 + .next = NULL,
83368 + .name = "carl9170_debugfs_read",
83369 + .file = "drivers/net/wireless/ath/carl9170/debug.c",
83370 + .param3 = 1,
83371 +};
83372 +struct size_overflow_hash _000530_hash = {
83373 + .next = NULL,
83374 + .name = "carl9170_debugfs_write",
83375 + .file = "drivers/net/wireless/ath/carl9170/debug.c",
83376 + .param3 = 1,
83377 +};
83378 +struct size_overflow_hash _000531_hash = {
83379 + .next = NULL,
83380 + .name = "cciss_proc_write",
83381 + .file = "drivers/block/cciss.c",
83382 + .param3 = 1,
83383 +};
83384 +struct size_overflow_hash _000532_hash = {
83385 + .next = NULL,
83386 + .name = "ceph_buffer_new",
83387 + .file = "include/linux/ceph/buffer.h",
83388 + .param1 = 1,
83389 +};
83390 +struct size_overflow_hash _000533_hash = {
83391 + .next = NULL,
83392 + .name = "ceph_copy_page_vector_to_user",
83393 + .file = "include/linux/ceph/libceph.h",
83394 + .param4 = 1,
83395 +};
83396 +struct size_overflow_hash _000534_hash = {
83397 + .next = NULL,
83398 + .name = "ceph_copy_user_to_page_vector",
83399 + .file = "include/linux/ceph/libceph.h",
83400 + .param4 = 1,
83401 +};
83402 +struct size_overflow_hash _000535_hash = {
83403 + .next = NULL,
83404 + .name = "ceph_read_dir",
83405 + .file = "fs/ceph/dir.c",
83406 + .param3 = 1,
83407 +};
83408 +struct size_overflow_hash _000536_hash = {
83409 + .next = NULL,
83410 + .name = "ceph_setxattr",
83411 + .file = "fs/ceph/xattr.c",
83412 + .param4 = 1,
83413 +};
83414 +struct size_overflow_hash _000537_hash = {
83415 + .next = NULL,
83416 + .name = "cfg80211_connect_result",
83417 + .file = "include/net/cfg80211.h",
83418 + .param4 = 1,
83419 + .param6 = 1,
83420 +};
83421 +struct size_overflow_hash _000539_hash = {
83422 + .next = NULL,
83423 + .name = "cfg80211_disconnected",
83424 + .file = "include/net/cfg80211.h",
83425 + .param4 = 1,
83426 +};
83427 +struct size_overflow_hash _000540_hash = {
83428 + .next = NULL,
83429 + .name = "cfg80211_inform_bss",
83430 + .file = "include/net/cfg80211.h",
83431 + .param8 = 1,
83432 +};
83433 +struct size_overflow_hash _000541_hash = {
83434 + .next = NULL,
83435 + .name = "cfg80211_inform_bss_frame",
83436 + .file = "include/net/cfg80211.h",
83437 + .param4 = 1,
83438 +};
83439 +struct size_overflow_hash _000542_hash = {
83440 + .next = NULL,
83441 + .name = "cfg80211_roamed_bss",
83442 + .file = "include/net/cfg80211.h",
83443 + .param4 = 1,
83444 + .param6 = 1,
83445 +};
83446 +struct size_overflow_hash _000544_hash = {
83447 + .next = NULL,
83448 + .name = "cfi_read_pri",
83449 + .file = "include/linux/mtd/cfi.h",
83450 + .param3 = 1,
83451 +};
83452 +struct size_overflow_hash _000545_hash = {
83453 + .next = NULL,
83454 + .name = "channel_type_read",
83455 + .file = "net/mac80211/debugfs.c",
83456 + .param3 = 1,
83457 +};
83458 +struct size_overflow_hash _000546_hash = {
83459 + .next = NULL,
83460 + .name = "cifs_idmap_key_instantiate",
83461 + .file = "fs/cifs/cifsacl.c",
83462 + .param3 = 1,
83463 +};
83464 +struct size_overflow_hash _000547_hash = {
83465 + .next = NULL,
83466 + .name = "cifs_readdata_alloc",
83467 + .file = "fs/cifs/cifssmb.c",
83468 + .param1 = 1,
83469 +};
83470 +struct size_overflow_hash _000548_hash = {
83471 + .next = NULL,
83472 + .name = "cifs_security_flags_proc_write",
83473 + .file = "fs/cifs/cifs_debug.c",
83474 + .param3 = 1,
83475 +};
83476 +struct size_overflow_hash _000549_hash = {
83477 + .next = NULL,
83478 + .name = "cifs_setxattr",
83479 + .file = "fs/cifs/xattr.c",
83480 + .param4 = 1,
83481 +};
83482 +struct size_overflow_hash _000550_hash = {
83483 + .next = NULL,
83484 + .name = "cifs_spnego_key_instantiate",
83485 + .file = "fs/cifs/cifs_spnego.c",
83486 + .param3 = 1,
83487 +};
83488 +struct size_overflow_hash _000551_hash = {
83489 + .next = NULL,
83490 + .name = "cifs_writedata_alloc",
83491 + .file = "fs/cifs/cifssmb.c",
83492 + .param1 = 1,
83493 +};
83494 +struct size_overflow_hash _000552_hash = {
83495 + .next = NULL,
83496 + .name = "ci_ll_write",
83497 + .file = "drivers/media/dvb/ttpci/av7110_ca.c",
83498 + .param4 = 1,
83499 +};
83500 +struct size_overflow_hash _000553_hash = {
83501 + .next = NULL,
83502 + .name = "clusterip_proc_write",
83503 + .file = "net/ipv4/netfilter/ipt_CLUSTERIP.c",
83504 + .param3 = 1,
83505 +};
83506 +struct size_overflow_hash _000554_hash = {
83507 + .next = &_000108_hash,
83508 + .name = "cm4040_write",
83509 + .file = "drivers/char/pcmcia/cm4040_cs.c",
83510 + .param3 = 1,
83511 +};
83512 +struct size_overflow_hash _000555_hash = {
83513 + .next = NULL,
83514 + .name = "cm_copy_private_data",
83515 + .file = "drivers/infiniband/core/cm.c",
83516 + .param2 = 1,
83517 +};
83518 +struct size_overflow_hash _000556_hash = {
83519 + .next = NULL,
83520 + .name = "cmm_write",
83521 + .file = "drivers/char/pcmcia/cm4000_cs.c",
83522 + .param3 = 1,
83523 +};
83524 +struct size_overflow_hash _000557_hash = {
83525 + .next = NULL,
83526 + .name = "cm_write",
83527 + .file = "drivers/acpi/custom_method.c",
83528 + .param3 = 1,
83529 +};
83530 +struct size_overflow_hash _000558_hash = {
83531 + .next = NULL,
83532 + .name = "coda_psdev_read",
83533 + .file = "fs/coda/psdev.c",
83534 + .param3 = 1,
83535 +};
83536 +struct size_overflow_hash _000559_hash = {
83537 + .next = NULL,
83538 + .name = "coda_psdev_write",
83539 + .file = "fs/coda/psdev.c",
83540 + .param3 = 1,
83541 +};
83542 +struct size_overflow_hash _000560_hash = {
83543 + .next = NULL,
83544 + .name = "codec_list_read_file",
83545 + .file = "sound/soc/soc-core.c",
83546 + .param3 = 1,
83547 +};
83548 +struct size_overflow_hash _000561_hash = {
83549 + .next = NULL,
83550 + .name = "codec_reg_read_file",
83551 + .file = "sound/soc/soc-core.c",
83552 + .param3 = 1,
83553 +};
83554 +struct size_overflow_hash _000562_hash = {
83555 + .next = NULL,
83556 + .name = "command_file_write",
83557 + .file = "drivers/misc/ibmasm/ibmasmfs.c",
83558 + .param3 = 1,
83559 +};
83560 +struct size_overflow_hash _000563_hash = {
83561 + .next = NULL,
83562 + .name = "command_write",
83563 + .file = "drivers/uwb/uwb-debug.c",
83564 + .param3 = 1,
83565 +};
83566 +struct size_overflow_hash _000564_hash = {
83567 + .next = NULL,
83568 + .name = "concat_writev",
83569 + .file = "drivers/mtd/mtdconcat.c",
83570 + .param3 = 1,
83571 +};
83572 +struct size_overflow_hash _000565_hash = {
83573 + .next = NULL,
83574 + .name = "configfs_read_file",
83575 + .file = "fs/configfs/file.c",
83576 + .param3 = 1,
83577 +};
83578 +struct size_overflow_hash _000566_hash = {
83579 + .next = NULL,
83580 + .name = "context_alloc",
83581 + .file = "drivers/md/dm-raid.c",
83582 + .param3 = 1,
83583 +};
83584 +struct size_overflow_hash _000567_hash = {
83585 + .next = NULL,
83586 + .name = "copy_counters_to_user",
83587 + .file = "net/bridge/netfilter/ebtables.c",
83588 + .param5 = 1,
83589 +};
83590 +struct size_overflow_hash _000568_hash = {
83591 + .next = NULL,
83592 + .name = "copy_entries_to_user",
83593 + .file = "net/ipv6/netfilter/ip6_tables.c",
83594 + .param1 = 1,
83595 +};
83596 +struct size_overflow_hash _000569_hash = {
83597 + .next = NULL,
83598 + .name = "copy_entries_to_user",
83599 + .file = "net/ipv4/netfilter/arp_tables.c",
83600 + .param1 = 1,
83601 +};
83602 +struct size_overflow_hash _000570_hash = {
83603 + .next = NULL,
83604 + .name = "copy_entries_to_user",
83605 + .file = "net/ipv4/netfilter/ip_tables.c",
83606 + .param1 = 1,
83607 +};
83608 +struct size_overflow_hash _000571_hash = {
83609 + .next = NULL,
83610 + .name = "copy_from_user_toio",
83611 + .file = "include/sound/core.h",
83612 + .param3 = 1,
83613 +};
83614 +struct size_overflow_hash _000572_hash = {
83615 + .next = NULL,
83616 + .name = "copy_macs",
83617 + .file = "net/atm/mpc.c",
83618 + .param4 = 1,
83619 +};
83620 +struct size_overflow_hash _000573_hash = {
83621 + .next = NULL,
83622 + .name = "copy_to_user_fromio",
83623 + .file = "include/sound/core.h",
83624 + .param3 = 1,
83625 +};
83626 +struct size_overflow_hash _000574_hash = {
83627 + .next = NULL,
83628 + .name = "cosa_write",
83629 + .file = "drivers/net/wan/cosa.c",
83630 + .param3 = 1,
83631 +};
83632 +struct size_overflow_hash _000575_hash = {
83633 + .next = NULL,
83634 + .name = "create_attr_set",
83635 + .file = "drivers/platform/x86/thinkpad_acpi.c",
83636 + .param1 = 1,
83637 +};
83638 +struct size_overflow_hash _000576_hash = {
83639 + .next = NULL,
83640 + .name = "create_entry",
83641 + .file = "fs/binfmt_misc.c",
83642 + .param2 = 1,
83643 +};
83644 +struct size_overflow_hash _000577_hash = {
83645 + .next = NULL,
83646 + .name = "create_gpadl_header",
83647 + .file = "drivers/hv/channel.c",
83648 + .param2 = 1,
83649 +};
83650 +struct size_overflow_hash _000578_hash = {
83651 + .next = NULL,
83652 + .name = "create_queues",
83653 + .file = "drivers/atm/ambassador.c",
83654 + .param2 = 1,
83655 + .param3 = 1,
83656 +};
83657 +struct size_overflow_hash _000580_hash = {
83658 + .next = NULL,
83659 + .name = "_create_sg_bios",
83660 + .file = "drivers/scsi/osd/osd_initiator.c",
83661 + .param4 = 1,
83662 +};
83663 +struct size_overflow_hash _000581_hash = {
83664 + .next = NULL,
83665 + .name = "cryptd_alloc_instance",
83666 + .file = "crypto/cryptd.c",
83667 + .param2 = 1,
83668 + .param3 = 1,
83669 +};
83670 +struct size_overflow_hash _000583_hash = {
83671 + .next = NULL,
83672 + .name = "cryptd_hash_setkey",
83673 + .file = "crypto/cryptd.c",
83674 + .param3 = 1,
83675 +};
83676 +struct size_overflow_hash _000584_hash = {
83677 + .next = NULL,
83678 + .name = "crypto_authenc_esn_setkey",
83679 + .file = "crypto/authencesn.c",
83680 + .param3 = 1,
83681 +};
83682 +struct size_overflow_hash _000585_hash = {
83683 + .next = NULL,
83684 + .name = "crypto_authenc_setkey",
83685 + .file = "crypto/authenc.c",
83686 + .param3 = 1,
83687 +};
83688 +struct size_overflow_hash _000586_hash = {
83689 + .next = NULL,
83690 + .name = "ctrl_out",
83691 + .file = "drivers/usb/misc/usbtest.c",
83692 + .param3 = 1,
83693 + .param5 = 1,
83694 +};
83695 +struct size_overflow_hash _000588_hash = {
83696 + .next = NULL,
83697 + .name = "cx18_copy_buf_to_user",
83698 + .file = "drivers/media/video/cx18/cx18-fileops.c",
83699 + .param4 = 1,
83700 +};
83701 +struct size_overflow_hash _000589_hash = {
83702 + .next = NULL,
83703 + .name = "cx24116_writeregN",
83704 + .file = "drivers/media/dvb/frontends/cx24116.c",
83705 + .param4 = 1,
83706 +};
83707 +struct size_overflow_hash _000590_hash = {
83708 + .next = NULL,
83709 + .name = "cxgb_alloc_mem",
83710 + .file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
83711 + .param1 = 1,
83712 +};
83713 +struct size_overflow_hash _000591_hash = {
83714 + .next = NULL,
83715 + .name = "cxgbi_alloc_big_mem",
83716 + .file = "drivers/scsi/cxgbi/libcxgbi.h",
83717 + .param1 = 1,
83718 +};
83719 +struct size_overflow_hash _000592_hash = {
83720 + .next = NULL,
83721 + .name = "cxgbi_device_register",
83722 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
83723 + .param1 = 1,
83724 + .param2 = 1,
83725 +};
83726 +struct size_overflow_hash _000594_hash = {
83727 + .next = NULL,
83728 + .name = "__cxio_init_resource_fifo",
83729 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
83730 + .param3 = 1,
83731 +};
83732 +struct size_overflow_hash _000595_hash = {
83733 + .next = NULL,
83734 + .name = "dac960_user_command_proc_write",
83735 + .file = "drivers/block/DAC960.c",
83736 + .param3 = 1,
83737 +};
83738 +struct size_overflow_hash _000596_hash = {
83739 + .next = NULL,
83740 + .name = "dai_list_read_file",
83741 + .file = "sound/soc/soc-core.c",
83742 + .param3 = 1,
83743 +};
83744 +struct size_overflow_hash _000597_hash = {
83745 + .next = NULL,
83746 + .name = "dapm_bias_read_file",
83747 + .file = "sound/soc/soc-dapm.c",
83748 + .param3 = 1,
83749 +};
83750 +struct size_overflow_hash _000598_hash = {
83751 + .next = NULL,
83752 + .name = "dapm_widget_power_read_file",
83753 + .file = "sound/soc/soc-dapm.c",
83754 + .param3 = 1,
83755 +};
83756 +struct size_overflow_hash _000599_hash = {
83757 + .next = NULL,
83758 + .name = "datablob_format",
83759 + .file = "security/keys/encrypted-keys/encrypted.c",
83760 + .param2 = 1,
83761 +};
83762 +struct size_overflow_hash _000600_hash = {
83763 + .next = NULL,
83764 + .name = "dbgfs_frame",
83765 + .file = "drivers/net/caif/caif_spi.c",
83766 + .param3 = 1,
83767 +};
83768 +struct size_overflow_hash _000601_hash = {
83769 + .next = NULL,
83770 + .name = "dbgfs_state",
83771 + .file = "drivers/net/caif/caif_spi.c",
83772 + .param3 = 1,
83773 +};
83774 +struct size_overflow_hash _000602_hash = {
83775 + .next = NULL,
83776 + .name = "dccp_feat_clone_sp_val",
83777 + .file = "net/dccp/feat.c",
83778 + .param3 = 1,
83779 +};
83780 +struct size_overflow_hash _000603_hash = {
83781 + .next = NULL,
83782 + .name = "dccp_setsockopt_ccid",
83783 + .file = "net/dccp/proto.c",
83784 + .param4 = 1,
83785 +};
83786 +struct size_overflow_hash _000604_hash = {
83787 + .next = NULL,
83788 + .name = "dccp_setsockopt_service",
83789 + .file = "net/dccp/proto.c",
83790 + .param4 = 1,
83791 +};
83792 +struct size_overflow_hash _000605_hash = {
83793 + .next = NULL,
83794 + .name = "ddb_input_read",
83795 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
83796 + .param3 = 1,
83797 +};
83798 +struct size_overflow_hash _000606_hash = {
83799 + .next = NULL,
83800 + .name = "ddb_output_write",
83801 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
83802 + .param3 = 1,
83803 +};
83804 +struct size_overflow_hash _000607_hash = {
83805 + .next = NULL,
83806 + .name = "ddp_make_gl",
83807 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
83808 + .param1 = 1,
83809 +};
83810 +struct size_overflow_hash _000608_hash = {
83811 + .next = NULL,
83812 + .name = "debugfs_read",
83813 + .file = "drivers/infiniband/hw/cxgb4/device.c",
83814 + .param3 = 1,
83815 +};
83816 +struct size_overflow_hash _000609_hash = {
83817 + .next = NULL,
83818 + .name = "debugfs_read",
83819 + .file = "drivers/char/virtio_console.c",
83820 + .param3 = 1,
83821 +};
83822 +struct size_overflow_hash _000610_hash = {
83823 + .next = NULL,
83824 + .name = "debug_output",
83825 + .file = "drivers/usb/host/ohci-dbg.c",
83826 + .param3 = 1,
83827 +};
83828 +struct size_overflow_hash _000611_hash = {
83829 + .next = NULL,
83830 + .name = "debug_output",
83831 + .file = "drivers/usb/host/ehci-dbg.c",
83832 + .param3 = 1,
83833 +};
83834 +struct size_overflow_hash _000612_hash = {
83835 + .next = NULL,
83836 + .name = "debug_read",
83837 + .file = "fs/ocfs2/dlm/dlmdebug.c",
83838 + .param3 = 1,
83839 +};
83840 +struct size_overflow_hash _000613_hash = {
83841 + .next = NULL,
83842 + .name = "dev_config",
83843 + .file = "drivers/usb/gadget/inode.c",
83844 + .param3 = 1,
83845 +};
83846 +struct size_overflow_hash _000614_hash = {
83847 + .next = NULL,
83848 + .name = "device_write",
83849 + .file = "fs/dlm/user.c",
83850 + .param3 = 1,
83851 +};
83852 +struct size_overflow_hash _000615_hash = {
83853 + .next = NULL,
83854 + .name = "dev_read",
83855 + .file = "drivers/media/video/gspca/gspca.c",
83856 + .param3 = 1,
83857 +};
83858 +struct size_overflow_hash _000616_hash = {
83859 + .next = NULL,
83860 + .name = "dfs_file_read",
83861 + .file = "drivers/mtd/ubi/debug.c",
83862 + .param3 = 1,
83863 +};
83864 +struct size_overflow_hash _000617_hash = {
83865 + .next = NULL,
83866 + .name = "dfs_file_write",
83867 + .file = "drivers/mtd/ubi/debug.c",
83868 + .param3 = 1,
83869 +};
83870 +struct size_overflow_hash _000618_hash = {
83871 + .next = NULL,
83872 + .name = "direct_entry",
83873 + .file = "drivers/misc/lkdtm.c",
83874 + .param3 = 1,
83875 +};
83876 +struct size_overflow_hash _000619_hash = {
83877 + .next = NULL,
83878 + .name = "dispatch_proc_write",
83879 + .file = "drivers/platform/x86/thinkpad_acpi.c",
83880 + .param3 = 1,
83881 +};
83882 +struct size_overflow_hash _000620_hash = {
83883 + .next = NULL,
83884 + .name = "diva_os_malloc",
83885 + .file = "drivers/isdn/hardware/eicon/platform.h",
83886 + .param2 = 1,
83887 +};
83888 +struct size_overflow_hash _000621_hash = {
83889 + .next = NULL,
83890 + .name = "dlmfs_file_read",
83891 + .file = "fs/ocfs2/dlmfs/dlmfs.c",
83892 + .param3 = 1,
83893 +};
83894 +struct size_overflow_hash _000622_hash = {
83895 + .next = NULL,
83896 + .name = "dlmfs_file_write",
83897 + .file = "fs/ocfs2/dlmfs/dlmfs.c",
83898 + .param3 = 1,
83899 +};
83900 +struct size_overflow_hash _000623_hash = {
83901 + .next = NULL,
83902 + .name = "dma_attach",
83903 + .file = "drivers/net/wireless/brcm80211/brcmsmac/dma.c",
83904 + .param6 = 1,
83905 + .param7 = 1,
83906 +};
83907 +struct size_overflow_hash _000625_hash = {
83908 + .next = NULL,
83909 + .name = "dma_rx_errors_read",
83910 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83911 + .param3 = 1,
83912 +};
83913 +struct size_overflow_hash _000626_hash = {
83914 + .next = NULL,
83915 + .name = "dma_rx_requested_read",
83916 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83917 + .param3 = 1,
83918 +};
83919 +struct size_overflow_hash _000627_hash = {
83920 + .next = NULL,
83921 + .name = "dma_show_regs",
83922 + .file = "drivers/tty/serial/mfd.c",
83923 + .param3 = 1,
83924 +};
83925 +struct size_overflow_hash _000628_hash = {
83926 + .next = NULL,
83927 + .name = "dma_tx_errors_read",
83928 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83929 + .param3 = 1,
83930 +};
83931 +struct size_overflow_hash _000629_hash = {
83932 + .next = NULL,
83933 + .name = "dma_tx_requested_read",
83934 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83935 + .param3 = 1,
83936 +};
83937 +struct size_overflow_hash _000630_hash = {
83938 + .next = NULL,
83939 + .name = "dm_read",
83940 + .file = "drivers/net/usb/dm9601.c",
83941 + .param3 = 1,
83942 +};
83943 +struct size_overflow_hash _000631_hash = {
83944 + .next = NULL,
83945 + .name = "dm_vcalloc",
83946 + .file = "include/linux/device-mapper.h",
83947 + .param1 = 1,
83948 + .param2 = 1,
83949 +};
83950 +struct size_overflow_hash _000633_hash = {
83951 + .next = NULL,
83952 + .name = "dm_write",
83953 + .file = "drivers/net/usb/dm9601.c",
83954 + .param3 = 1,
83955 +};
83956 +struct size_overflow_hash _000634_hash = {
83957 + .next = NULL,
83958 + .name = "__dn_setsockopt",
83959 + .file = "net/decnet/af_decnet.c",
83960 + .param5 = 1,
83961 +};
83962 +struct size_overflow_hash _000635_hash = {
83963 + .next = NULL,
83964 + .name = "dns_query",
83965 + .file = "include/linux/dns_resolver.h",
83966 + .param3 = 1,
83967 +};
83968 +struct size_overflow_hash _000636_hash = {
83969 + .next = NULL,
83970 + .name = "dns_resolver_instantiate",
83971 + .file = "net/dns_resolver/dns_key.c",
83972 + .param3 = 1,
83973 +};
83974 +struct size_overflow_hash _000637_hash = {
83975 + .next = NULL,
83976 + .name = "dns_resolver_read",
83977 + .file = "net/dns_resolver/dns_key.c",
83978 + .param3 = 1,
83979 +};
83980 +struct size_overflow_hash _000638_hash = {
83981 + .next = NULL,
83982 + .name = "do_add_counters",
83983 + .file = "net/ipv6/netfilter/ip6_tables.c",
83984 + .param3 = 1,
83985 +};
83986 +struct size_overflow_hash _000639_hash = {
83987 + .next = NULL,
83988 + .name = "do_add_counters",
83989 + .file = "net/ipv4/netfilter/ip_tables.c",
83990 + .param3 = 1,
83991 +};
83992 +struct size_overflow_hash _000640_hash = {
83993 + .next = NULL,
83994 + .name = "do_add_counters",
83995 + .file = "net/ipv4/netfilter/arp_tables.c",
83996 + .param3 = 1,
83997 +};
83998 +struct size_overflow_hash _000641_hash = {
83999 + .next = NULL,
84000 + .name = "__do_config_autodelink",
84001 + .file = "drivers/usb/storage/realtek_cr.c",
84002 + .param3 = 1,
84003 +};
84004 +struct size_overflow_hash _000642_hash = {
84005 + .next = NULL,
84006 + .name = "do_ipv6_setsockopt",
84007 + .file = "net/ipv6/ipv6_sockglue.c",
84008 + .param5 = 1,
84009 +};
84010 +struct size_overflow_hash _000643_hash = {
84011 + .next = NULL,
84012 + .name = "do_ip_vs_set_ctl",
84013 + .file = "net/netfilter/ipvs/ip_vs_ctl.c",
84014 + .param4 = 1,
84015 +};
84016 +struct size_overflow_hash _000644_hash = {
84017 + .next = NULL,
84018 + .name = "do_register_entry",
84019 + .file = "drivers/misc/lkdtm.c",
84020 + .param4 = 1,
84021 +};
84022 +struct size_overflow_hash _000645_hash = {
84023 + .next = NULL,
84024 + .name = "__do_replace",
84025 + .file = "net/ipv6/netfilter/ip6_tables.c",
84026 + .param5 = 1,
84027 +};
84028 +struct size_overflow_hash _000646_hash = {
84029 + .next = NULL,
84030 + .name = "__do_replace",
84031 + .file = "net/ipv4/netfilter/ip_tables.c",
84032 + .param5 = 1,
84033 +};
84034 +struct size_overflow_hash _000647_hash = {
84035 + .next = NULL,
84036 + .name = "__do_replace",
84037 + .file = "net/ipv4/netfilter/arp_tables.c",
84038 + .param5 = 1,
84039 +};
84040 +struct size_overflow_hash _000648_hash = {
84041 + .next = NULL,
84042 + .name = "do_sync",
84043 + .file = "fs/gfs2/quota.c",
84044 + .param1 = 1,
84045 +};
84046 +struct size_overflow_hash _000649_hash = {
84047 + .next = NULL,
84048 + .name = "do_update_counters",
84049 + .file = "net/bridge/netfilter/ebtables.c",
84050 + .param4 = 1,
84051 +};
84052 +struct size_overflow_hash _000650_hash = {
84053 + .next = NULL,
84054 + .name = "driver_state_read",
84055 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
84056 + .param3 = 1,
84057 +};
84058 +struct size_overflow_hash _000651_hash = {
84059 + .next = NULL,
84060 + .name = "dsp_write",
84061 + .file = "sound/oss/msnd_pinnacle.c",
84062 + .param2 = 1,
84063 +};
84064 +struct size_overflow_hash _000652_hash = {
84065 + .next = NULL,
84066 + .name = "dvb_aplay",
84067 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
84068 + .param3 = 1,
84069 +};
84070 +struct size_overflow_hash _000653_hash = {
84071 + .next = NULL,
84072 + .name = "dvb_ca_en50221_io_write",
84073 + .file = "drivers/media/dvb/dvb-core/dvb_ca_en50221.c",
84074 + .param3 = 1,
84075 +};
84076 +struct size_overflow_hash _000654_hash = {
84077 + .next = NULL,
84078 + .name = "dvb_dmxdev_set_buffer_size",
84079 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
84080 + .param2 = 1,
84081 +};
84082 +struct size_overflow_hash _000655_hash = {
84083 + .next = NULL,
84084 + .name = "dvbdmx_write",
84085 + .file = "drivers/media/dvb/dvb-core/dvb_demux.c",
84086 + .param3 = 1,
84087 +};
84088 +struct size_overflow_hash _000656_hash = {
84089 + .next = NULL,
84090 + .name = "dvb_dvr_set_buffer_size",
84091 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
84092 + .param2 = 1,
84093 +};
84094 +struct size_overflow_hash _000657_hash = {
84095 + .next = NULL,
84096 + .name = "dvb_play",
84097 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
84098 + .param3 = 1,
84099 +};
84100 +struct size_overflow_hash _000658_hash = {
84101 + .next = NULL,
84102 + .name = "dvb_ringbuffer_pkt_read_user",
84103 + .file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
84104 + .param5 = 1,
84105 +};
84106 +struct size_overflow_hash _000659_hash = {
84107 + .next = NULL,
84108 + .name = "dvb_ringbuffer_read_user",
84109 + .file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
84110 + .param3 = 1,
84111 +};
84112 +struct size_overflow_hash _000660_hash = {
84113 + .next = NULL,
84114 + .name = "dw210x_op_rw",
84115 + .file = "drivers/media/dvb/dvb-usb/dw2102.c",
84116 + .param6 = 1,
84117 +};
84118 +struct size_overflow_hash _000661_hash = {
84119 + .next = NULL,
84120 + .name = "dwc3_mode_write",
84121 + .file = "drivers/usb/dwc3/debugfs.c",
84122 + .param3 = 1,
84123 +};
84124 +struct size_overflow_hash _000662_hash = {
84125 + .next = NULL,
84126 + .name = "econet_sendmsg",
84127 + .file = "net/econet/af_econet.c",
84128 + .param4 = 1,
84129 +};
84130 +struct size_overflow_hash _000663_hash = {
84131 + .next = NULL,
84132 + .name = "ecryptfs_copy_filename",
84133 + .file = "fs/ecryptfs/crypto.c",
84134 + .param4 = 1,
84135 +};
84136 +struct size_overflow_hash _000664_hash = {
84137 + .next = NULL,
84138 + .name = "ecryptfs_miscdev_write",
84139 + .file = "fs/ecryptfs/miscdev.c",
84140 + .param3 = 1,
84141 +};
84142 +struct size_overflow_hash _000665_hash = {
84143 + .next = NULL,
84144 + .name = "ecryptfs_send_miscdev",
84145 + .file = "fs/ecryptfs/miscdev.c",
84146 + .param2 = 1,
84147 +};
84148 +struct size_overflow_hash _000666_hash = {
84149 + .next = NULL,
84150 + .name = "edac_device_alloc_ctl_info",
84151 + .file = "drivers/edac/edac_device.c",
84152 + .param1 = 1,
84153 +};
84154 +struct size_overflow_hash _000667_hash = {
84155 + .next = NULL,
84156 + .name = "edac_mc_alloc",
84157 + .file = "drivers/edac/edac_mc.c",
84158 + .param1 = 1,
84159 +};
84160 +struct size_overflow_hash _000668_hash = {
84161 + .next = NULL,
84162 + .name = "edac_pci_alloc_ctl_info",
84163 + .file = "drivers/edac/edac_pci.c",
84164 + .param1 = 1,
84165 +};
84166 +struct size_overflow_hash _000669_hash = {
84167 + .next = NULL,
84168 + .name = "efivar_create_sysfs_entry",
84169 + .file = "drivers/firmware/efivars.c",
84170 + .param2 = 1,
84171 +};
84172 +struct size_overflow_hash _000670_hash = {
84173 + .next = NULL,
84174 + .name = "efx_tsoh_heap_alloc",
84175 + .file = "drivers/net/ethernet/sfc/tx.c",
84176 + .param2 = 1,
84177 +};
84178 +struct size_overflow_hash _000671_hash = {
84179 + .next = NULL,
84180 + .name = "encrypted_instantiate",
84181 + .file = "security/keys/encrypted-keys/encrypted.c",
84182 + .param3 = 1,
84183 +};
84184 +struct size_overflow_hash _000672_hash = {
84185 + .next = NULL,
84186 + .name = "encrypted_update",
84187 + .file = "security/keys/encrypted-keys/encrypted.c",
84188 + .param3 = 1,
84189 +};
84190 +struct size_overflow_hash _000673_hash = {
84191 + .next = NULL,
84192 + .name = "ep0_write",
84193 + .file = "drivers/usb/gadget/inode.c",
84194 + .param3 = 1,
84195 +};
84196 +struct size_overflow_hash _000674_hash = {
84197 + .next = NULL,
84198 + .name = "ep_read",
84199 + .file = "drivers/usb/gadget/inode.c",
84200 + .param3 = 1,
84201 +};
84202 +struct size_overflow_hash _000675_hash = {
84203 + .next = NULL,
84204 + .name = "ep_write",
84205 + .file = "drivers/usb/gadget/inode.c",
84206 + .param3 = 1,
84207 +};
84208 +struct size_overflow_hash _000676_hash = {
84209 + .next = NULL,
84210 + .name = "erst_dbg_write",
84211 + .file = "drivers/acpi/apei/erst-dbg.c",
84212 + .param3 = 1,
84213 +};
84214 +struct size_overflow_hash _000677_hash = {
84215 + .next = NULL,
84216 + .name = "et61x251_read",
84217 + .file = "drivers/media/video/et61x251/et61x251_core.c",
84218 + .param3 = 1,
84219 +};
84220 +struct size_overflow_hash _000678_hash = {
84221 + .next = NULL,
84222 + .name = "event_calibration_read",
84223 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84224 + .param3 = 1,
84225 +};
84226 +struct size_overflow_hash _000679_hash = {
84227 + .next = NULL,
84228 + .name = "event_heart_beat_read",
84229 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84230 + .param3 = 1,
84231 +};
84232 +struct size_overflow_hash _000680_hash = {
84233 + .next = NULL,
84234 + .name = "event_oom_late_read",
84235 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84236 + .param3 = 1,
84237 +};
84238 +struct size_overflow_hash _000681_hash = {
84239 + .next = NULL,
84240 + .name = "event_phy_transmit_error_read",
84241 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84242 + .param3 = 1,
84243 +};
84244 +struct size_overflow_hash _000682_hash = {
84245 + .next = NULL,
84246 + .name = "event_rx_mem_empty_read",
84247 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84248 + .param3 = 1,
84249 +};
84250 +struct size_overflow_hash _000683_hash = {
84251 + .next = NULL,
84252 + .name = "event_rx_mismatch_read",
84253 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84254 + .param3 = 1,
84255 +};
84256 +struct size_overflow_hash _000684_hash = {
84257 + .next = NULL,
84258 + .name = "event_rx_pool_read",
84259 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84260 + .param3 = 1,
84261 +};
84262 +struct size_overflow_hash _000685_hash = {
84263 + .next = NULL,
84264 + .name = "event_tx_stuck_read",
84265 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84266 + .param3 = 1,
84267 +};
84268 +struct size_overflow_hash _000686_hash = {
84269 + .next = NULL,
84270 + .name = "excessive_retries_read",
84271 + .file = "drivers/net/wireless/wl1251/debugfs.c",
84272 + .param3 = 1,
84273 +};
84274 +struct size_overflow_hash _000687_hash = {
84275 + .next = NULL,
84276 + .name = "exofs_read_lookup_dev_table",
84277 + .file = "fs/exofs/super.c",
84278 + .param3 = 1,
84279 +};
84280 +struct size_overflow_hash _000688_hash = {
84281 + .next = NULL,
84282 + .name = "ext4_kvmalloc",
84283 + .file = "fs/ext4/super.c",
84284 + .param1 = 1,
84285 +};
84286 +struct size_overflow_hash _000689_hash = {
84287 + .next = NULL,
84288 + .name = "ext4_kvzalloc",
84289 + .file = "fs/ext4/super.c",
84290 + .param1 = 1,
84291 +};
84292 +struct size_overflow_hash _000690_hash = {
84293 + .next = NULL,
84294 + .name = "extend_netdev_table",
84295 + .file = "net/core/netprio_cgroup.c",
84296 + .param2 = 1,
84297 +};
84298 +struct size_overflow_hash _000691_hash = {
84299 + .next = NULL,
84300 + .name = "fd_copyin",
84301 + .file = "drivers/block/floppy.c",
84302 + .param3 = 1,
84303 +};
84304 +struct size_overflow_hash _000692_hash = {
84305 + .next = NULL,
84306 + .name = "fd_copyout",
84307 + .file = "drivers/block/floppy.c",
84308 + .param3 = 1,
84309 +};
84310 +struct size_overflow_hash _000693_hash = {
84311 + .next = NULL,
84312 + .name = "__ffs_ep0_read_events",
84313 + .file = "drivers/usb/gadget/f_fs.c",
84314 + .param3 = 1,
84315 +};
84316 +struct size_overflow_hash _000694_hash = {
84317 + .next = NULL,
84318 + .name = "ffs_epfile_io",
84319 + .file = "drivers/usb/gadget/f_fs.c",
84320 + .param3 = 1,
84321 +};
84322 +struct size_overflow_hash _000695_hash = {
84323 + .next = NULL,
84324 + .name = "ffs_prepare_buffer",
84325 + .file = "drivers/usb/gadget/f_fs.c",
84326 + .param2 = 1,
84327 +};
84328 +struct size_overflow_hash _000696_hash = {
84329 + .next = NULL,
84330 + .name = "f_hidg_read",
84331 + .file = "drivers/usb/gadget/f_hid.c",
84332 + .param3 = 1,
84333 +};
84334 +struct size_overflow_hash _000697_hash = {
84335 + .next = NULL,
84336 + .name = "f_hidg_write",
84337 + .file = "drivers/usb/gadget/f_hid.c",
84338 + .param3 = 1,
84339 +};
84340 +struct size_overflow_hash _000698_hash = {
84341 + .next = NULL,
84342 + .name = "fill_write_buffer",
84343 + .file = "fs/configfs/file.c",
84344 + .param3 = 1,
84345 +};
84346 +struct size_overflow_hash _000699_hash = {
84347 + .next = NULL,
84348 + .name = "flexcop_device_kmalloc",
84349 + .file = "drivers/media/dvb/b2c2/flexcop.c",
84350 + .param1 = 1,
84351 +};
84352 +struct size_overflow_hash _000700_hash = {
84353 + .next = NULL,
84354 + .name = "fops_read",
84355 + .file = "drivers/media/video/saa7164/saa7164-encoder.c",
84356 + .param3 = 1,
84357 +};
84358 +struct size_overflow_hash _000701_hash = {
84359 + .next = NULL,
84360 + .name = "fops_read",
84361 + .file = "drivers/media/video/saa7164/saa7164-vbi.c",
84362 + .param3 = 1,
84363 +};
84364 +struct size_overflow_hash _000702_hash = {
84365 + .next = NULL,
84366 + .name = "format_devstat_counter",
84367 + .file = "net/mac80211/debugfs.c",
84368 + .param3 = 1,
84369 +};
84370 +struct size_overflow_hash _000703_hash = {
84371 + .next = NULL,
84372 + .name = "fragmentation_threshold_read",
84373 + .file = "net/wireless/debugfs.c",
84374 + .param3 = 1,
84375 +};
84376 +struct size_overflow_hash _000704_hash = {
84377 + .next = NULL,
84378 + .name = "frame_alloc",
84379 + .file = "drivers/media/video/gspca/gspca.c",
84380 + .param4 = 1,
84381 +};
84382 +struct size_overflow_hash _000705_hash = {
84383 + .next = NULL,
84384 + .name = "ftdi_elan_write",
84385 + .file = "drivers/usb/misc/ftdi-elan.c",
84386 + .param3 = 1,
84387 +};
84388 +struct size_overflow_hash _000706_hash = {
84389 + .next = NULL,
84390 + .name = "fuse_conn_limit_read",
84391 + .file = "fs/fuse/control.c",
84392 + .param3 = 1,
84393 +};
84394 +struct size_overflow_hash _000707_hash = {
84395 + .next = NULL,
84396 + .name = "fuse_conn_limit_write",
84397 + .file = "fs/fuse/control.c",
84398 + .param3 = 1,
84399 +};
84400 +struct size_overflow_hash _000708_hash = {
84401 + .next = &_000531_hash,
84402 + .name = "fuse_conn_waiting_read",
84403 + .file = "fs/fuse/control.c",
84404 + .param3 = 1,
84405 +};
84406 +struct size_overflow_hash _000709_hash = {
84407 + .next = NULL,
84408 + .name = "garp_attr_create",
84409 + .file = "net/802/garp.c",
84410 + .param3 = 1,
84411 +};
84412 +struct size_overflow_hash _000710_hash = {
84413 + .next = NULL,
84414 + .name = "get_alua_req",
84415 + .file = "drivers/scsi/device_handler/scsi_dh_alua.c",
84416 + .param3 = 1,
84417 +};
84418 +struct size_overflow_hash _000711_hash = {
84419 + .next = NULL,
84420 + .name = "get_derived_key",
84421 + .file = "security/keys/encrypted-keys/encrypted.c",
84422 + .param4 = 1,
84423 +};
84424 +struct size_overflow_hash _000712_hash = {
84425 + .next = NULL,
84426 + .name = "getdqbuf",
84427 + .file = "fs/quota/quota_tree.c",
84428 + .param1 = 1,
84429 +};
84430 +struct size_overflow_hash _000713_hash = {
84431 + .next = NULL,
84432 + .name = "get_fdb_entries",
84433 + .file = "net/bridge/br_ioctl.c",
84434 + .param3 = 1,
84435 +};
84436 +struct size_overflow_hash _000714_hash = {
84437 + .next = NULL,
84438 + .name = "get_rdac_req",
84439 + .file = "drivers/scsi/device_handler/scsi_dh_rdac.c",
84440 + .param3 = 1,
84441 +};
84442 +struct size_overflow_hash _000715_hash = {
84443 + .next = NULL,
84444 + .name = "get_registers",
84445 + .file = "drivers/net/usb/pegasus.c",
84446 + .param3 = 1,
84447 +};
84448 +struct size_overflow_hash _000716_hash = {
84449 + .next = NULL,
84450 + .name = "get_server_iovec",
84451 + .file = "fs/cifs/connect.c",
84452 + .param2 = 1,
84453 +};
84454 +struct size_overflow_hash _000717_hash = {
84455 + .next = NULL,
84456 + .name = "get_ucode_user",
84457 + .file = "arch/x86/kernel/microcode_intel.c",
84458 + .param3 = 1,
84459 +};
84460 +struct size_overflow_hash _000718_hash = {
84461 + .next = NULL,
84462 + .name = "gfs2_alloc_sort_buffer",
84463 + .file = "fs/gfs2/dir.c",
84464 + .param1 = 1,
84465 +};
84466 +struct size_overflow_hash _000719_hash = {
84467 + .next = NULL,
84468 + .name = "gfs2_glock_nq_m",
84469 + .file = "fs/gfs2/glock.c",
84470 + .param1 = 1,
84471 +};
84472 +struct size_overflow_hash _000720_hash = {
84473 + .next = NULL,
84474 + .name = "gigaset_initdriver",
84475 + .file = "drivers/isdn/gigaset/common.c",
84476 + .param2 = 1,
84477 +};
84478 +struct size_overflow_hash _000721_hash = {
84479 + .next = NULL,
84480 + .name = "gpio_power_read",
84481 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
84482 + .param3 = 1,
84483 +};
84484 +struct size_overflow_hash _000722_hash = {
84485 + .next = NULL,
84486 + .name = "gs_alloc_req",
84487 + .file = "drivers/usb/gadget/u_serial.c",
84488 + .param2 = 1,
84489 +};
84490 +struct size_overflow_hash _000723_hash = {
84491 + .next = NULL,
84492 + .name = "gs_buf_alloc",
84493 + .file = "drivers/usb/gadget/u_serial.c",
84494 + .param2 = 1,
84495 +};
84496 +struct size_overflow_hash _000724_hash = {
84497 + .next = NULL,
84498 + .name = "gss_pipe_downcall",
84499 + .file = "net/sunrpc/auth_gss/auth_gss.c",
84500 + .param3 = 1,
84501 +};
84502 +struct size_overflow_hash _000725_hash = {
84503 + .next = NULL,
84504 + .name = "handle_request",
84505 + .file = "drivers/firewire/core-cdev.c",
84506 + .param9 = 1,
84507 +};
84508 +struct size_overflow_hash _000726_hash = {
84509 + .next = NULL,
84510 + .name = "hash_new",
84511 + .file = "net/batman-adv/hash.c",
84512 + .param1 = 1,
84513 +};
84514 +struct size_overflow_hash _000727_hash = {
84515 + .next = NULL,
84516 + .name = "hash_setkey",
84517 + .file = "crypto/algif_hash.c",
84518 + .param3 = 1,
84519 +};
84520 +struct size_overflow_hash _000728_hash = {
84521 + .next = NULL,
84522 + .name = "hcd_buffer_alloc",
84523 + .file = "include/linux/usb/hcd.h",
84524 + .param2 = 1,
84525 +};
84526 +struct size_overflow_hash _000729_hash = {
84527 + .next = NULL,
84528 + .name = "hci_sock_setsockopt",
84529 + .file = "net/bluetooth/hci_sock.c",
84530 + .param5 = 1,
84531 +};
84532 +struct size_overflow_hash _000730_hash = {
84533 + .next = NULL,
84534 + .name = "hdpvr_read",
84535 + .file = "drivers/media/video/hdpvr/hdpvr-video.c",
84536 + .param3 = 1,
84537 +};
84538 +struct size_overflow_hash _000731_hash = {
84539 + .next = NULL,
84540 + .name = "hidraw_get_report",
84541 + .file = "drivers/hid/hidraw.c",
84542 + .param3 = 1,
84543 +};
84544 +struct size_overflow_hash _000732_hash = {
84545 + .next = NULL,
84546 + .name = "hidraw_read",
84547 + .file = "drivers/hid/hidraw.c",
84548 + .param3 = 1,
84549 +};
84550 +struct size_overflow_hash _000733_hash = {
84551 + .next = NULL,
84552 + .name = "hidraw_send_report",
84553 + .file = "drivers/hid/hidraw.c",
84554 + .param3 = 1,
84555 +};
84556 +struct size_overflow_hash _000734_hash = {
84557 + .next = NULL,
84558 + .name = "hid_register_field",
84559 + .file = "drivers/hid/hid-core.c",
84560 + .param2 = 1,
84561 + .param3 = 1,
84562 +};
84563 +struct size_overflow_hash _000736_hash = {
84564 + .next = NULL,
84565 + .name = "hpfs_translate_name",
84566 + .file = "fs/hpfs/name.c",
84567 + .param3 = 1,
84568 +};
84569 +struct size_overflow_hash _000737_hash = {
84570 + .next = NULL,
84571 + .name = "hpi_alloc_control_cache",
84572 + .file = "sound/pci/asihpi/hpicmn.c",
84573 + .param1 = 1,
84574 +};
84575 +struct size_overflow_hash _000738_hash = {
84576 + .next = NULL,
84577 + .name = "ht40allow_map_read",
84578 + .file = "net/wireless/debugfs.c",
84579 + .param3 = 1,
84580 +};
84581 +struct size_overflow_hash _000739_hash = {
84582 + .next = NULL,
84583 + .name = "__hwahc_dev_set_key",
84584 + .file = "drivers/usb/host/hwa-hc.c",
84585 + .param5 = 1,
84586 +};
84587 +struct size_overflow_hash _000740_hash = {
84588 + .next = NULL,
84589 + .name = "hwflags_read",
84590 + .file = "net/mac80211/debugfs.c",
84591 + .param3 = 1,
84592 +};
84593 +struct size_overflow_hash _000741_hash = {
84594 + .next = NULL,
84595 + .name = "hysdn_conf_read",
84596 + .file = "drivers/isdn/hysdn/hysdn_procconf.c",
84597 + .param3 = 1,
84598 +};
84599 +struct size_overflow_hash _000742_hash = {
84600 + .next = NULL,
84601 + .name = "hysdn_conf_write",
84602 + .file = "drivers/isdn/hysdn/hysdn_procconf.c",
84603 + .param3 = 1,
84604 +};
84605 +struct size_overflow_hash _000743_hash = {
84606 + .next = NULL,
84607 + .name = "hysdn_log_write",
84608 + .file = "drivers/isdn/hysdn/hysdn_proclog.c",
84609 + .param3 = 1,
84610 +};
84611 +struct size_overflow_hash _000744_hash = {
84612 + .next = NULL,
84613 + .name = "i2400m_rx_stats_read",
84614 + .file = "drivers/net/wimax/i2400m/debugfs.c",
84615 + .param3 = 1,
84616 +};
84617 +struct size_overflow_hash _000745_hash = {
84618 + .next = NULL,
84619 + .name = "i2400m_tx_stats_read",
84620 + .file = "drivers/net/wimax/i2400m/debugfs.c",
84621 + .param3 = 1,
84622 +};
84623 +struct size_overflow_hash _000746_hash = {
84624 + .next = NULL,
84625 + .name = "__i2400mu_send_barker",
84626 + .file = "drivers/net/wimax/i2400m/usb.c",
84627 + .param3 = 1,
84628 +};
84629 +struct size_overflow_hash _000747_hash = {
84630 + .next = NULL,
84631 + .name = "i2400m_zrealloc_2x",
84632 + .file = "drivers/net/wimax/i2400m/fw.c",
84633 + .param3 = 1,
84634 +};
84635 +struct size_overflow_hash _000748_hash = {
84636 + .next = NULL,
84637 + .name = "i2cdev_read",
84638 + .file = "drivers/i2c/i2c-dev.c",
84639 + .param3 = 1,
84640 +};
84641 +struct size_overflow_hash _000749_hash = {
84642 + .next = &_000459_hash,
84643 + .name = "i2cdev_write",
84644 + .file = "drivers/i2c/i2c-dev.c",
84645 + .param3 = 1,
84646 +};
84647 +struct size_overflow_hash _000750_hash = {
84648 + .next = NULL,
84649 + .name = "ib_alloc_device",
84650 + .file = "include/rdma/ib_verbs.h",
84651 + .param1 = 1,
84652 +};
84653 +struct size_overflow_hash _000751_hash = {
84654 + .next = NULL,
84655 + .name = "ib_copy_from_udata",
84656 + .file = "include/rdma/ib_verbs.h",
84657 + .param3 = 1,
84658 +};
84659 +struct size_overflow_hash _000752_hash = {
84660 + .next = NULL,
84661 + .name = "ib_copy_to_udata",
84662 + .file = "include/rdma/ib_verbs.h",
84663 + .param3 = 1,
84664 +};
84665 +struct size_overflow_hash _000753_hash = {
84666 + .next = NULL,
84667 + .name = "ibmasm_new_command",
84668 + .file = "drivers/misc/ibmasm/command.c",
84669 + .param2 = 1,
84670 +};
84671 +struct size_overflow_hash _000754_hash = {
84672 + .next = NULL,
84673 + .name = "ib_ucm_alloc_data",
84674 + .file = "drivers/infiniband/core/ucm.c",
84675 + .param3 = 1,
84676 +};
84677 +struct size_overflow_hash _000755_hash = {
84678 + .next = NULL,
84679 + .name = "ib_umad_write",
84680 + .file = "drivers/infiniband/core/user_mad.c",
84681 + .param3 = 1,
84682 +};
84683 +struct size_overflow_hash _000756_hash = {
84684 + .next = NULL,
84685 + .name = "ib_uverbs_unmarshall_recv",
84686 + .file = "drivers/infiniband/core/uverbs_cmd.c",
84687 + .param5 = 1,
84688 +};
84689 +struct size_overflow_hash _000757_hash = {
84690 + .next = NULL,
84691 + .name = "ide_driver_proc_write",
84692 + .file = "drivers/ide/ide-proc.c",
84693 + .param3 = 1,
84694 +};
84695 +struct size_overflow_hash _000758_hash = {
84696 + .next = NULL,
84697 + .name = "ide_queue_pc_tail",
84698 + .file = "include/linux/ide.h",
84699 + .param5 = 1,
84700 +};
84701 +struct size_overflow_hash _000759_hash = {
84702 + .next = NULL,
84703 + .name = "ide_raw_taskfile",
84704 + .file = "include/linux/ide.h",
84705 + .param4 = 1,
84706 +};
84707 +struct size_overflow_hash _000760_hash = {
84708 + .next = NULL,
84709 + .name = "ide_settings_proc_write",
84710 + .file = "drivers/ide/ide-proc.c",
84711 + .param3 = 1,
84712 +};
84713 +struct size_overflow_hash _000761_hash = {
84714 + .next = NULL,
84715 + .name = "idetape_chrdev_read",
84716 + .file = "drivers/ide/ide-tape.c",
84717 + .param3 = 1,
84718 +};
84719 +struct size_overflow_hash _000762_hash = {
84720 + .next = NULL,
84721 + .name = "idetape_chrdev_write",
84722 + .file = "drivers/ide/ide-tape.c",
84723 + .param3 = 1,
84724 +};
84725 +struct size_overflow_hash _000763_hash = {
84726 + .next = NULL,
84727 + .name = "idmouse_read",
84728 + .file = "drivers/usb/misc/idmouse.c",
84729 + .param3 = 1,
84730 +};
84731 +struct size_overflow_hash _000764_hash = {
84732 + .next = NULL,
84733 + .name = "ieee80211_build_probe_req",
84734 + .file = "net/mac80211/util.c",
84735 + .param7 = 1,
84736 +};
84737 +struct size_overflow_hash _000765_hash = {
84738 + .next = NULL,
84739 + .name = "ieee80211_if_read",
84740 + .file = "net/mac80211/debugfs_netdev.c",
84741 + .param3 = 1,
84742 +};
84743 +struct size_overflow_hash _000766_hash = {
84744 + .next = NULL,
84745 + .name = "ieee80211_if_write",
84746 + .file = "net/mac80211/debugfs_netdev.c",
84747 + .param3 = 1,
84748 +};
84749 +struct size_overflow_hash _000767_hash = {
84750 + .next = NULL,
84751 + .name = "ieee80211_key_alloc",
84752 + .file = "net/mac80211/key.c",
84753 + .param3 = 1,
84754 +};
84755 +struct size_overflow_hash _000768_hash = {
84756 + .next = NULL,
84757 + .name = "ieee80211_mgmt_tx",
84758 + .file = "net/mac80211/cfg.c",
84759 + .param9 = 1,
84760 +};
84761 +struct size_overflow_hash _000769_hash = {
84762 + .next = NULL,
84763 + .name = "ikconfig_read_current",
84764 + .file = "kernel/configs.c",
84765 + .param3 = 1,
84766 +};
84767 +struct size_overflow_hash _000770_hash = {
84768 + .next = NULL,
84769 + .name = "il3945_sta_dbgfs_stats_table_read",
84770 + .file = "drivers/net/wireless/iwlegacy/3945-rs.c",
84771 + .param3 = 1,
84772 +};
84773 +struct size_overflow_hash _000771_hash = {
84774 + .next = NULL,
84775 + .name = "il3945_ucode_general_stats_read",
84776 + .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
84777 + .param3 = 1,
84778 +};
84779 +struct size_overflow_hash _000772_hash = {
84780 + .next = NULL,
84781 + .name = "il3945_ucode_rx_stats_read",
84782 + .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
84783 + .param3 = 1,
84784 +};
84785 +struct size_overflow_hash _000773_hash = {
84786 + .next = NULL,
84787 + .name = "il3945_ucode_tx_stats_read",
84788 + .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
84789 + .param3 = 1,
84790 +};
84791 +struct size_overflow_hash _000774_hash = {
84792 + .next = NULL,
84793 + .name = "il4965_rs_sta_dbgfs_rate_scale_data_read",
84794 + .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
84795 + .param3 = 1,
84796 +};
84797 +struct size_overflow_hash _000775_hash = {
84798 + .next = NULL,
84799 + .name = "il4965_rs_sta_dbgfs_scale_table_read",
84800 + .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
84801 + .param3 = 1,
84802 +};
84803 +struct size_overflow_hash _000776_hash = {
84804 + .next = NULL,
84805 + .name = "il4965_rs_sta_dbgfs_stats_table_read",
84806 + .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
84807 + .param3 = 1,
84808 +};
84809 +struct size_overflow_hash _000777_hash = {
84810 + .next = NULL,
84811 + .name = "il4965_ucode_general_stats_read",
84812 + .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
84813 + .param3 = 1,
84814 +};
84815 +struct size_overflow_hash _000778_hash = {
84816 + .next = NULL,
84817 + .name = "il4965_ucode_rx_stats_read",
84818 + .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
84819 + .param3 = 1,
84820 +};
84821 +struct size_overflow_hash _000779_hash = {
84822 + .next = NULL,
84823 + .name = "il4965_ucode_tx_stats_read",
84824 + .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
84825 + .param3 = 1,
84826 +};
84827 +struct size_overflow_hash _000780_hash = {
84828 + .next = NULL,
84829 + .name = "il_dbgfs_chain_noise_read",
84830 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84831 + .param3 = 1,
84832 +};
84833 +struct size_overflow_hash _000781_hash = {
84834 + .next = NULL,
84835 + .name = "il_dbgfs_channels_read",
84836 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84837 + .param3 = 1,
84838 +};
84839 +struct size_overflow_hash _000782_hash = {
84840 + .next = NULL,
84841 + .name = "il_dbgfs_disable_ht40_read",
84842 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84843 + .param3 = 1,
84844 +};
84845 +struct size_overflow_hash _000783_hash = {
84846 + .next = NULL,
84847 + .name = "il_dbgfs_fh_reg_read",
84848 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84849 + .param3 = 1,
84850 +};
84851 +struct size_overflow_hash _000784_hash = {
84852 + .next = NULL,
84853 + .name = "il_dbgfs_force_reset_read",
84854 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84855 + .param3 = 1,
84856 +};
84857 +struct size_overflow_hash _000785_hash = {
84858 + .next = NULL,
84859 + .name = "il_dbgfs_interrupt_read",
84860 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84861 + .param3 = 1,
84862 +};
84863 +struct size_overflow_hash _000786_hash = {
84864 + .next = NULL,
84865 + .name = "il_dbgfs_missed_beacon_read",
84866 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84867 + .param3 = 1,
84868 +};
84869 +struct size_overflow_hash _000787_hash = {
84870 + .next = NULL,
84871 + .name = "il_dbgfs_nvm_read",
84872 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84873 + .param3 = 1,
84874 +};
84875 +struct size_overflow_hash _000788_hash = {
84876 + .next = NULL,
84877 + .name = "il_dbgfs_power_save_status_read",
84878 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84879 + .param3 = 1,
84880 +};
84881 +struct size_overflow_hash _000789_hash = {
84882 + .next = NULL,
84883 + .name = "il_dbgfs_qos_read",
84884 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84885 + .param3 = 1,
84886 +};
84887 +struct size_overflow_hash _000790_hash = {
84888 + .next = &_000221_hash,
84889 + .name = "il_dbgfs_rxon_filter_flags_read",
84890 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84891 + .param3 = 1,
84892 +};
84893 +struct size_overflow_hash _000791_hash = {
84894 + .next = NULL,
84895 + .name = "il_dbgfs_rxon_flags_read",
84896 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84897 + .param3 = 1,
84898 +};
84899 +struct size_overflow_hash _000792_hash = {
84900 + .next = NULL,
84901 + .name = "il_dbgfs_rx_queue_read",
84902 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84903 + .param3 = 1,
84904 +};
84905 +struct size_overflow_hash _000793_hash = {
84906 + .next = NULL,
84907 + .name = "il_dbgfs_rx_stats_read",
84908 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84909 + .param3 = 1,
84910 +};
84911 +struct size_overflow_hash _000794_hash = {
84912 + .next = NULL,
84913 + .name = "il_dbgfs_sensitivity_read",
84914 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84915 + .param3 = 1,
84916 +};
84917 +struct size_overflow_hash _000795_hash = {
84918 + .next = NULL,
84919 + .name = "il_dbgfs_sram_read",
84920 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84921 + .param3 = 1,
84922 +};
84923 +struct size_overflow_hash _000796_hash = {
84924 + .next = NULL,
84925 + .name = "il_dbgfs_stations_read",
84926 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84927 + .param3 = 1,
84928 +};
84929 +struct size_overflow_hash _000797_hash = {
84930 + .next = NULL,
84931 + .name = "il_dbgfs_status_read",
84932 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84933 + .param3 = 1,
84934 +};
84935 +struct size_overflow_hash _000798_hash = {
84936 + .next = NULL,
84937 + .name = "il_dbgfs_traffic_log_read",
84938 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84939 + .param3 = 1,
84940 +};
84941 +struct size_overflow_hash _000799_hash = {
84942 + .next = NULL,
84943 + .name = "il_dbgfs_tx_queue_read",
84944 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84945 + .param3 = 1,
84946 +};
84947 +struct size_overflow_hash _000800_hash = {
84948 + .next = NULL,
84949 + .name = "il_dbgfs_tx_stats_read",
84950 + .file = "drivers/net/wireless/iwlegacy/debug.c",
84951 + .param3 = 1,
84952 +};
84953 +struct size_overflow_hash _000801_hash = {
84954 + .next = NULL,
84955 + .name = "ilo_read",
84956 + .file = "drivers/misc/hpilo.c",
84957 + .param3 = 1,
84958 +};
84959 +struct size_overflow_hash _000802_hash = {
84960 + .next = NULL,
84961 + .name = "ilo_write",
84962 + .file = "drivers/misc/hpilo.c",
84963 + .param3 = 1,
84964 +};
84965 +struct size_overflow_hash _000803_hash = {
84966 + .next = NULL,
84967 + .name = "init_data_container",
84968 + .file = "fs/btrfs/backref.c",
84969 + .param1 = 1,
84970 +};
84971 +struct size_overflow_hash _000804_hash = {
84972 + .next = NULL,
84973 + .name = "init_list_set",
84974 + .file = "net/netfilter/ipset/ip_set_list_set.c",
84975 + .param2 = 1,
84976 + .param3 = 1,
84977 +};
84978 +struct size_overflow_hash _000806_hash = {
84979 + .next = NULL,
84980 + .name = "interpret_user_input",
84981 + .file = "fs/ubifs/debug.c",
84982 + .param2 = 1,
84983 +};
84984 +struct size_overflow_hash _000807_hash = {
84985 + .next = NULL,
84986 + .name = "int_proc_write",
84987 + .file = "drivers/net/wireless/ray_cs.c",
84988 + .param3 = 1,
84989 +};
84990 +struct size_overflow_hash _000808_hash = {
84991 + .next = NULL,
84992 + .name = "iowarrior_read",
84993 + .file = "drivers/usb/misc/iowarrior.c",
84994 + .param3 = 1,
84995 +};
84996 +struct size_overflow_hash _000809_hash = {
84997 + .next = NULL,
84998 + .name = "iowarrior_write",
84999 + .file = "drivers/usb/misc/iowarrior.c",
85000 + .param3 = 1,
85001 +};
85002 +struct size_overflow_hash _000810_hash = {
85003 + .next = NULL,
85004 + .name = "ip_set_alloc",
85005 + .file = "include/linux/netfilter/ipset/ip_set.h",
85006 + .param1 = 1,
85007 +};
85008 +struct size_overflow_hash _000811_hash = {
85009 + .next = NULL,
85010 + .name = "ip_vs_conn_fill_param_sync",
85011 + .file = "net/netfilter/ipvs/ip_vs_sync.c",
85012 + .param6 = 1,
85013 +};
85014 +struct size_overflow_hash _000812_hash = {
85015 + .next = NULL,
85016 + .name = "irda_setsockopt",
85017 + .file = "net/irda/af_irda.c",
85018 + .param5 = 1,
85019 +};
85020 +struct size_overflow_hash _000813_hash = {
85021 + .next = NULL,
85022 + .name = "ir_lirc_transmit_ir",
85023 + .file = "drivers/media/rc/ir-lirc-codec.c",
85024 + .param3 = 1,
85025 +};
85026 +struct size_overflow_hash _000814_hash = {
85027 + .next = NULL,
85028 + .name = "irnet_ctrl_write",
85029 + .file = "net/irda/irnet/irnet_ppp.c",
85030 + .param3 = 1,
85031 +};
85032 +struct size_overflow_hash _000815_hash = {
85033 + .next = NULL,
85034 + .name = "iscsi_decode_text_input",
85035 + .file = "drivers/target/iscsi/iscsi_target_parameters.c",
85036 + .param4 = 1,
85037 +};
85038 +struct size_overflow_hash _000816_hash = {
85039 + .next = NULL,
85040 + .name = "iscsit_dump_data_payload",
85041 + .file = "drivers/target/iscsi/iscsi_target_erl1.c",
85042 + .param2 = 1,
85043 +};
85044 +struct size_overflow_hash _000817_hash = {
85045 + .next = NULL,
85046 + .name = "isdn_read",
85047 + .file = "drivers/isdn/i4l/isdn_common.c",
85048 + .param3 = 1,
85049 +};
85050 +struct size_overflow_hash _000818_hash = {
85051 + .next = NULL,
85052 + .name = "iso_callback",
85053 + .file = "drivers/firewire/core-cdev.c",
85054 + .param3 = 1,
85055 +};
85056 +struct size_overflow_hash _000819_hash = {
85057 + .next = NULL,
85058 + .name = "iso_packets_buffer_init",
85059 + .file = "sound/firewire/packets-buffer.c",
85060 + .param3 = 1,
85061 +};
85062 +struct size_overflow_hash _000820_hash = {
85063 + .next = NULL,
85064 + .name = "iso_sched_alloc",
85065 + .file = "drivers/usb/host/ehci-sched.c",
85066 + .param1 = 1,
85067 +};
85068 +struct size_overflow_hash _000821_hash = {
85069 + .next = NULL,
85070 + .name = "isr_cmd_cmplt_read",
85071 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85072 + .param3 = 1,
85073 +};
85074 +struct size_overflow_hash _000822_hash = {
85075 + .next = NULL,
85076 + .name = "isr_commands_read",
85077 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85078 + .param3 = 1,
85079 +};
85080 +struct size_overflow_hash _000823_hash = {
85081 + .next = NULL,
85082 + .name = "isr_decrypt_done_read",
85083 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85084 + .param3 = 1,
85085 +};
85086 +struct size_overflow_hash _000824_hash = {
85087 + .next = NULL,
85088 + .name = "isr_dma0_done_read",
85089 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85090 + .param3 = 1,
85091 +};
85092 +struct size_overflow_hash _000825_hash = {
85093 + .next = NULL,
85094 + .name = "isr_dma1_done_read",
85095 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85096 + .param3 = 1,
85097 +};
85098 +struct size_overflow_hash _000826_hash = {
85099 + .next = NULL,
85100 + .name = "isr_fiqs_read",
85101 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85102 + .param3 = 1,
85103 +};
85104 +struct size_overflow_hash _000827_hash = {
85105 + .next = NULL,
85106 + .name = "isr_host_acknowledges_read",
85107 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85108 + .param3 = 1,
85109 +};
85110 +struct size_overflow_hash _000828_hash = {
85111 + .next = &_000629_hash,
85112 + .name = "isr_hw_pm_mode_changes_read",
85113 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85114 + .param3 = 1,
85115 +};
85116 +struct size_overflow_hash _000829_hash = {
85117 + .next = &_000329_hash,
85118 + .name = "isr_irqs_read",
85119 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85120 + .param3 = 1,
85121 +};
85122 +struct size_overflow_hash _000830_hash = {
85123 + .next = NULL,
85124 + .name = "isr_low_rssi_read",
85125 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85126 + .param3 = 1,
85127 +};
85128 +struct size_overflow_hash _000831_hash = {
85129 + .next = NULL,
85130 + .name = "isr_pci_pm_read",
85131 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85132 + .param3 = 1,
85133 +};
85134 +struct size_overflow_hash _000832_hash = {
85135 + .next = NULL,
85136 + .name = "isr_rx_headers_read",
85137 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85138 + .param3 = 1,
85139 +};
85140 +struct size_overflow_hash _000833_hash = {
85141 + .next = NULL,
85142 + .name = "isr_rx_mem_overflow_read",
85143 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85144 + .param3 = 1,
85145 +};
85146 +struct size_overflow_hash _000834_hash = {
85147 + .next = NULL,
85148 + .name = "isr_rx_procs_read",
85149 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85150 + .param3 = 1,
85151 +};
85152 +struct size_overflow_hash _000835_hash = {
85153 + .next = NULL,
85154 + .name = "isr_rx_rdys_read",
85155 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85156 + .param3 = 1,
85157 +};
85158 +struct size_overflow_hash _000836_hash = {
85159 + .next = NULL,
85160 + .name = "isr_tx_exch_complete_read",
85161 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85162 + .param3 = 1,
85163 +};
85164 +struct size_overflow_hash _000837_hash = {
85165 + .next = NULL,
85166 + .name = "isr_tx_procs_read",
85167 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85168 + .param3 = 1,
85169 +};
85170 +struct size_overflow_hash _000838_hash = {
85171 + .next = NULL,
85172 + .name = "isr_wakeups_read",
85173 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85174 + .param3 = 1,
85175 +};
85176 +struct size_overflow_hash _000839_hash = {
85177 + .next = NULL,
85178 + .name = "ivtv_copy_buf_to_user",
85179 + .file = "drivers/media/video/ivtv/ivtv-fileops.c",
85180 + .param4 = 1,
85181 +};
85182 +struct size_overflow_hash _000840_hash = {
85183 + .next = NULL,
85184 + .name = "iwl_dbgfs_bt_traffic_read",
85185 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85186 + .param3 = 1,
85187 +};
85188 +struct size_overflow_hash _000841_hash = {
85189 + .next = NULL,
85190 + .name = "iwl_dbgfs_chain_noise_read",
85191 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85192 + .param3 = 1,
85193 +};
85194 +struct size_overflow_hash _000842_hash = {
85195 + .next = NULL,
85196 + .name = "iwl_dbgfs_channels_read",
85197 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85198 + .param3 = 1,
85199 +};
85200 +struct size_overflow_hash _000843_hash = {
85201 + .next = NULL,
85202 + .name = "iwl_dbgfs_current_sleep_command_read",
85203 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85204 + .param3 = 1,
85205 +};
85206 +struct size_overflow_hash _000844_hash = {
85207 + .next = NULL,
85208 + .name = "iwl_dbgfs_debug_level_read",
85209 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85210 + .param3 = 1,
85211 +};
85212 +struct size_overflow_hash _000845_hash = {
85213 + .next = NULL,
85214 + .name = "iwl_dbgfs_debug_level_write",
85215 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85216 + .param3 = 1,
85217 +};
85218 +struct size_overflow_hash _000846_hash = {
85219 + .next = NULL,
85220 + .name = "iwl_dbgfs_disable_ht40_read",
85221 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85222 + .param3 = 1,
85223 +};
85224 +struct size_overflow_hash _000847_hash = {
85225 + .next = NULL,
85226 + .name = "iwl_dbgfs_fh_reg_read",
85227 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
85228 + .param3 = 1,
85229 +};
85230 +struct size_overflow_hash _000848_hash = {
85231 + .next = NULL,
85232 + .name = "iwl_dbgfs_force_reset_read",
85233 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85234 + .param3 = 1,
85235 +};
85236 +struct size_overflow_hash _000849_hash = {
85237 + .next = NULL,
85238 + .name = "iwl_dbgfs_interrupt_read",
85239 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
85240 + .param3 = 1,
85241 +};
85242 +struct size_overflow_hash _000850_hash = {
85243 + .next = NULL,
85244 + .name = "iwl_dbgfs_log_event_read",
85245 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
85246 + .param3 = 1,
85247 +};
85248 +struct size_overflow_hash _000851_hash = {
85249 + .next = NULL,
85250 + .name = "iwl_dbgfs_missed_beacon_read",
85251 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85252 + .param3 = 1,
85253 +};
85254 +struct size_overflow_hash _000852_hash = {
85255 + .next = NULL,
85256 + .name = "iwl_dbgfs_nvm_read",
85257 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85258 + .param3 = 1,
85259 +};
85260 +struct size_overflow_hash _000853_hash = {
85261 + .next = NULL,
85262 + .name = "iwl_dbgfs_plcp_delta_read",
85263 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85264 + .param3 = 1,
85265 +};
85266 +struct size_overflow_hash _000854_hash = {
85267 + .next = NULL,
85268 + .name = "iwl_dbgfs_power_save_status_read",
85269 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85270 + .param3 = 1,
85271 +};
85272 +struct size_overflow_hash _000855_hash = {
85273 + .next = NULL,
85274 + .name = "iwl_dbgfs_protection_mode_read",
85275 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85276 + .param3 = 1,
85277 +};
85278 +struct size_overflow_hash _000856_hash = {
85279 + .next = NULL,
85280 + .name = "iwl_dbgfs_qos_read",
85281 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85282 + .param3 = 1,
85283 +};
85284 +struct size_overflow_hash _000857_hash = {
85285 + .next = NULL,
85286 + .name = "iwl_dbgfs_reply_tx_error_read",
85287 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85288 + .param3 = 1,
85289 +};
85290 +struct size_overflow_hash _000858_hash = {
85291 + .next = NULL,
85292 + .name = "iwl_dbgfs_rx_handlers_read",
85293 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85294 + .param3 = 1,
85295 +};
85296 +struct size_overflow_hash _000859_hash = {
85297 + .next = NULL,
85298 + .name = "iwl_dbgfs_rxon_filter_flags_read",
85299 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85300 + .param3 = 1,
85301 +};
85302 +struct size_overflow_hash _000860_hash = {
85303 + .next = NULL,
85304 + .name = "iwl_dbgfs_rxon_flags_read",
85305 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85306 + .param3 = 1,
85307 +};
85308 +struct size_overflow_hash _000861_hash = {
85309 + .next = NULL,
85310 + .name = "iwl_dbgfs_rx_queue_read",
85311 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
85312 + .param3 = 1,
85313 +};
85314 +struct size_overflow_hash _000862_hash = {
85315 + .next = NULL,
85316 + .name = "iwl_dbgfs_rx_statistics_read",
85317 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85318 + .param3 = 1,
85319 +};
85320 +struct size_overflow_hash _000863_hash = {
85321 + .next = NULL,
85322 + .name = "iwl_dbgfs_sensitivity_read",
85323 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85324 + .param3 = 1,
85325 +};
85326 +struct size_overflow_hash _000864_hash = {
85327 + .next = NULL,
85328 + .name = "iwl_dbgfs_sleep_level_override_read",
85329 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85330 + .param3 = 1,
85331 +};
85332 +struct size_overflow_hash _000865_hash = {
85333 + .next = NULL,
85334 + .name = "iwl_dbgfs_sram_read",
85335 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85336 + .param3 = 1,
85337 +};
85338 +struct size_overflow_hash _000866_hash = {
85339 + .next = NULL,
85340 + .name = "iwl_dbgfs_stations_read",
85341 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85342 + .param3 = 1,
85343 +};
85344 +struct size_overflow_hash _000867_hash = {
85345 + .next = NULL,
85346 + .name = "iwl_dbgfs_status_read",
85347 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85348 + .param3 = 1,
85349 +};
85350 +struct size_overflow_hash _000868_hash = {
85351 + .next = NULL,
85352 + .name = "iwl_dbgfs_temperature_read",
85353 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85354 + .param3 = 1,
85355 +};
85356 +struct size_overflow_hash _000869_hash = {
85357 + .next = NULL,
85358 + .name = "iwl_dbgfs_thermal_throttling_read",
85359 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85360 + .param3 = 1,
85361 +};
85362 +struct size_overflow_hash _000870_hash = {
85363 + .next = NULL,
85364 + .name = "iwl_dbgfs_traffic_log_read",
85365 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85366 + .param3 = 1,
85367 +};
85368 +struct size_overflow_hash _000871_hash = {
85369 + .next = NULL,
85370 + .name = "iwl_dbgfs_tx_queue_read",
85371 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
85372 + .param3 = 1,
85373 +};
85374 +struct size_overflow_hash _000872_hash = {
85375 + .next = NULL,
85376 + .name = "iwl_dbgfs_tx_statistics_read",
85377 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85378 + .param3 = 1,
85379 +};
85380 +struct size_overflow_hash _000873_hash = {
85381 + .next = NULL,
85382 + .name = "iwl_dbgfs_ucode_bt_stats_read",
85383 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85384 + .param3 = 1,
85385 +};
85386 +struct size_overflow_hash _000874_hash = {
85387 + .next = NULL,
85388 + .name = "iwl_dbgfs_ucode_general_stats_read",
85389 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85390 + .param3 = 1,
85391 +};
85392 +struct size_overflow_hash _000875_hash = {
85393 + .next = NULL,
85394 + .name = "iwl_dbgfs_ucode_rx_stats_read",
85395 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85396 + .param3 = 1,
85397 +};
85398 +struct size_overflow_hash _000876_hash = {
85399 + .next = NULL,
85400 + .name = "iwl_dbgfs_ucode_tracing_read",
85401 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85402 + .param3 = 1,
85403 +};
85404 +struct size_overflow_hash _000877_hash = {
85405 + .next = NULL,
85406 + .name = "iwl_dbgfs_ucode_tx_stats_read",
85407 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85408 + .param3 = 1,
85409 +};
85410 +struct size_overflow_hash _000878_hash = {
85411 + .next = NULL,
85412 + .name = "iwl_dbgfs_wowlan_sram_read",
85413 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
85414 + .param3 = 1,
85415 +};
85416 +struct size_overflow_hash _000879_hash = {
85417 + .next = NULL,
85418 + .name = "iwmct_fw_parser_init",
85419 + .file = "drivers/misc/iwmc3200top/fw-download.c",
85420 + .param4 = 1,
85421 +};
85422 +struct size_overflow_hash _000880_hash = {
85423 + .next = NULL,
85424 + .name = "iwm_notif_send",
85425 + .file = "drivers/net/wireless/iwmc3200wifi/main.c",
85426 + .param6 = 1,
85427 +};
85428 +struct size_overflow_hash _000881_hash = {
85429 + .next = NULL,
85430 + .name = "iwm_ntf_calib_res",
85431 + .file = "drivers/net/wireless/iwmc3200wifi/rx.c",
85432 + .param3 = 1,
85433 +};
85434 +struct size_overflow_hash _000882_hash = {
85435 + .next = NULL,
85436 + .name = "iwm_umac_set_config_var",
85437 + .file = "drivers/net/wireless/iwmc3200wifi/commands.c",
85438 + .param4 = 1,
85439 +};
85440 +struct size_overflow_hash _000883_hash = {
85441 + .next = NULL,
85442 + .name = "jbd2_alloc",
85443 + .file = "include/linux/jbd2.h",
85444 + .param1 = 1,
85445 +};
85446 +struct size_overflow_hash _000884_hash = {
85447 + .next = NULL,
85448 + .name = "key_algorithm_read",
85449 + .file = "net/mac80211/debugfs_key.c",
85450 + .param3 = 1,
85451 +};
85452 +struct size_overflow_hash _000885_hash = {
85453 + .next = NULL,
85454 + .name = "key_icverrors_read",
85455 + .file = "net/mac80211/debugfs_key.c",
85456 + .param3 = 1,
85457 +};
85458 +struct size_overflow_hash _000886_hash = {
85459 + .next = NULL,
85460 + .name = "key_key_read",
85461 + .file = "net/mac80211/debugfs_key.c",
85462 + .param3 = 1,
85463 +};
85464 +struct size_overflow_hash _000887_hash = {
85465 + .next = NULL,
85466 + .name = "key_replays_read",
85467 + .file = "net/mac80211/debugfs_key.c",
85468 + .param3 = 1,
85469 +};
85470 +struct size_overflow_hash _000888_hash = {
85471 + .next = NULL,
85472 + .name = "key_rx_spec_read",
85473 + .file = "net/mac80211/debugfs_key.c",
85474 + .param3 = 1,
85475 +};
85476 +struct size_overflow_hash _000889_hash = {
85477 + .next = NULL,
85478 + .name = "key_tx_spec_read",
85479 + .file = "net/mac80211/debugfs_key.c",
85480 + .param3 = 1,
85481 +};
85482 +struct size_overflow_hash _000890_hash = {
85483 + .next = NULL,
85484 + .name = "kmem_alloc",
85485 + .file = "fs/xfs/kmem.c",
85486 + .param1 = 1,
85487 +};
85488 +struct size_overflow_hash _000891_hash = {
85489 + .next = NULL,
85490 + .name = "kmem_zalloc_large",
85491 + .file = "fs/xfs/kmem.h",
85492 + .param1 = 1,
85493 +};
85494 +struct size_overflow_hash _000892_hash = {
85495 + .next = NULL,
85496 + .name = "kone_receive",
85497 + .file = "drivers/hid/hid-roccat-kone.c",
85498 + .param4 = 1,
85499 +};
85500 +struct size_overflow_hash _000893_hash = {
85501 + .next = NULL,
85502 + .name = "kone_send",
85503 + .file = "drivers/hid/hid-roccat-kone.c",
85504 + .param4 = 1,
85505 +};
85506 +struct size_overflow_hash _000894_hash = {
85507 + .next = NULL,
85508 + .name = "kvm_read_guest_atomic",
85509 + .file = "include/linux/kvm_host.h",
85510 + .param4 = 1,
85511 +};
85512 +struct size_overflow_hash _000895_hash = {
85513 + .next = NULL,
85514 + .name = "kvm_read_guest_cached",
85515 + .file = "include/linux/kvm_host.h",
85516 + .param4 = 1,
85517 +};
85518 +struct size_overflow_hash _000896_hash = {
85519 + .next = NULL,
85520 + .name = "kvm_set_irq_routing",
85521 + .file = "include/linux/kvm_host.h",
85522 + .param3 = 1,
85523 +};
85524 +struct size_overflow_hash _000897_hash = {
85525 + .next = NULL,
85526 + .name = "kvm_write_guest_cached",
85527 + .file = "include/linux/kvm_host.h",
85528 + .param4 = 1,
85529 +};
85530 +struct size_overflow_hash _000898_hash = {
85531 + .next = NULL,
85532 + .name = "l2cap_sock_setsockopt",
85533 + .file = "net/bluetooth/l2cap_sock.c",
85534 + .param5 = 1,
85535 +};
85536 +struct size_overflow_hash _000899_hash = {
85537 + .next = NULL,
85538 + .name = "l2cap_sock_setsockopt_old",
85539 + .file = "net/bluetooth/l2cap_sock.c",
85540 + .param4 = 1,
85541 +};
85542 +struct size_overflow_hash _000900_hash = {
85543 + .next = NULL,
85544 + .name = "lane2_associate_req",
85545 + .file = "net/atm/lec.c",
85546 + .param4 = 1,
85547 +};
85548 +struct size_overflow_hash _000901_hash = {
85549 + .next = NULL,
85550 + .name = "lbs_debugfs_read",
85551 + .file = "drivers/net/wireless/libertas/debugfs.c",
85552 + .param3 = 1,
85553 +};
85554 +struct size_overflow_hash _000902_hash = {
85555 + .next = NULL,
85556 + .name = "lbs_debugfs_write",
85557 + .file = "drivers/net/wireless/libertas/debugfs.c",
85558 + .param3 = 1,
85559 +};
85560 +struct size_overflow_hash _000903_hash = {
85561 + .next = NULL,
85562 + .name = "lbs_dev_info",
85563 + .file = "drivers/net/wireless/libertas/debugfs.c",
85564 + .param3 = 1,
85565 +};
85566 +struct size_overflow_hash _000904_hash = {
85567 + .next = NULL,
85568 + .name = "lbs_host_sleep_read",
85569 + .file = "drivers/net/wireless/libertas/debugfs.c",
85570 + .param3 = 1,
85571 +};
85572 +struct size_overflow_hash _000905_hash = {
85573 + .next = NULL,
85574 + .name = "lbs_rdbbp_read",
85575 + .file = "drivers/net/wireless/libertas/debugfs.c",
85576 + .param3 = 1,
85577 +};
85578 +struct size_overflow_hash _000906_hash = {
85579 + .next = NULL,
85580 + .name = "lbs_rdmac_read",
85581 + .file = "drivers/net/wireless/libertas/debugfs.c",
85582 + .param3 = 1,
85583 +};
85584 +struct size_overflow_hash _000907_hash = {
85585 + .next = NULL,
85586 + .name = "lbs_rdrf_read",
85587 + .file = "drivers/net/wireless/libertas/debugfs.c",
85588 + .param3 = 1,
85589 +};
85590 +struct size_overflow_hash _000908_hash = {
85591 + .next = NULL,
85592 + .name = "lbs_sleepparams_read",
85593 + .file = "drivers/net/wireless/libertas/debugfs.c",
85594 + .param3 = 1,
85595 +};
85596 +struct size_overflow_hash _000909_hash = {
85597 + .next = NULL,
85598 + .name = "lbs_threshold_read",
85599 + .file = "drivers/net/wireless/libertas/debugfs.c",
85600 + .param5 = 1,
85601 +};
85602 +struct size_overflow_hash _000910_hash = {
85603 + .next = NULL,
85604 + .name = "lc_create",
85605 + .file = "include/linux/lru_cache.h",
85606 + .param3 = 1,
85607 +};
85608 +struct size_overflow_hash _000911_hash = {
85609 + .next = NULL,
85610 + .name = "lcd_write",
85611 + .file = "drivers/usb/misc/usblcd.c",
85612 + .param3 = 1,
85613 +};
85614 +struct size_overflow_hash _000912_hash = {
85615 + .next = NULL,
85616 + .name = "leaf_dealloc",
85617 + .file = "fs/gfs2/dir.c",
85618 + .param3 = 1,
85619 +};
85620 +struct size_overflow_hash _000913_hash = {
85621 + .next = NULL,
85622 + .name = "__lgread",
85623 + .file = "drivers/lguest/core.c",
85624 + .param4 = 1,
85625 +};
85626 +struct size_overflow_hash _000914_hash = {
85627 + .next = NULL,
85628 + .name = "__lgwrite",
85629 + .file = "drivers/lguest/core.c",
85630 + .param4 = 1,
85631 +};
85632 +struct size_overflow_hash _000915_hash = {
85633 + .next = NULL,
85634 + .name = "link_send_sections_long",
85635 + .file = "net/tipc/link.c",
85636 + .param4 = 1,
85637 +};
85638 +struct size_overflow_hash _000916_hash = {
85639 + .next = NULL,
85640 + .name = "lirc_buffer_init",
85641 + .file = "include/media/lirc_dev.h",
85642 + .param2 = 1,
85643 + .param3 = 1,
85644 +};
85645 +struct size_overflow_hash _000918_hash = {
85646 + .next = NULL,
85647 + .name = "lkdtm_debugfs_read",
85648 + .file = "drivers/misc/lkdtm.c",
85649 + .param3 = 1,
85650 +};
85651 +struct size_overflow_hash _000919_hash = {
85652 + .next = NULL,
85653 + .name = "LoadBitmap",
85654 + .file = "drivers/media/dvb/ttpci/av7110_hw.c",
85655 + .param2 = 1,
85656 +};
85657 +struct size_overflow_hash _000920_hash = {
85658 + .next = NULL,
85659 + .name = "long_retry_limit_read",
85660 + .file = "net/wireless/debugfs.c",
85661 + .param3 = 1,
85662 +};
85663 +struct size_overflow_hash _000921_hash = {
85664 + .next = NULL,
85665 + .name = "lpfc_debugfs_dif_err_read",
85666 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85667 + .param3 = 1,
85668 +};
85669 +struct size_overflow_hash _000922_hash = {
85670 + .next = NULL,
85671 + .name = "lpfc_debugfs_dif_err_write",
85672 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85673 + .param3 = 1,
85674 +};
85675 +struct size_overflow_hash _000923_hash = {
85676 + .next = NULL,
85677 + .name = "lpfc_debugfs_read",
85678 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85679 + .param3 = 1,
85680 +};
85681 +struct size_overflow_hash _000924_hash = {
85682 + .next = NULL,
85683 + .name = "lpfc_idiag_baracc_read",
85684 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85685 + .param3 = 1,
85686 +};
85687 +struct size_overflow_hash _000925_hash = {
85688 + .next = NULL,
85689 + .name = "lpfc_idiag_ctlacc_read",
85690 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85691 + .param3 = 1,
85692 +};
85693 +struct size_overflow_hash _000926_hash = {
85694 + .next = NULL,
85695 + .name = "lpfc_idiag_drbacc_read",
85696 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85697 + .param3 = 1,
85698 +};
85699 +struct size_overflow_hash _000927_hash = {
85700 + .next = NULL,
85701 + .name = "lpfc_idiag_extacc_read",
85702 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85703 + .param3 = 1,
85704 +};
85705 +struct size_overflow_hash _000928_hash = {
85706 + .next = NULL,
85707 + .name = "lpfc_idiag_mbxacc_read",
85708 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85709 + .param3 = 1,
85710 +};
85711 +struct size_overflow_hash _000929_hash = {
85712 + .next = NULL,
85713 + .name = "lpfc_idiag_pcicfg_read",
85714 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85715 + .param3 = 1,
85716 +};
85717 +struct size_overflow_hash _000930_hash = {
85718 + .next = NULL,
85719 + .name = "lpfc_idiag_queacc_read",
85720 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85721 + .param3 = 1,
85722 +};
85723 +struct size_overflow_hash _000931_hash = {
85724 + .next = NULL,
85725 + .name = "lpfc_idiag_queinfo_read",
85726 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
85727 + .param3 = 1,
85728 +};
85729 +struct size_overflow_hash _000932_hash = {
85730 + .next = NULL,
85731 + .name = "lpfc_sli4_queue_alloc",
85732 + .file = "drivers/scsi/lpfc/lpfc_sli.c",
85733 + .param3 = 1,
85734 +};
85735 +struct size_overflow_hash _000933_hash = {
85736 + .next = NULL,
85737 + .name = "lp_write",
85738 + .file = "drivers/char/lp.c",
85739 + .param3 = 1,
85740 +};
85741 +struct size_overflow_hash _000934_hash = {
85742 + .next = NULL,
85743 + .name = "mac80211_format_buffer",
85744 + .file = "net/mac80211/debugfs.c",
85745 + .param2 = 1,
85746 +};
85747 +struct size_overflow_hash _000935_hash = {
85748 + .next = NULL,
85749 + .name = "mce_write",
85750 + .file = "arch/x86/kernel/cpu/mcheck/mce-inject.c",
85751 + .param3 = 1,
85752 +};
85753 +struct size_overflow_hash _000936_hash = {
85754 + .next = NULL,
85755 + .name = "mcs7830_get_reg",
85756 + .file = "drivers/net/usb/mcs7830.c",
85757 + .param3 = 1,
85758 +};
85759 +struct size_overflow_hash _000937_hash = {
85760 + .next = NULL,
85761 + .name = "mcs7830_set_reg",
85762 + .file = "drivers/net/usb/mcs7830.c",
85763 + .param3 = 1,
85764 +};
85765 +struct size_overflow_hash _000938_hash = {
85766 + .next = NULL,
85767 + .name = "mdc800_device_read",
85768 + .file = "drivers/usb/image/mdc800.c",
85769 + .param3 = 1,
85770 +};
85771 +struct size_overflow_hash _000939_hash = {
85772 + .next = NULL,
85773 + .name = "mdiobus_alloc_size",
85774 + .file = "include/linux/phy.h",
85775 + .param1 = 1,
85776 +};
85777 +struct size_overflow_hash _000940_hash = {
85778 + .next = NULL,
85779 + .name = "media_entity_init",
85780 + .file = "include/media/media-entity.h",
85781 + .param2 = 1,
85782 + .param4 = 1,
85783 +};
85784 +struct size_overflow_hash _000942_hash = {
85785 + .next = NULL,
85786 + .name = "memstick_alloc_host",
85787 + .file = "include/linux/memstick.h",
85788 + .param1 = 1,
85789 +};
85790 +struct size_overflow_hash _000943_hash = {
85791 + .next = NULL,
85792 + .name = "mgmt_control",
85793 + .file = "include/net/bluetooth/hci_core.h",
85794 + .param3 = 1,
85795 +};
85796 +struct size_overflow_hash _000944_hash = {
85797 + .next = NULL,
85798 + .name = "mgmt_pending_add",
85799 + .file = "net/bluetooth/mgmt.c",
85800 + .param5 = 1,
85801 +};
85802 +struct size_overflow_hash _000945_hash = {
85803 + .next = &_000321_hash,
85804 + .name = "mic_calc_failure_read",
85805 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85806 + .param3 = 1,
85807 +};
85808 +struct size_overflow_hash _000946_hash = {
85809 + .next = NULL,
85810 + .name = "mic_rx_pkts_read",
85811 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85812 + .param3 = 1,
85813 +};
85814 +struct size_overflow_hash _000947_hash = {
85815 + .next = NULL,
85816 + .name = "minstrel_stats_read",
85817 + .file = "net/mac80211/rc80211_minstrel_debugfs.c",
85818 + .param3 = 1,
85819 +};
85820 +struct size_overflow_hash _000948_hash = {
85821 + .next = NULL,
85822 + .name = "mlx4_en_create_rx_ring",
85823 + .file = "drivers/net/ethernet/mellanox/mlx4/en_rx.c",
85824 + .param3 = 1,
85825 +};
85826 +struct size_overflow_hash _000949_hash = {
85827 + .next = NULL,
85828 + .name = "mlx4_en_create_tx_ring",
85829 + .file = "drivers/net/ethernet/mellanox/mlx4/en_tx.c",
85830 + .param4 = 1,
85831 +};
85832 +struct size_overflow_hash _000950_hash = {
85833 + .next = NULL,
85834 + .name = "mmc_ext_csd_read",
85835 + .file = "drivers/mmc/core/debugfs.c",
85836 + .param3 = 1,
85837 +};
85838 +struct size_overflow_hash _000951_hash = {
85839 + .next = NULL,
85840 + .name = "mmc_send_bus_test",
85841 + .file = "drivers/mmc/core/mmc_ops.c",
85842 + .param4 = 1,
85843 +};
85844 +struct size_overflow_hash _000952_hash = {
85845 + .next = NULL,
85846 + .name = "mmc_send_cxd_data",
85847 + .file = "drivers/mmc/core/mmc_ops.c",
85848 + .param5 = 1,
85849 +};
85850 +struct size_overflow_hash _000953_hash = {
85851 + .next = NULL,
85852 + .name = "mmc_test_alloc_mem",
85853 + .file = "drivers/mmc/card/mmc_test.c",
85854 + .param3 = 1,
85855 +};
85856 +struct size_overflow_hash _000954_hash = {
85857 + .next = NULL,
85858 + .name = "mon_bin_get_event",
85859 + .file = "drivers/usb/mon/mon_bin.c",
85860 + .param4 = 1,
85861 +};
85862 +struct size_overflow_hash _000955_hash = {
85863 + .next = NULL,
85864 + .name = "mon_stat_read",
85865 + .file = "drivers/usb/mon/mon_stat.c",
85866 + .param3 = 1,
85867 +};
85868 +struct size_overflow_hash _000956_hash = {
85869 + .next = NULL,
85870 + .name = "mptctl_getiocinfo",
85871 + .file = "drivers/message/fusion/mptctl.c",
85872 + .param2 = 1,
85873 +};
85874 +struct size_overflow_hash _000957_hash = {
85875 + .next = NULL,
85876 + .name = "msnd_fifo_alloc",
85877 + .file = "sound/oss/msnd.c",
85878 + .param2 = 1,
85879 +};
85880 +struct size_overflow_hash _000958_hash = {
85881 + .next = NULL,
85882 + .name = "mtdchar_readoob",
85883 + .file = "drivers/mtd/mtdchar.c",
85884 + .param4 = 1,
85885 +};
85886 +struct size_overflow_hash _000959_hash = {
85887 + .next = NULL,
85888 + .name = "mtdchar_write",
85889 + .file = "drivers/mtd/mtdchar.c",
85890 + .param3 = 1,
85891 +};
85892 +struct size_overflow_hash _000960_hash = {
85893 + .next = NULL,
85894 + .name = "mtdchar_writeoob",
85895 + .file = "drivers/mtd/mtdchar.c",
85896 + .param4 = 1,
85897 +};
85898 +struct size_overflow_hash _000961_hash = {
85899 + .next = NULL,
85900 + .name = "mtdswap_init",
85901 + .file = "drivers/mtd/mtdswap.c",
85902 + .param2 = 1,
85903 +};
85904 +struct size_overflow_hash _000962_hash = {
85905 + .next = NULL,
85906 + .name = "mtf_test_write",
85907 + .file = "drivers/mmc/card/mmc_test.c",
85908 + .param3 = 1,
85909 +};
85910 +struct size_overflow_hash _000963_hash = {
85911 + .next = NULL,
85912 + .name = "musb_test_mode_write",
85913 + .file = "drivers/usb/musb/musb_debugfs.c",
85914 + .param3 = 1,
85915 +};
85916 +struct size_overflow_hash _000964_hash = {
85917 + .next = NULL,
85918 + .name = "mvumi_alloc_mem_resource",
85919 + .file = "drivers/scsi/mvumi.c",
85920 + .param3 = 1,
85921 +};
85922 +struct size_overflow_hash _000965_hash = {
85923 + .next = NULL,
85924 + .name = "mwifiex_alloc_sdio_mpa_buffers",
85925 + .file = "drivers/net/wireless/mwifiex/sdio.c",
85926 + .param2 = 1,
85927 + .param3 = 1,
85928 +};
85929 +struct size_overflow_hash _000967_hash = {
85930 + .next = NULL,
85931 + .name = "mwifiex_debug_read",
85932 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
85933 + .param3 = 1,
85934 +};
85935 +struct size_overflow_hash _000968_hash = {
85936 + .next = NULL,
85937 + .name = "mwifiex_get_common_rates",
85938 + .file = "drivers/net/wireless/mwifiex/join.c",
85939 + .param3 = 1,
85940 +};
85941 +struct size_overflow_hash _000969_hash = {
85942 + .next = NULL,
85943 + .name = "mwifiex_getlog_read",
85944 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
85945 + .param3 = 1,
85946 +};
85947 +struct size_overflow_hash _000970_hash = {
85948 + .next = NULL,
85949 + .name = "mwifiex_info_read",
85950 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
85951 + .param3 = 1,
85952 +};
85953 +struct size_overflow_hash _000971_hash = {
85954 + .next = NULL,
85955 + .name = "mwifiex_rdeeprom_read",
85956 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
85957 + .param3 = 1,
85958 +};
85959 +struct size_overflow_hash _000972_hash = {
85960 + .next = NULL,
85961 + .name = "mwifiex_regrdwr_read",
85962 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
85963 + .param3 = 1,
85964 +};
85965 +struct size_overflow_hash _000973_hash = {
85966 + .next = NULL,
85967 + .name = "mwifiex_update_curr_bss_params",
85968 + .file = "drivers/net/wireless/mwifiex/scan.c",
85969 + .param5 = 1,
85970 +};
85971 +struct size_overflow_hash _000974_hash = {
85972 + .next = NULL,
85973 + .name = "nand_bch_init",
85974 + .file = "include/linux/mtd/nand_bch.h",
85975 + .param2 = 1,
85976 + .param3 = 1,
85977 +};
85978 +struct size_overflow_hash _000976_hash = {
85979 + .next = NULL,
85980 + .name = "ncp_file_write",
85981 + .file = "fs/ncpfs/file.c",
85982 + .param3 = 1,
85983 +};
85984 +struct size_overflow_hash _000977_hash = {
85985 + .next = NULL,
85986 + .name = "ncp__vol2io",
85987 + .file = "fs/ncpfs/ncplib_kernel.c",
85988 + .param5 = 1,
85989 +};
85990 +struct size_overflow_hash _000978_hash = {
85991 + .next = NULL,
85992 + .name = "new_bind_ctl",
85993 + .file = "sound/pci/hda/patch_realtek.c",
85994 + .param2 = 1,
85995 +};
85996 +struct size_overflow_hash _000979_hash = {
85997 + .next = NULL,
85998 + .name = "nfc_llcp_build_tlv",
85999 + .file = "net/nfc/llcp/commands.c",
86000 + .param3 = 1,
86001 +};
86002 +struct size_overflow_hash _000980_hash = {
86003 + .next = NULL,
86004 + .name = "nfs4_alloc_slots",
86005 + .file = "fs/nfs/nfs4proc.c",
86006 + .param1 = 1,
86007 +};
86008 +struct size_overflow_hash _000981_hash = {
86009 + .next = NULL,
86010 + .name = "nfs4_write_cached_acl",
86011 + .file = "fs/nfs/nfs4proc.c",
86012 + .param3 = 1,
86013 + .param4 = 1,
86014 +};
86015 +struct size_overflow_hash _000982_hash = {
86016 + .next = NULL,
86017 + .name = "nfsctl_transaction_read",
86018 + .file = "fs/nfsd/nfsctl.c",
86019 + .param3 = 1,
86020 +};
86021 +struct size_overflow_hash _000983_hash = {
86022 + .next = NULL,
86023 + .name = "nfsctl_transaction_write",
86024 + .file = "fs/nfsd/nfsctl.c",
86025 + .param3 = 1,
86026 +};
86027 +struct size_overflow_hash _000984_hash = {
86028 + .next = NULL,
86029 + .name = "nfsd_cache_update",
86030 + .file = "fs/nfsd/nfscache.c",
86031 + .param3 = 1,
86032 +};
86033 +struct size_overflow_hash _000985_hash = {
86034 + .next = NULL,
86035 + .name = "nfs_idmap_get_desc",
86036 + .file = "fs/nfs/idmap.c",
86037 + .param2 = 1,
86038 + .param4 = 1,
86039 +};
86040 +struct size_overflow_hash _000987_hash = {
86041 + .next = NULL,
86042 + .name = "nfs_readdata_alloc",
86043 + .file = "include/linux/nfs_fs.h",
86044 + .param1 = 1,
86045 +};
86046 +struct size_overflow_hash _000988_hash = {
86047 + .next = NULL,
86048 + .name = "nfs_readdir_make_qstr",
86049 + .file = "fs/nfs/dir.c",
86050 + .param3 = 1,
86051 +};
86052 +struct size_overflow_hash _000989_hash = {
86053 + .next = NULL,
86054 + .name = "nfs_writedata_alloc",
86055 + .file = "include/linux/nfs_fs.h",
86056 + .param1 = 1,
86057 +};
86058 +struct size_overflow_hash _000990_hash = {
86059 + .next = NULL,
86060 + .name = "nsm_create_handle",
86061 + .file = "fs/lockd/mon.c",
86062 + .param4 = 1,
86063 +};
86064 +struct size_overflow_hash _000991_hash = {
86065 + .next = NULL,
86066 + .name = "ntfs_copy_from_user",
86067 + .file = "fs/ntfs/file.c",
86068 + .param3 = 1,
86069 + .param5 = 1,
86070 +};
86071 +struct size_overflow_hash _000993_hash = {
86072 + .next = NULL,
86073 + .name = "__ntfs_copy_from_user_iovec_inatomic",
86074 + .file = "fs/ntfs/file.c",
86075 + .param3 = 1,
86076 + .param4 = 1,
86077 +};
86078 +struct size_overflow_hash _000995_hash = {
86079 + .next = NULL,
86080 + .name = "__ntfs_malloc",
86081 + .file = "fs/ntfs/malloc.h",
86082 + .param1 = 1,
86083 +};
86084 +struct size_overflow_hash _000996_hash = {
86085 + .next = NULL,
86086 + .name = "nvme_alloc_iod",
86087 + .file = "drivers/block/nvme.c",
86088 + .param1 = 1,
86089 +};
86090 +struct size_overflow_hash _000997_hash = {
86091 + .next = NULL,
86092 + .name = "nvram_write",
86093 + .file = "drivers/char/nvram.c",
86094 + .param3 = 1,
86095 +};
86096 +struct size_overflow_hash _000998_hash = {
86097 + .next = NULL,
86098 + .name = "o2hb_debug_read",
86099 + .file = "fs/ocfs2/cluster/heartbeat.c",
86100 + .param3 = 1,
86101 +};
86102 +struct size_overflow_hash _000999_hash = {
86103 + .next = NULL,
86104 + .name = "o2net_debug_read",
86105 + .file = "fs/ocfs2/cluster/netdebug.c",
86106 + .param3 = 1,
86107 +};
86108 +struct size_overflow_hash _001000_hash = {
86109 + .next = NULL,
86110 + .name = "o2net_send_message_vec",
86111 + .file = "fs/ocfs2/cluster/tcp.c",
86112 + .param4 = 1,
86113 +};
86114 +struct size_overflow_hash _001001_hash = {
86115 + .next = NULL,
86116 + .name = "ocfs2_control_cfu",
86117 + .file = "fs/ocfs2/stack_user.c",
86118 + .param2 = 1,
86119 +};
86120 +struct size_overflow_hash _001002_hash = {
86121 + .next = NULL,
86122 + .name = "ocfs2_control_read",
86123 + .file = "fs/ocfs2/stack_user.c",
86124 + .param3 = 1,
86125 +};
86126 +struct size_overflow_hash _001003_hash = {
86127 + .next = NULL,
86128 + .name = "ocfs2_debug_read",
86129 + .file = "fs/ocfs2/super.c",
86130 + .param3 = 1,
86131 +};
86132 +struct size_overflow_hash _001004_hash = {
86133 + .next = NULL,
86134 + .name = "opera1_xilinx_rw",
86135 + .file = "drivers/media/dvb/dvb-usb/opera1.c",
86136 + .param5 = 1,
86137 +};
86138 +struct size_overflow_hash _001005_hash = {
86139 + .next = NULL,
86140 + .name = "oprofilefs_str_to_user",
86141 + .file = "include/linux/oprofile.h",
86142 + .param3 = 1,
86143 +};
86144 +struct size_overflow_hash _001006_hash = {
86145 + .next = NULL,
86146 + .name = "oprofilefs_ulong_from_user",
86147 + .file = "include/linux/oprofile.h",
86148 + .param3 = 1,
86149 +};
86150 +struct size_overflow_hash _001007_hash = {
86151 + .next = &_000626_hash,
86152 + .name = "oprofilefs_ulong_to_user",
86153 + .file = "include/linux/oprofile.h",
86154 + .param3 = 1,
86155 +};
86156 +struct size_overflow_hash _001008_hash = {
86157 + .next = NULL,
86158 + .name = "_ore_get_io_state",
86159 + .file = "fs/exofs/ore.c",
86160 + .param3 = 1,
86161 +};
86162 +struct size_overflow_hash _001009_hash = {
86163 + .next = NULL,
86164 + .name = "_osd_realloc_seg",
86165 + .file = "drivers/scsi/osd/osd_initiator.c",
86166 + .param3 = 1,
86167 +};
86168 +struct size_overflow_hash _001010_hash = {
86169 + .next = NULL,
86170 + .name = "_osd_req_list_objects",
86171 + .file = "drivers/scsi/osd/osd_initiator.c",
86172 + .param6 = 1,
86173 +};
86174 +struct size_overflow_hash _001011_hash = {
86175 + .next = NULL,
86176 + .name = "osd_req_read_kern",
86177 + .file = "include/scsi/osd_initiator.h",
86178 + .param5 = 1,
86179 +};
86180 +struct size_overflow_hash _001012_hash = {
86181 + .next = NULL,
86182 + .name = "osd_req_write_kern",
86183 + .file = "include/scsi/osd_initiator.h",
86184 + .param5 = 1,
86185 +};
86186 +struct size_overflow_hash _001013_hash = {
86187 + .next = NULL,
86188 + .name = "osst_execute",
86189 + .file = "drivers/scsi/osst.c",
86190 + .param6 = 1,
86191 +};
86192 +struct size_overflow_hash _001014_hash = {
86193 + .next = NULL,
86194 + .name = "otp_read",
86195 + .file = "drivers/mtd/devices/mtd_dataflash.c",
86196 + .param2 = 1,
86197 + .param5 = 1,
86198 +};
86199 +struct size_overflow_hash _001016_hash = {
86200 + .next = NULL,
86201 + .name = "packet_buffer_init",
86202 + .file = "drivers/firewire/nosy.c",
86203 + .param2 = 1,
86204 +};
86205 +struct size_overflow_hash _001017_hash = {
86206 + .next = NULL,
86207 + .name = "packet_setsockopt",
86208 + .file = "net/packet/af_packet.c",
86209 + .param5 = 1,
86210 +};
86211 +struct size_overflow_hash _001018_hash = {
86212 + .next = NULL,
86213 + .name = "parse_arg",
86214 + .file = "drivers/platform/x86/asus_acpi.c",
86215 + .param2 = 1,
86216 +};
86217 +struct size_overflow_hash _001019_hash = {
86218 + .next = NULL,
86219 + .name = "parse_command",
86220 + .file = "fs/binfmt_misc.c",
86221 + .param2 = 1,
86222 +};
86223 +struct size_overflow_hash _001020_hash = {
86224 + .next = NULL,
86225 + .name = "pcmcia_replace_cis",
86226 + .file = "drivers/pcmcia/cistpl.c",
86227 + .param3 = 1,
86228 +};
86229 +struct size_overflow_hash _001021_hash = {
86230 + .next = NULL,
86231 + .name = "pcnet32_realloc_rx_ring",
86232 + .file = "drivers/net/ethernet/amd/pcnet32.c",
86233 + .param3 = 1,
86234 +};
86235 +struct size_overflow_hash _001022_hash = {
86236 + .next = NULL,
86237 + .name = "pcnet32_realloc_tx_ring",
86238 + .file = "drivers/net/ethernet/amd/pcnet32.c",
86239 + .param3 = 1,
86240 +};
86241 +struct size_overflow_hash _001023_hash = {
86242 + .next = NULL,
86243 + .name = "pgctrl_write",
86244 + .file = "net/core/pktgen.c",
86245 + .param3 = 1,
86246 +};
86247 +struct size_overflow_hash _001024_hash = {
86248 + .next = NULL,
86249 + .name = "pg_read",
86250 + .file = "drivers/block/paride/pg.c",
86251 + .param3 = 1,
86252 +};
86253 +struct size_overflow_hash _001025_hash = {
86254 + .next = NULL,
86255 + .name = "pg_write",
86256 + .file = "drivers/block/paride/pg.c",
86257 + .param3 = 1,
86258 +};
86259 +struct size_overflow_hash _001026_hash = {
86260 + .next = NULL,
86261 + .name = "picolcd_debug_eeprom_read",
86262 + .file = "drivers/hid/hid-picolcd.c",
86263 + .param3 = 1,
86264 +};
86265 +struct size_overflow_hash _001027_hash = {
86266 + .next = NULL,
86267 + .name = "pkt_add",
86268 + .file = "drivers/usb/serial/garmin_gps.c",
86269 + .param3 = 1,
86270 +};
86271 +struct size_overflow_hash _001028_hash = {
86272 + .next = NULL,
86273 + .name = "pktgen_if_write",
86274 + .file = "net/core/pktgen.c",
86275 + .param3 = 1,
86276 +};
86277 +struct size_overflow_hash _001029_hash = {
86278 + .next = NULL,
86279 + .name = "platform_list_read_file",
86280 + .file = "sound/soc/soc-core.c",
86281 + .param3 = 1,
86282 +};
86283 +struct size_overflow_hash _001030_hash = {
86284 + .next = NULL,
86285 + .name = "pm8001_store_update_fw",
86286 + .file = "drivers/scsi/pm8001/pm8001_ctl.c",
86287 + .param4 = 1,
86288 +};
86289 +struct size_overflow_hash _001031_hash = {
86290 + .next = NULL,
86291 + .name = "port_show_regs",
86292 + .file = "drivers/tty/serial/mfd.c",
86293 + .param3 = 1,
86294 +};
86295 +struct size_overflow_hash _001032_hash = {
86296 + .next = NULL,
86297 + .name = "ppp_cp_parse_cr",
86298 + .file = "drivers/net/wan/hdlc_ppp.c",
86299 + .param4 = 1,
86300 +};
86301 +struct size_overflow_hash _001033_hash = {
86302 + .next = NULL,
86303 + .name = "ppp_write",
86304 + .file = "drivers/net/ppp/ppp_generic.c",
86305 + .param3 = 1,
86306 +};
86307 +struct size_overflow_hash _001034_hash = {
86308 + .next = NULL,
86309 + .name = "pp_read",
86310 + .file = "drivers/char/ppdev.c",
86311 + .param3 = 1,
86312 +};
86313 +struct size_overflow_hash _001035_hash = {
86314 + .next = NULL,
86315 + .name = "pp_write",
86316 + .file = "drivers/char/ppdev.c",
86317 + .param3 = 1,
86318 +};
86319 +struct size_overflow_hash _001036_hash = {
86320 + .next = NULL,
86321 + .name = "printer_read",
86322 + .file = "drivers/usb/gadget/printer.c",
86323 + .param3 = 1,
86324 +};
86325 +struct size_overflow_hash _001037_hash = {
86326 + .next = NULL,
86327 + .name = "printer_req_alloc",
86328 + .file = "drivers/usb/gadget/printer.c",
86329 + .param2 = 1,
86330 +};
86331 +struct size_overflow_hash _001038_hash = {
86332 + .next = NULL,
86333 + .name = "printer_write",
86334 + .file = "drivers/usb/gadget/printer.c",
86335 + .param3 = 1,
86336 +};
86337 +struct size_overflow_hash _001039_hash = {
86338 + .next = NULL,
86339 + .name = "prism2_set_genericelement",
86340 + .file = "drivers/net/wireless/hostap/hostap_ioctl.c",
86341 + .param3 = 1,
86342 +};
86343 +struct size_overflow_hash _001040_hash = {
86344 + .next = NULL,
86345 + .name = "proc_read",
86346 + .file = "drivers/net/wireless/airo.c",
86347 + .param3 = 1,
86348 +};
86349 +struct size_overflow_hash _001041_hash = {
86350 + .next = NULL,
86351 + .name = "proc_scsi_devinfo_write",
86352 + .file = "drivers/scsi/scsi_devinfo.c",
86353 + .param3 = 1,
86354 +};
86355 +struct size_overflow_hash _001042_hash = {
86356 + .next = NULL,
86357 + .name = "proc_scsi_write",
86358 + .file = "drivers/scsi/scsi_proc.c",
86359 + .param3 = 1,
86360 +};
86361 +struct size_overflow_hash _001043_hash = {
86362 + .next = NULL,
86363 + .name = "proc_scsi_write_proc",
86364 + .file = "drivers/scsi/scsi_proc.c",
86365 + .param3 = 1,
86366 +};
86367 +struct size_overflow_hash _001044_hash = {
86368 + .next = NULL,
86369 + .name = "proc_write",
86370 + .file = "drivers/net/wireless/airo.c",
86371 + .param3 = 1,
86372 +};
86373 +struct size_overflow_hash _001045_hash = {
86374 + .next = NULL,
86375 + .name = "provide_user_output",
86376 + .file = "fs/ubifs/debug.c",
86377 + .param3 = 1,
86378 +};
86379 +struct size_overflow_hash _001046_hash = {
86380 + .next = NULL,
86381 + .name = "ps_pspoll_max_apturn_read",
86382 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86383 + .param3 = 1,
86384 +};
86385 +struct size_overflow_hash _001047_hash = {
86386 + .next = NULL,
86387 + .name = "ps_pspoll_timeouts_read",
86388 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86389 + .param3 = 1,
86390 +};
86391 +struct size_overflow_hash _001048_hash = {
86392 + .next = NULL,
86393 + .name = "ps_pspoll_utilization_read",
86394 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86395 + .param3 = 1,
86396 +};
86397 +struct size_overflow_hash _001049_hash = {
86398 + .next = NULL,
86399 + .name = "ps_upsd_max_apturn_read",
86400 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86401 + .param3 = 1,
86402 +};
86403 +struct size_overflow_hash _001050_hash = {
86404 + .next = NULL,
86405 + .name = "ps_upsd_max_sptime_read",
86406 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86407 + .param3 = 1,
86408 +};
86409 +struct size_overflow_hash _001051_hash = {
86410 + .next = NULL,
86411 + .name = "ps_upsd_timeouts_read",
86412 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86413 + .param3 = 1,
86414 +};
86415 +struct size_overflow_hash _001052_hash = {
86416 + .next = NULL,
86417 + .name = "ps_upsd_utilization_read",
86418 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86419 + .param3 = 1,
86420 +};
86421 +struct size_overflow_hash _001053_hash = {
86422 + .next = NULL,
86423 + .name = "pti_char_write",
86424 + .file = "drivers/misc/pti.c",
86425 + .param3 = 1,
86426 +};
86427 +struct size_overflow_hash _001054_hash = {
86428 + .next = NULL,
86429 + .name = "pt_read",
86430 + .file = "drivers/block/paride/pt.c",
86431 + .param3 = 1,
86432 +};
86433 +struct size_overflow_hash _001055_hash = {
86434 + .next = NULL,
86435 + .name = "pt_write",
86436 + .file = "drivers/block/paride/pt.c",
86437 + .param3 = 1,
86438 +};
86439 +struct size_overflow_hash _001056_hash = {
86440 + .next = NULL,
86441 + .name = "pvr2_ioread_read",
86442 + .file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
86443 + .param3 = 1,
86444 +};
86445 +struct size_overflow_hash _001057_hash = {
86446 + .next = NULL,
86447 + .name = "pvr2_ioread_set_sync_key",
86448 + .file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
86449 + .param3 = 1,
86450 +};
86451 +struct size_overflow_hash _001058_hash = {
86452 + .next = NULL,
86453 + .name = "pvr2_stream_buffer_count",
86454 + .file = "drivers/media/video/pvrusb2/pvrusb2-io.c",
86455 + .param2 = 1,
86456 +};
86457 +struct size_overflow_hash _001059_hash = {
86458 + .next = NULL,
86459 + .name = "pwr_disable_ps_read",
86460 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86461 + .param3 = 1,
86462 +};
86463 +struct size_overflow_hash _001060_hash = {
86464 + .next = NULL,
86465 + .name = "pwr_elp_enter_read",
86466 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86467 + .param3 = 1,
86468 +};
86469 +struct size_overflow_hash _001061_hash = {
86470 + .next = NULL,
86471 + .name = "pwr_enable_ps_read",
86472 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86473 + .param3 = 1,
86474 +};
86475 +struct size_overflow_hash _001062_hash = {
86476 + .next = NULL,
86477 + .name = "pwr_fix_tsf_ps_read",
86478 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86479 + .param3 = 1,
86480 +};
86481 +struct size_overflow_hash _001063_hash = {
86482 + .next = NULL,
86483 + .name = "pwr_missing_bcns_read",
86484 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86485 + .param3 = 1,
86486 +};
86487 +struct size_overflow_hash _001064_hash = {
86488 + .next = NULL,
86489 + .name = "pwr_power_save_off_read",
86490 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86491 + .param3 = 1,
86492 +};
86493 +struct size_overflow_hash _001065_hash = {
86494 + .next = NULL,
86495 + .name = "pwr_ps_enter_read",
86496 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86497 + .param3 = 1,
86498 +};
86499 +struct size_overflow_hash _001066_hash = {
86500 + .next = NULL,
86501 + .name = "pwr_rcvd_awake_beacons_read",
86502 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86503 + .param3 = 1,
86504 +};
86505 +struct size_overflow_hash _001067_hash = {
86506 + .next = NULL,
86507 + .name = "pwr_rcvd_beacons_read",
86508 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86509 + .param3 = 1,
86510 +};
86511 +struct size_overflow_hash _001068_hash = {
86512 + .next = NULL,
86513 + .name = "pwr_tx_without_ps_read",
86514 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86515 + .param3 = 1,
86516 +};
86517 +struct size_overflow_hash _001069_hash = {
86518 + .next = NULL,
86519 + .name = "pwr_tx_with_ps_read",
86520 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86521 + .param3 = 1,
86522 +};
86523 +struct size_overflow_hash _001070_hash = {
86524 + .next = NULL,
86525 + .name = "pwr_wake_on_host_read",
86526 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86527 + .param3 = 1,
86528 +};
86529 +struct size_overflow_hash _001071_hash = {
86530 + .next = NULL,
86531 + .name = "pwr_wake_on_timer_exp_read",
86532 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86533 + .param3 = 1,
86534 +};
86535 +struct size_overflow_hash _001072_hash = {
86536 + .next = NULL,
86537 + .name = "qc_capture",
86538 + .file = "drivers/media/video/c-qcam.c",
86539 + .param3 = 1,
86540 +};
86541 +struct size_overflow_hash _001073_hash = {
86542 + .next = NULL,
86543 + .name = "qla2x00_get_ctx_bsg_sp",
86544 + .file = "drivers/scsi/qla2xxx/qla_bsg.c",
86545 + .param3 = 1,
86546 +};
86547 +struct size_overflow_hash _001074_hash = {
86548 + .next = NULL,
86549 + .name = "qla2x00_get_ctx_sp",
86550 + .file = "drivers/scsi/qla2xxx/qla_init.c",
86551 + .param3 = 1,
86552 +};
86553 +struct size_overflow_hash _001075_hash = {
86554 + .next = NULL,
86555 + .name = "qlcnic_alloc_msix_entries",
86556 + .file = "drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c",
86557 + .param2 = 1,
86558 +};
86559 +struct size_overflow_hash _001076_hash = {
86560 + .next = NULL,
86561 + .name = "queues_read",
86562 + .file = "net/mac80211/debugfs.c",
86563 + .param3 = 1,
86564 +};
86565 +struct size_overflow_hash _001077_hash = {
86566 + .next = NULL,
86567 + .name = "r3964_write",
86568 + .file = "drivers/tty/n_r3964.c",
86569 + .param4 = 1,
86570 +};
86571 +struct size_overflow_hash _001078_hash = {
86572 + .next = NULL,
86573 + .name = "raw_setsockopt",
86574 + .file = "net/can/raw.c",
86575 + .param5 = 1,
86576 +};
86577 +struct size_overflow_hash _001079_hash = {
86578 + .next = NULL,
86579 + .name = "ray_cs_essid_proc_write",
86580 + .file = "drivers/net/wireless/ray_cs.c",
86581 + .param3 = 1,
86582 +};
86583 +struct size_overflow_hash _001080_hash = {
86584 + .next = NULL,
86585 + .name = "rbd_snap_add",
86586 + .file = "drivers/block/rbd.c",
86587 + .param4 = 1,
86588 +};
86589 +struct size_overflow_hash _001081_hash = {
86590 + .next = NULL,
86591 + .name = "rcname_read",
86592 + .file = "net/mac80211/rate.c",
86593 + .param3 = 1,
86594 +};
86595 +struct size_overflow_hash _001082_hash = {
86596 + .next = NULL,
86597 + .name = "rds_message_alloc",
86598 + .file = "net/rds/message.c",
86599 + .param1 = 1,
86600 +};
86601 +struct size_overflow_hash _001083_hash = {
86602 + .next = NULL,
86603 + .name = "rds_page_copy_user",
86604 + .file = "net/rds/page.c",
86605 + .param4 = 1,
86606 +};
86607 +struct size_overflow_hash _001084_hash = {
86608 + .next = NULL,
86609 + .name = "read",
86610 + .file = "drivers/pci/hotplug/cpqphp_sysfs.c",
86611 + .param3 = 1,
86612 +};
86613 +struct size_overflow_hash _001085_hash = {
86614 + .next = NULL,
86615 + .name = "read_4k_modal_eeprom",
86616 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86617 + .param3 = 1,
86618 +};
86619 +struct size_overflow_hash _001086_hash = {
86620 + .next = NULL,
86621 + .name = "read_9287_modal_eeprom",
86622 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86623 + .param3 = 1,
86624 +};
86625 +struct size_overflow_hash _001087_hash = {
86626 + .next = NULL,
86627 + .name = "read_buf",
86628 + .file = "fs/nfsd/nfs4xdr.c",
86629 + .param2 = 1,
86630 +};
86631 +struct size_overflow_hash _001088_hash = {
86632 + .next = NULL,
86633 + .name = "read_cis_cache",
86634 + .file = "drivers/pcmcia/cistpl.c",
86635 + .param4 = 1,
86636 +};
86637 +struct size_overflow_hash _001089_hash = {
86638 + .next = NULL,
86639 + .name = "read_def_modal_eeprom",
86640 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86641 + .param3 = 1,
86642 +};
86643 +struct size_overflow_hash _001090_hash = {
86644 + .next = NULL,
86645 + .name = "read_file_ani",
86646 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86647 + .param3 = 1,
86648 +};
86649 +struct size_overflow_hash _001091_hash = {
86650 + .next = NULL,
86651 + .name = "read_file_antenna",
86652 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86653 + .param3 = 1,
86654 +};
86655 +struct size_overflow_hash _001092_hash = {
86656 + .next = NULL,
86657 + .name = "read_file_base_eeprom",
86658 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86659 + .param3 = 1,
86660 +};
86661 +struct size_overflow_hash _001093_hash = {
86662 + .next = NULL,
86663 + .name = "read_file_base_eeprom",
86664 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86665 + .param3 = 1,
86666 +};
86667 +struct size_overflow_hash _001094_hash = {
86668 + .next = NULL,
86669 + .name = "read_file_beacon",
86670 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86671 + .param3 = 1,
86672 +};
86673 +struct size_overflow_hash _001095_hash = {
86674 + .next = NULL,
86675 + .name = "read_file_credit_dist_stats",
86676 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
86677 + .param3 = 1,
86678 +};
86679 +struct size_overflow_hash _001096_hash = {
86680 + .next = NULL,
86681 + .name = "read_file_debug",
86682 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86683 + .param3 = 1,
86684 +};
86685 +struct size_overflow_hash _001097_hash = {
86686 + .next = NULL,
86687 + .name = "read_file_debug",
86688 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86689 + .param3 = 1,
86690 +};
86691 +struct size_overflow_hash _001098_hash = {
86692 + .next = NULL,
86693 + .name = "read_file_debug",
86694 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86695 + .param3 = 1,
86696 +};
86697 +struct size_overflow_hash _001099_hash = {
86698 + .next = NULL,
86699 + .name = "read_file_disable_ani",
86700 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86701 + .param3 = 1,
86702 +};
86703 +struct size_overflow_hash _001100_hash = {
86704 + .next = NULL,
86705 + .name = "read_file_dma",
86706 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86707 + .param3 = 1,
86708 +};
86709 +struct size_overflow_hash _001101_hash = {
86710 + .next = NULL,
86711 + .name = "read_file_dump_nfcal",
86712 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86713 + .param3 = 1,
86714 +};
86715 +struct size_overflow_hash _001102_hash = {
86716 + .next = NULL,
86717 + .name = "read_file_frameerrors",
86718 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86719 + .param3 = 1,
86720 +};
86721 +struct size_overflow_hash _001103_hash = {
86722 + .next = NULL,
86723 + .name = "read_file_interrupt",
86724 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86725 + .param3 = 1,
86726 +};
86727 +struct size_overflow_hash _001104_hash = {
86728 + .next = NULL,
86729 + .name = "read_file_misc",
86730 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86731 + .param3 = 1,
86732 +};
86733 +struct size_overflow_hash _001105_hash = {
86734 + .next = NULL,
86735 + .name = "read_file_misc",
86736 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86737 + .param3 = 1,
86738 +};
86739 +struct size_overflow_hash _001106_hash = {
86740 + .next = NULL,
86741 + .name = "read_file_modal_eeprom",
86742 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86743 + .param3 = 1,
86744 +};
86745 +struct size_overflow_hash _001107_hash = {
86746 + .next = NULL,
86747 + .name = "read_file_queue",
86748 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
86749 + .param3 = 1,
86750 +};
86751 +struct size_overflow_hash _001108_hash = {
86752 + .next = NULL,
86753 + .name = "read_file_queue",
86754 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86755 + .param3 = 1,
86756 +};
86757 +struct size_overflow_hash _001109_hash = {
86758 + .next = NULL,
86759 + .name = "read_file_rcstat",
86760 + .file = "drivers/net/wireless/ath/ath9k/rc.c",
86761 + .param3 = 1,
86762 +};
86763 +struct size_overflow_hash _001110_hash = {
86764 + .next = NULL,
86765 + .name = "read_file_recv",
86766 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86767 + .param3 = 1,
86768 +};
86769 +struct size_overflow_hash _001111_hash = {
86770 + .next = NULL,
86771 + .name = "read_file_recv",
86772 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86773 + .param3 = 1,
86774 +};
86775 +struct size_overflow_hash _001112_hash = {
86776 + .next = NULL,
86777 + .name = "read_file_regidx",
86778 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86779 + .param3 = 1,
86780 +};
86781 +struct size_overflow_hash _001113_hash = {
86782 + .next = &_001103_hash,
86783 + .name = "read_file_regval",
86784 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86785 + .param3 = 1,
86786 +};
86787 +struct size_overflow_hash _001114_hash = {
86788 + .next = NULL,
86789 + .name = "read_file_rx_chainmask",
86790 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86791 + .param3 = 1,
86792 +};
86793 +struct size_overflow_hash _001115_hash = {
86794 + .next = NULL,
86795 + .name = "read_file_slot",
86796 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86797 + .param3 = 1,
86798 +};
86799 +struct size_overflow_hash _001116_hash = {
86800 + .next = NULL,
86801 + .name = "read_file_stations",
86802 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86803 + .param3 = 1,
86804 +};
86805 +struct size_overflow_hash _001117_hash = {
86806 + .next = NULL,
86807 + .name = "read_file_tgt_int_stats",
86808 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86809 + .param3 = 1,
86810 +};
86811 +struct size_overflow_hash _001118_hash = {
86812 + .next = NULL,
86813 + .name = "read_file_tgt_rx_stats",
86814 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86815 + .param3 = 1,
86816 +};
86817 +struct size_overflow_hash _001119_hash = {
86818 + .next = NULL,
86819 + .name = "read_file_tgt_stats",
86820 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
86821 + .param3 = 1,
86822 +};
86823 +struct size_overflow_hash _001120_hash = {
86824 + .next = NULL,
86825 + .name = "read_file_tgt_tx_stats",
86826 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86827 + .param3 = 1,
86828 +};
86829 +struct size_overflow_hash _001121_hash = {
86830 + .next = NULL,
86831 + .name = "read_file_tx_chainmask",
86832 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86833 + .param3 = 1,
86834 +};
86835 +struct size_overflow_hash _001122_hash = {
86836 + .next = NULL,
86837 + .name = "read_file_war_stats",
86838 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
86839 + .param3 = 1,
86840 +};
86841 +struct size_overflow_hash _001123_hash = {
86842 + .next = NULL,
86843 + .name = "read_file_wiphy",
86844 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86845 + .param3 = 1,
86846 +};
86847 +struct size_overflow_hash _001124_hash = {
86848 + .next = NULL,
86849 + .name = "read_file_xmit",
86850 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
86851 + .param3 = 1,
86852 +};
86853 +struct size_overflow_hash _001125_hash = {
86854 + .next = NULL,
86855 + .name = "read_file_xmit",
86856 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
86857 + .param3 = 1,
86858 +};
86859 +struct size_overflow_hash _001126_hash = {
86860 + .next = NULL,
86861 + .name = "read_flush",
86862 + .file = "net/sunrpc/cache.c",
86863 + .param3 = 1,
86864 +};
86865 +struct size_overflow_hash _001127_hash = {
86866 + .next = NULL,
86867 + .name = "realloc_buffer",
86868 + .file = "drivers/scsi/device_handler/scsi_dh_alua.c",
86869 + .param2 = 1,
86870 +};
86871 +struct size_overflow_hash _001128_hash = {
86872 + .next = NULL,
86873 + .name = "receive_DataRequest",
86874 + .file = "drivers/block/drbd/drbd_receiver.c",
86875 + .param3 = 1,
86876 +};
86877 +struct size_overflow_hash _001129_hash = {
86878 + .next = NULL,
86879 + .name = "recent_mt_proc_write",
86880 + .file = "net/netfilter/xt_recent.c",
86881 + .param3 = 1,
86882 +};
86883 +struct size_overflow_hash _001130_hash = {
86884 + .next = NULL,
86885 + .name = "redrat3_transmit_ir",
86886 + .file = "drivers/media/rc/redrat3.c",
86887 + .param3 = 1,
86888 +};
86889 +struct size_overflow_hash _001131_hash = {
86890 + .next = NULL,
86891 + .name = "reg_w_buf",
86892 + .file = "drivers/media/video/gspca/t613.c",
86893 + .param3 = 1,
86894 +};
86895 +struct size_overflow_hash _001132_hash = {
86896 + .next = NULL,
86897 + .name = "reg_w_ixbuf",
86898 + .file = "drivers/media/video/gspca/t613.c",
86899 + .param4 = 1,
86900 +};
86901 +struct size_overflow_hash _001133_hash = {
86902 + .next = NULL,
86903 + .name = "reiserfs_allocate_list_bitmaps",
86904 + .file = "include/linux/reiserfs_fs.h",
86905 + .param3 = 1,
86906 +};
86907 +struct size_overflow_hash _001134_hash = {
86908 + .next = NULL,
86909 + .name = "reiserfs_resize",
86910 + .file = "include/linux/reiserfs_fs_sb.h",
86911 + .param2 = 1,
86912 +};
86913 +struct size_overflow_hash _001135_hash = {
86914 + .next = NULL,
86915 + .name = "remote_settings_file_write",
86916 + .file = "drivers/misc/ibmasm/ibmasmfs.c",
86917 + .param3 = 1,
86918 +};
86919 +struct size_overflow_hash _001136_hash = {
86920 + .next = NULL,
86921 + .name = "_req_append_segment",
86922 + .file = "drivers/scsi/osd/osd_initiator.c",
86923 + .param2 = 1,
86924 +};
86925 +struct size_overflow_hash _001137_hash = {
86926 + .next = NULL,
86927 + .name = "retry_count_read",
86928 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86929 + .param3 = 1,
86930 +};
86931 +struct size_overflow_hash _001138_hash = {
86932 + .next = NULL,
86933 + .name = "revalidate",
86934 + .file = "drivers/block/aoe/aoechr.c",
86935 + .param2 = 1,
86936 +};
86937 +struct size_overflow_hash _001139_hash = {
86938 + .next = NULL,
86939 + .name = "rfcomm_sock_setsockopt",
86940 + .file = "net/bluetooth/rfcomm/sock.c",
86941 + .param5 = 1,
86942 +};
86943 +struct size_overflow_hash _001140_hash = {
86944 + .next = NULL,
86945 + .name = "rfkill_fop_read",
86946 + .file = "net/rfkill/core.c",
86947 + .param3 = 1,
86948 +};
86949 +struct size_overflow_hash _001141_hash = {
86950 + .next = NULL,
86951 + .name = "rndis_add_response",
86952 + .file = "drivers/usb/gadget/rndis.c",
86953 + .param2 = 1,
86954 +};
86955 +struct size_overflow_hash _001142_hash = {
86956 + .next = NULL,
86957 + .name = "rng_dev_read",
86958 + .file = "drivers/char/hw_random/core.c",
86959 + .param3 = 1,
86960 +};
86961 +struct size_overflow_hash _001143_hash = {
86962 + .next = NULL,
86963 + .name = "roccat_common_receive",
86964 + .file = "drivers/hid/hid-roccat-common.c",
86965 + .param4 = 1,
86966 +};
86967 +struct size_overflow_hash _001144_hash = {
86968 + .next = NULL,
86969 + .name = "roccat_common_send",
86970 + .file = "drivers/hid/hid-roccat-common.c",
86971 + .param4 = 1,
86972 +};
86973 +struct size_overflow_hash _001145_hash = {
86974 + .next = NULL,
86975 + .name = "roccat_read",
86976 + .file = "drivers/hid/hid-roccat.c",
86977 + .param3 = 1,
86978 +};
86979 +struct size_overflow_hash _001146_hash = {
86980 + .next = NULL,
86981 + .name = "rpc_malloc",
86982 + .file = "include/linux/sunrpc/sched.h",
86983 + .param2 = 1,
86984 +};
86985 +struct size_overflow_hash _001147_hash = {
86986 + .next = NULL,
86987 + .name = "rs_sta_dbgfs_rate_scale_data_read",
86988 + .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
86989 + .param3 = 1,
86990 +};
86991 +struct size_overflow_hash _001148_hash = {
86992 + .next = NULL,
86993 + .name = "rs_sta_dbgfs_scale_table_read",
86994 + .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
86995 + .param3 = 1,
86996 +};
86997 +struct size_overflow_hash _001149_hash = {
86998 + .next = NULL,
86999 + .name = "rs_sta_dbgfs_stats_table_read",
87000 + .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
87001 + .param3 = 1,
87002 +};
87003 +struct size_overflow_hash _001150_hash = {
87004 + .next = NULL,
87005 + .name = "rt2x00debug_write_bbp",
87006 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
87007 + .param3 = 1,
87008 +};
87009 +struct size_overflow_hash _001151_hash = {
87010 + .next = NULL,
87011 + .name = "rt2x00debug_write_csr",
87012 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
87013 + .param3 = 1,
87014 +};
87015 +struct size_overflow_hash _001152_hash = {
87016 + .next = &_000808_hash,
87017 + .name = "rt2x00debug_write_eeprom",
87018 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
87019 + .param3 = 1,
87020 +};
87021 +struct size_overflow_hash _001153_hash = {
87022 + .next = NULL,
87023 + .name = "rt2x00debug_write_rf",
87024 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
87025 + .param3 = 1,
87026 +};
87027 +struct size_overflow_hash _001154_hash = {
87028 + .next = NULL,
87029 + .name = "rts51x_read_mem",
87030 + .file = "drivers/usb/storage/realtek_cr.c",
87031 + .param4 = 1,
87032 +};
87033 +struct size_overflow_hash _001155_hash = {
87034 + .next = NULL,
87035 + .name = "rts51x_write_mem",
87036 + .file = "drivers/usb/storage/realtek_cr.c",
87037 + .param4 = 1,
87038 +};
87039 +struct size_overflow_hash _001156_hash = {
87040 + .next = NULL,
87041 + .name = "rts_threshold_read",
87042 + .file = "net/wireless/debugfs.c",
87043 + .param3 = 1,
87044 +};
87045 +struct size_overflow_hash _001157_hash = {
87046 + .next = NULL,
87047 + .name = "rx_dropped_read",
87048 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87049 + .param3 = 1,
87050 +};
87051 +struct size_overflow_hash _001158_hash = {
87052 + .next = NULL,
87053 + .name = "rx_fcs_err_read",
87054 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87055 + .param3 = 1,
87056 +};
87057 +struct size_overflow_hash _001159_hash = {
87058 + .next = NULL,
87059 + .name = "rx_hdr_overflow_read",
87060 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87061 + .param3 = 1,
87062 +};
87063 +struct size_overflow_hash _001160_hash = {
87064 + .next = NULL,
87065 + .name = "rx_hw_stuck_read",
87066 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87067 + .param3 = 1,
87068 +};
87069 +struct size_overflow_hash _001161_hash = {
87070 + .next = NULL,
87071 + .name = "rx_out_of_mem_read",
87072 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87073 + .param3 = 1,
87074 +};
87075 +struct size_overflow_hash _001162_hash = {
87076 + .next = NULL,
87077 + .name = "rx_path_reset_read",
87078 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87079 + .param3 = 1,
87080 +};
87081 +struct size_overflow_hash _001163_hash = {
87082 + .next = NULL,
87083 + .name = "rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read",
87084 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87085 + .param3 = 1,
87086 +};
87087 +struct size_overflow_hash _001164_hash = {
87088 + .next = NULL,
87089 + .name = "rxpipe_descr_host_int_trig_rx_data_read",
87090 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87091 + .param3 = 1,
87092 +};
87093 +struct size_overflow_hash _001165_hash = {
87094 + .next = NULL,
87095 + .name = "rxpipe_missed_beacon_host_int_trig_rx_data_read",
87096 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87097 + .param3 = 1,
87098 +};
87099 +struct size_overflow_hash _001166_hash = {
87100 + .next = NULL,
87101 + .name = "rxpipe_rx_prep_beacon_drop_read",
87102 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87103 + .param3 = 1,
87104 +};
87105 +struct size_overflow_hash _001167_hash = {
87106 + .next = NULL,
87107 + .name = "rxpipe_tx_xfr_host_int_trig_rx_data_read",
87108 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87109 + .param3 = 1,
87110 +};
87111 +struct size_overflow_hash _001168_hash = {
87112 + .next = NULL,
87113 + .name = "rx_reset_counter_read",
87114 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87115 + .param3 = 1,
87116 +};
87117 +struct size_overflow_hash _001169_hash = {
87118 + .next = NULL,
87119 + .name = "rx_xfr_hint_trig_read",
87120 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87121 + .param3 = 1,
87122 +};
87123 +struct size_overflow_hash _001170_hash = {
87124 + .next = NULL,
87125 + .name = "saa7164_buffer_alloc_user",
87126 + .file = "drivers/media/video/saa7164/saa7164-buffer.c",
87127 + .param2 = 1,
87128 +};
87129 +struct size_overflow_hash _001171_hash = {
87130 + .next = NULL,
87131 + .name = "scsi_execute",
87132 + .file = "include/scsi/scsi_device.h",
87133 + .param5 = 1,
87134 +};
87135 +struct size_overflow_hash _001172_hash = {
87136 + .next = NULL,
87137 + .name = "scsi_tgt_copy_sense",
87138 + .file = "drivers/scsi/scsi_tgt_lib.c",
87139 + .param3 = 1,
87140 +};
87141 +struct size_overflow_hash _001173_hash = {
87142 + .next = NULL,
87143 + .name = "sctp_auth_create_key",
87144 + .file = "net/sctp/auth.c",
87145 + .param1 = 1,
87146 +};
87147 +struct size_overflow_hash _001174_hash = {
87148 + .next = NULL,
87149 + .name = "sctp_make_abort_user",
87150 + .file = "include/net/sctp/sm.h",
87151 + .param3 = 1,
87152 +};
87153 +struct size_overflow_hash _001175_hash = {
87154 + .next = NULL,
87155 + .name = "sctpprobe_read",
87156 + .file = "net/sctp/probe.c",
87157 + .param3 = 1,
87158 +};
87159 +struct size_overflow_hash _001176_hash = {
87160 + .next = NULL,
87161 + .name = "sctp_setsockopt_active_key",
87162 + .file = "net/sctp/socket.c",
87163 + .param3 = 1,
87164 +};
87165 +struct size_overflow_hash _001177_hash = {
87166 + .next = NULL,
87167 + .name = "sctp_setsockopt_adaptation_layer",
87168 + .file = "net/sctp/socket.c",
87169 + .param3 = 1,
87170 +};
87171 +struct size_overflow_hash _001178_hash = {
87172 + .next = NULL,
87173 + .name = "sctp_setsockopt_associnfo",
87174 + .file = "net/sctp/socket.c",
87175 + .param3 = 1,
87176 +};
87177 +struct size_overflow_hash _001179_hash = {
87178 + .next = NULL,
87179 + .name = "sctp_setsockopt_auth_chunk",
87180 + .file = "net/sctp/socket.c",
87181 + .param3 = 1,
87182 +};
87183 +struct size_overflow_hash _001180_hash = {
87184 + .next = NULL,
87185 + .name = "sctp_setsockopt_auth_key",
87186 + .file = "net/sctp/socket.c",
87187 + .param3 = 1,
87188 +};
87189 +struct size_overflow_hash _001181_hash = {
87190 + .next = NULL,
87191 + .name = "sctp_setsockopt_autoclose",
87192 + .file = "net/sctp/socket.c",
87193 + .param3 = 1,
87194 +};
87195 +struct size_overflow_hash _001182_hash = {
87196 + .next = NULL,
87197 + .name = "sctp_setsockopt_context",
87198 + .file = "net/sctp/socket.c",
87199 + .param3 = 1,
87200 +};
87201 +struct size_overflow_hash _001183_hash = {
87202 + .next = NULL,
87203 + .name = "sctp_setsockopt_default_send_param",
87204 + .file = "net/sctp/socket.c",
87205 + .param3 = 1,
87206 +};
87207 +struct size_overflow_hash _001184_hash = {
87208 + .next = NULL,
87209 + .name = "sctp_setsockopt_delayed_ack",
87210 + .file = "net/sctp/socket.c",
87211 + .param3 = 1,
87212 +};
87213 +struct size_overflow_hash _001185_hash = {
87214 + .next = NULL,
87215 + .name = "sctp_setsockopt_del_key",
87216 + .file = "net/sctp/socket.c",
87217 + .param3 = 1,
87218 +};
87219 +struct size_overflow_hash _001186_hash = {
87220 + .next = NULL,
87221 + .name = "sctp_setsockopt_events",
87222 + .file = "net/sctp/socket.c",
87223 + .param3 = 1,
87224 +};
87225 +struct size_overflow_hash _001187_hash = {
87226 + .next = NULL,
87227 + .name = "sctp_setsockopt_hmac_ident",
87228 + .file = "net/sctp/socket.c",
87229 + .param3 = 1,
87230 +};
87231 +struct size_overflow_hash _001188_hash = {
87232 + .next = NULL,
87233 + .name = "sctp_setsockopt_initmsg",
87234 + .file = "net/sctp/socket.c",
87235 + .param3 = 1,
87236 +};
87237 +struct size_overflow_hash _001189_hash = {
87238 + .next = NULL,
87239 + .name = "sctp_setsockopt_maxburst",
87240 + .file = "net/sctp/socket.c",
87241 + .param3 = 1,
87242 +};
87243 +struct size_overflow_hash _001190_hash = {
87244 + .next = NULL,
87245 + .name = "sctp_setsockopt_maxseg",
87246 + .file = "net/sctp/socket.c",
87247 + .param3 = 1,
87248 +};
87249 +struct size_overflow_hash _001191_hash = {
87250 + .next = NULL,
87251 + .name = "sctp_setsockopt_peer_addr_params",
87252 + .file = "net/sctp/socket.c",
87253 + .param3 = 1,
87254 +};
87255 +struct size_overflow_hash _001192_hash = {
87256 + .next = NULL,
87257 + .name = "sctp_setsockopt_peer_primary_addr",
87258 + .file = "net/sctp/socket.c",
87259 + .param3 = 1,
87260 +};
87261 +struct size_overflow_hash _001193_hash = {
87262 + .next = NULL,
87263 + .name = "sctp_setsockopt_rtoinfo",
87264 + .file = "net/sctp/socket.c",
87265 + .param3 = 1,
87266 +};
87267 +struct size_overflow_hash _001194_hash = {
87268 + .next = NULL,
87269 + .name = "sctp_tsnmap_init",
87270 + .file = "include/net/sctp/tsnmap.h",
87271 + .param2 = 1,
87272 +};
87273 +struct size_overflow_hash _001195_hash = {
87274 + .next = NULL,
87275 + .name = "send_control_msg",
87276 + .file = "drivers/media/video/zr364xx.c",
87277 + .param6 = 1,
87278 +};
87279 +struct size_overflow_hash _001196_hash = {
87280 + .next = NULL,
87281 + .name = "set_aoe_iflist",
87282 + .file = "drivers/block/aoe/aoenet.c",
87283 + .param2 = 1,
87284 +};
87285 +struct size_overflow_hash _001197_hash = {
87286 + .next = NULL,
87287 + .name = "set_registers",
87288 + .file = "drivers/net/usb/pegasus.c",
87289 + .param3 = 1,
87290 +};
87291 +struct size_overflow_hash _001198_hash = {
87292 + .next = NULL,
87293 + .name = "setsockopt",
87294 + .file = "net/caif/caif_socket.c",
87295 + .param5 = 1,
87296 +};
87297 +struct size_overflow_hash _001199_hash = {
87298 + .next = NULL,
87299 + .name = "setup_req",
87300 + .file = "drivers/usb/gadget/inode.c",
87301 + .param3 = 1,
87302 +};
87303 +struct size_overflow_hash _001200_hash = {
87304 + .next = NULL,
87305 + .name = "sfq_alloc",
87306 + .file = "net/sched/sch_sfq.c",
87307 + .param1 = 1,
87308 +};
87309 +struct size_overflow_hash _001201_hash = {
87310 + .next = NULL,
87311 + .name = "sgl_map_user_pages",
87312 + .file = "drivers/scsi/st.c",
87313 + .param2 = 1,
87314 +};
87315 +struct size_overflow_hash _001202_hash = {
87316 + .next = NULL,
87317 + .name = "short_retry_limit_read",
87318 + .file = "net/wireless/debugfs.c",
87319 + .param3 = 1,
87320 +};
87321 +struct size_overflow_hash _001203_hash = {
87322 + .next = NULL,
87323 + .name = "sm501_create_subdev",
87324 + .file = "drivers/mfd/sm501.c",
87325 + .param3 = 1,
87326 + .param4 = 1,
87327 +};
87328 +struct size_overflow_hash _001205_hash = {
87329 + .next = NULL,
87330 + .name = "sn9c102_read",
87331 + .file = "drivers/media/video/sn9c102/sn9c102_core.c",
87332 + .param3 = 1,
87333 +};
87334 +struct size_overflow_hash _001206_hash = {
87335 + .next = NULL,
87336 + .name = "snd_ac97_pcm_assign",
87337 + .file = "include/sound/ac97_codec.h",
87338 + .param2 = 1,
87339 +};
87340 +struct size_overflow_hash _001207_hash = {
87341 + .next = NULL,
87342 + .name = "snd_ctl_elem_user_tlv",
87343 + .file = "sound/core/control.c",
87344 + .param3 = 1,
87345 +};
87346 +struct size_overflow_hash _001208_hash = {
87347 + .next = NULL,
87348 + .name = "snd_emu10k1_fx8010_read",
87349 + .file = "sound/pci/emu10k1/emuproc.c",
87350 + .param5 = 1,
87351 +};
87352 +struct size_overflow_hash _001209_hash = {
87353 + .next = NULL,
87354 + .name = "snd_es1938_capture_copy",
87355 + .file = "sound/pci/es1938.c",
87356 + .param5 = 1,
87357 +};
87358 +struct size_overflow_hash _001210_hash = {
87359 + .next = NULL,
87360 + .name = "snd_gus_dram_peek",
87361 + .file = "sound/isa/gus/gus_dram.c",
87362 + .param4 = 1,
87363 +};
87364 +struct size_overflow_hash _001211_hash = {
87365 + .next = NULL,
87366 + .name = "snd_gus_dram_poke",
87367 + .file = "sound/isa/gus/gus_dram.c",
87368 + .param4 = 1,
87369 +};
87370 +struct size_overflow_hash _001212_hash = {
87371 + .next = NULL,
87372 + .name = "snd_hdsp_capture_copy",
87373 + .file = "sound/pci/rme9652/hdsp.c",
87374 + .param5 = 1,
87375 +};
87376 +struct size_overflow_hash _001213_hash = {
87377 + .next = NULL,
87378 + .name = "snd_hdsp_playback_copy",
87379 + .file = "sound/pci/rme9652/hdsp.c",
87380 + .param5 = 1,
87381 +};
87382 +struct size_overflow_hash _001214_hash = {
87383 + .next = NULL,
87384 + .name = "snd_info_entry_write",
87385 + .file = "sound/core/info.c",
87386 + .param3 = 1,
87387 +};
87388 +struct size_overflow_hash _001215_hash = {
87389 + .next = NULL,
87390 + .name = "snd_opl4_mem_proc_read",
87391 + .file = "sound/drivers/opl4/opl4_proc.c",
87392 + .param5 = 1,
87393 +};
87394 +struct size_overflow_hash _001216_hash = {
87395 + .next = NULL,
87396 + .name = "snd_opl4_mem_proc_write",
87397 + .file = "sound/drivers/opl4/opl4_proc.c",
87398 + .param5 = 1,
87399 +};
87400 +struct size_overflow_hash _001217_hash = {
87401 + .next = NULL,
87402 + .name = "snd_pcm_aio_read",
87403 + .file = "sound/core/pcm_native.c",
87404 + .param3 = 1,
87405 +};
87406 +struct size_overflow_hash _001218_hash = {
87407 + .next = NULL,
87408 + .name = "snd_pcm_aio_write",
87409 + .file = "sound/core/pcm_native.c",
87410 + .param3 = 1,
87411 +};
87412 +struct size_overflow_hash _001219_hash = {
87413 + .next = NULL,
87414 + .name = "snd_pcm_alloc_vmalloc_buffer",
87415 + .file = "drivers/media/video/cx231xx/cx231xx-audio.c",
87416 + .param2 = 1,
87417 +};
87418 +struct size_overflow_hash _001220_hash = {
87419 + .next = NULL,
87420 + .name = "snd_pcm_alloc_vmalloc_buffer",
87421 + .file = "drivers/media/video/cx18/cx18-alsa-pcm.c",
87422 + .param2 = 1,
87423 +};
87424 +struct size_overflow_hash _001221_hash = {
87425 + .next = NULL,
87426 + .name = "snd_pcm_alloc_vmalloc_buffer",
87427 + .file = "drivers/media/video/em28xx/em28xx-audio.c",
87428 + .param2 = 1,
87429 +};
87430 +struct size_overflow_hash _001222_hash = {
87431 + .next = NULL,
87432 + .name = "_snd_pcm_lib_alloc_vmalloc_buffer",
87433 + .file = "include/sound/pcm.h",
87434 + .param2 = 1,
87435 +};
87436 +struct size_overflow_hash _001223_hash = {
87437 + .next = NULL,
87438 + .name = "snd_pcm_oss_read1",
87439 + .file = "sound/core/oss/pcm_oss.c",
87440 + .param3 = 1,
87441 +};
87442 +struct size_overflow_hash _001224_hash = {
87443 + .next = NULL,
87444 + .name = "snd_pcm_oss_write1",
87445 + .file = "sound/core/oss/pcm_oss.c",
87446 + .param3 = 1,
87447 +};
87448 +struct size_overflow_hash _001225_hash = {
87449 + .next = NULL,
87450 + .name = "snd_pcm_oss_write2",
87451 + .file = "sound/core/oss/pcm_oss.c",
87452 + .param3 = 1,
87453 +};
87454 +struct size_overflow_hash _001226_hash = {
87455 + .next = NULL,
87456 + .name = "snd_pcm_plugin_build",
87457 + .file = "sound/core/oss/pcm_plugin.c",
87458 + .param5 = 1,
87459 +};
87460 +struct size_overflow_hash _001227_hash = {
87461 + .next = NULL,
87462 + .name = "snd_rme9652_capture_copy",
87463 + .file = "sound/pci/rme9652/rme9652.c",
87464 + .param5 = 1,
87465 +};
87466 +struct size_overflow_hash _001228_hash = {
87467 + .next = NULL,
87468 + .name = "snd_rme9652_playback_copy",
87469 + .file = "sound/pci/rme9652/rme9652.c",
87470 + .param5 = 1,
87471 +};
87472 +struct size_overflow_hash _001229_hash = {
87473 + .next = NULL,
87474 + .name = "snd_soc_hw_bulk_write_raw",
87475 + .file = "sound/soc/soc-io.c",
87476 + .param4 = 1,
87477 +};
87478 +struct size_overflow_hash _001230_hash = {
87479 + .next = NULL,
87480 + .name = "snd_usb_ctl_msg",
87481 + .file = "sound/usb/helper.c",
87482 + .param8 = 1,
87483 +};
87484 +struct size_overflow_hash _001231_hash = {
87485 + .next = NULL,
87486 + .name = "_sp2d_alloc",
87487 + .file = "fs/exofs/ore_raid.c",
87488 + .param1 = 1,
87489 +};
87490 +struct size_overflow_hash _001232_hash = {
87491 + .next = NULL,
87492 + .name = "spidev_message",
87493 + .file = "drivers/spi/spidev.c",
87494 + .param3 = 1,
87495 +};
87496 +struct size_overflow_hash _001233_hash = {
87497 + .next = NULL,
87498 + .name = "spidev_write",
87499 + .file = "drivers/spi/spidev.c",
87500 + .param3 = 1,
87501 +};
87502 +struct size_overflow_hash _001234_hash = {
87503 + .next = NULL,
87504 + .name = "spi_show_regs",
87505 + .file = "drivers/spi/spi-dw.c",
87506 + .param3 = 1,
87507 +};
87508 +struct size_overflow_hash _001235_hash = {
87509 + .next = NULL,
87510 + .name = "srp_alloc_iu",
87511 + .file = "drivers/infiniband/ulp/srp/ib_srp.c",
87512 + .param2 = 1,
87513 +};
87514 +struct size_overflow_hash _001236_hash = {
87515 + .next = NULL,
87516 + .name = "srp_iu_pool_alloc",
87517 + .file = "drivers/scsi/libsrp.c",
87518 + .param2 = 1,
87519 +};
87520 +struct size_overflow_hash _001237_hash = {
87521 + .next = NULL,
87522 + .name = "srp_ring_alloc",
87523 + .file = "drivers/scsi/libsrp.c",
87524 + .param2 = 1,
87525 +};
87526 +struct size_overflow_hash _001238_hash = {
87527 + .next = NULL,
87528 + .name = "sta_agg_status_read",
87529 + .file = "net/mac80211/debugfs_sta.c",
87530 + .param3 = 1,
87531 +};
87532 +struct size_overflow_hash _001239_hash = {
87533 + .next = NULL,
87534 + .name = "sta_agg_status_write",
87535 + .file = "net/mac80211/debugfs_sta.c",
87536 + .param3 = 1,
87537 +};
87538 +struct size_overflow_hash _001240_hash = {
87539 + .next = NULL,
87540 + .name = "sta_connected_time_read",
87541 + .file = "net/mac80211/debugfs_sta.c",
87542 + .param3 = 1,
87543 +};
87544 +struct size_overflow_hash _001241_hash = {
87545 + .next = NULL,
87546 + .name = "sta_flags_read",
87547 + .file = "net/mac80211/debugfs_sta.c",
87548 + .param3 = 1,
87549 +};
87550 +struct size_overflow_hash _001242_hash = {
87551 + .next = NULL,
87552 + .name = "sta_ht_capa_read",
87553 + .file = "net/mac80211/debugfs_sta.c",
87554 + .param3 = 1,
87555 +};
87556 +struct size_overflow_hash _001243_hash = {
87557 + .next = NULL,
87558 + .name = "sta_last_seq_ctrl_read",
87559 + .file = "net/mac80211/debugfs_sta.c",
87560 + .param3 = 1,
87561 +};
87562 +struct size_overflow_hash _001244_hash = {
87563 + .next = NULL,
87564 + .name = "sta_num_ps_buf_frames_read",
87565 + .file = "net/mac80211/debugfs_sta.c",
87566 + .param3 = 1,
87567 +};
87568 +struct size_overflow_hash _001245_hash = {
87569 + .next = NULL,
87570 + .name = "stk_prepare_sio_buffers",
87571 + .file = "drivers/media/video/stk-webcam.c",
87572 + .param2 = 1,
87573 +};
87574 +struct size_overflow_hash _001246_hash = {
87575 + .next = NULL,
87576 + .name = "store_iwmct_log_level",
87577 + .file = "drivers/misc/iwmc3200top/log.c",
87578 + .param4 = 1,
87579 +};
87580 +struct size_overflow_hash _001247_hash = {
87581 + .next = NULL,
87582 + .name = "store_iwmct_log_level_fw",
87583 + .file = "drivers/misc/iwmc3200top/log.c",
87584 + .param4 = 1,
87585 +};
87586 +struct size_overflow_hash _001248_hash = {
87587 + .next = NULL,
87588 + .name = "str_to_user",
87589 + .file = "drivers/input/evdev.c",
87590 + .param2 = 1,
87591 +};
87592 +struct size_overflow_hash _001249_hash = {
87593 + .next = NULL,
87594 + .name = "svc_pool_map_alloc_arrays",
87595 + .file = "net/sunrpc/svc.c",
87596 + .param2 = 1,
87597 +};
87598 +struct size_overflow_hash _001250_hash = {
87599 + .next = NULL,
87600 + .name = "svc_setsockopt",
87601 + .file = "net/atm/svc.c",
87602 + .param5 = 1,
87603 +};
87604 +struct size_overflow_hash _001251_hash = {
87605 + .next = NULL,
87606 + .name = "t4_alloc_mem",
87607 + .file = "drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c",
87608 + .param1 = 1,
87609 +};
87610 +struct size_overflow_hash _001252_hash = {
87611 + .next = NULL,
87612 + .name = "tda10048_writeregbulk",
87613 + .file = "drivers/media/dvb/frontends/tda10048.c",
87614 + .param4 = 1,
87615 +};
87616 +struct size_overflow_hash _001253_hash = {
87617 + .next = NULL,
87618 + .name = "__team_options_register",
87619 + .file = "drivers/net/team/team.c",
87620 + .param3 = 1,
87621 +};
87622 +struct size_overflow_hash _001254_hash = {
87623 + .next = NULL,
87624 + .name = "tifm_alloc_adapter",
87625 + .file = "include/linux/tifm.h",
87626 + .param1 = 1,
87627 +};
87628 +struct size_overflow_hash _001255_hash = {
87629 + .next = NULL,
87630 + .name = "tipc_subseq_alloc",
87631 + .file = "net/tipc/name_table.c",
87632 + .param1 = 1,
87633 +};
87634 +struct size_overflow_hash _001256_hash = {
87635 + .next = NULL,
87636 + .name = "tm6000_read_write_usb",
87637 + .file = "drivers/media/video/tm6000/tm6000-core.c",
87638 + .param7 = 1,
87639 +};
87640 +struct size_overflow_hash _001257_hash = {
87641 + .next = NULL,
87642 + .name = "tower_write",
87643 + .file = "drivers/usb/misc/legousbtower.c",
87644 + .param3 = 1,
87645 +};
87646 +struct size_overflow_hash _001258_hash = {
87647 + .next = NULL,
87648 + .name = "trusted_instantiate",
87649 + .file = "security/keys/trusted.c",
87650 + .param3 = 1,
87651 +};
87652 +struct size_overflow_hash _001259_hash = {
87653 + .next = NULL,
87654 + .name = "trusted_update",
87655 + .file = "security/keys/trusted.c",
87656 + .param3 = 1,
87657 +};
87658 +struct size_overflow_hash _001260_hash = {
87659 + .next = NULL,
87660 + .name = "TSS_rawhmac",
87661 + .file = "security/keys/trusted.c",
87662 + .param3 = 1,
87663 +};
87664 +struct size_overflow_hash _001261_hash = {
87665 + .next = NULL,
87666 + .name = "tx_internal_desc_overflow_read",
87667 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87668 + .param3 = 1,
87669 +};
87670 +struct size_overflow_hash _001262_hash = {
87671 + .next = NULL,
87672 + .name = "tx_queue_len_read",
87673 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87674 + .param3 = 1,
87675 +};
87676 +struct size_overflow_hash _001263_hash = {
87677 + .next = NULL,
87678 + .name = "tx_queue_len_read",
87679 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
87680 + .param3 = 1,
87681 +};
87682 +struct size_overflow_hash _001264_hash = {
87683 + .next = NULL,
87684 + .name = "tx_queue_status_read",
87685 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87686 + .param3 = 1,
87687 +};
87688 +struct size_overflow_hash _001265_hash = {
87689 + .next = NULL,
87690 + .name = "udf_alloc_i_data",
87691 + .file = "fs/udf/inode.c",
87692 + .param2 = 1,
87693 +};
87694 +struct size_overflow_hash _001266_hash = {
87695 + .next = NULL,
87696 + .name = "udf_sb_alloc_partition_maps",
87697 + .file = "fs/udf/super.c",
87698 + .param2 = 1,
87699 +};
87700 +struct size_overflow_hash _001267_hash = {
87701 + .next = NULL,
87702 + .name = "uea_idma_write",
87703 + .file = "drivers/usb/atm/ueagle-atm.c",
87704 + .param3 = 1,
87705 +};
87706 +struct size_overflow_hash _001268_hash = {
87707 + .next = NULL,
87708 + .name = "uea_request",
87709 + .file = "drivers/usb/atm/ueagle-atm.c",
87710 + .param4 = 1,
87711 +};
87712 +struct size_overflow_hash _001269_hash = {
87713 + .next = NULL,
87714 + .name = "uea_send_modem_cmd",
87715 + .file = "drivers/usb/atm/ueagle-atm.c",
87716 + .param3 = 1,
87717 +};
87718 +struct size_overflow_hash _001270_hash = {
87719 + .next = NULL,
87720 + .name = "uhci_debug_read",
87721 + .file = "drivers/usb/host/uhci-debug.c",
87722 + .param3 = 1,
87723 +};
87724 +struct size_overflow_hash _001271_hash = {
87725 + .next = NULL,
87726 + .name = "uio_read",
87727 + .file = "drivers/uio/uio.c",
87728 + .param3 = 1,
87729 +};
87730 +struct size_overflow_hash _001272_hash = {
87731 + .next = NULL,
87732 + .name = "uio_write",
87733 + .file = "drivers/uio/uio.c",
87734 + .param3 = 1,
87735 +};
87736 +struct size_overflow_hash _001273_hash = {
87737 + .next = NULL,
87738 + .name = "um_idi_write",
87739 + .file = "drivers/isdn/hardware/eicon/divasi.c",
87740 + .param3 = 1,
87741 +};
87742 +struct size_overflow_hash _001274_hash = {
87743 + .next = NULL,
87744 + .name = "unlink_queued",
87745 + .file = "drivers/usb/misc/usbtest.c",
87746 + .param3 = 1,
87747 + .param4 = 1,
87748 +};
87749 +struct size_overflow_hash _001275_hash = {
87750 + .next = NULL,
87751 + .name = "us122l_ctl_msg",
87752 + .file = "sound/usb/usx2y/us122l.c",
87753 + .param8 = 1,
87754 +};
87755 +struct size_overflow_hash _001276_hash = {
87756 + .next = NULL,
87757 + .name = "usbdev_read",
87758 + .file = "drivers/usb/core/devio.c",
87759 + .param3 = 1,
87760 +};
87761 +struct size_overflow_hash _001277_hash = {
87762 + .next = NULL,
87763 + .name = "usblp_read",
87764 + .file = "drivers/usb/class/usblp.c",
87765 + .param3 = 1,
87766 +};
87767 +struct size_overflow_hash _001278_hash = {
87768 + .next = NULL,
87769 + .name = "usblp_write",
87770 + .file = "drivers/usb/class/usblp.c",
87771 + .param3 = 1,
87772 +};
87773 +struct size_overflow_hash _001279_hash = {
87774 + .next = NULL,
87775 + .name = "usbtest_alloc_urb",
87776 + .file = "drivers/usb/misc/usbtest.c",
87777 + .param3 = 1,
87778 + .param5 = 1,
87779 +};
87780 +struct size_overflow_hash _001281_hash = {
87781 + .next = NULL,
87782 + .name = "usbtmc_read",
87783 + .file = "drivers/usb/class/usbtmc.c",
87784 + .param3 = 1,
87785 +};
87786 +struct size_overflow_hash _001282_hash = {
87787 + .next = NULL,
87788 + .name = "usbtmc_write",
87789 + .file = "drivers/usb/class/usbtmc.c",
87790 + .param3 = 1,
87791 +};
87792 +struct size_overflow_hash _001283_hash = {
87793 + .next = NULL,
87794 + .name = "usbvision_v4l2_read",
87795 + .file = "drivers/media/video/usbvision/usbvision-video.c",
87796 + .param3 = 1,
87797 +};
87798 +struct size_overflow_hash _001284_hash = {
87799 + .next = NULL,
87800 + .name = "uvc_alloc_buffers",
87801 + .file = "drivers/usb/gadget/uvc_queue.c",
87802 + .param2 = 1,
87803 +};
87804 +struct size_overflow_hash _001285_hash = {
87805 + .next = NULL,
87806 + .name = "uvc_alloc_entity",
87807 + .file = "drivers/media/video/uvc/uvc_driver.c",
87808 + .param3 = 1,
87809 +};
87810 +struct size_overflow_hash _001286_hash = {
87811 + .next = NULL,
87812 + .name = "uvc_debugfs_stats_read",
87813 + .file = "drivers/media/video/uvc/uvc_debugfs.c",
87814 + .param3 = 1,
87815 +};
87816 +struct size_overflow_hash _001287_hash = {
87817 + .next = NULL,
87818 + .name = "uvc_simplify_fraction",
87819 + .file = "drivers/media/video/uvc/uvc_driver.c",
87820 + .param3 = 1,
87821 +};
87822 +struct size_overflow_hash _001288_hash = {
87823 + .next = NULL,
87824 + .name = "uwb_rc_neh_grok_event",
87825 + .file = "drivers/uwb/neh.c",
87826 + .param3 = 1,
87827 +};
87828 +struct size_overflow_hash _001289_hash = {
87829 + .next = NULL,
87830 + .name = "v4l2_event_subscribe",
87831 + .file = "include/media/v4l2-event.h",
87832 + .param3 = 1,
87833 +};
87834 +struct size_overflow_hash _001290_hash = {
87835 + .next = NULL,
87836 + .name = "v4l_stk_read",
87837 + .file = "drivers/media/video/stk-webcam.c",
87838 + .param3 = 1,
87839 +};
87840 +struct size_overflow_hash _001291_hash = {
87841 + .next = NULL,
87842 + .name = "__vb2_perform_fileio",
87843 + .file = "drivers/media/video/videobuf2-core.c",
87844 + .param3 = 1,
87845 +};
87846 +struct size_overflow_hash _001292_hash = {
87847 + .next = NULL,
87848 + .name = "vdma_mem_alloc",
87849 + .file = "arch/x86/include/asm/floppy.h",
87850 + .param1 = 1,
87851 +};
87852 +struct size_overflow_hash _001293_hash = {
87853 + .next = NULL,
87854 + .name = "vfd_write",
87855 + .file = "drivers/media/rc/imon.c",
87856 + .param3 = 1,
87857 +};
87858 +struct size_overflow_hash _001294_hash = {
87859 + .next = NULL,
87860 + .name = "vhci_get_user",
87861 + .file = "drivers/bluetooth/hci_vhci.c",
87862 + .param3 = 1,
87863 +};
87864 +struct size_overflow_hash _001295_hash = {
87865 + .next = NULL,
87866 + .name = "__vhost_add_used_n",
87867 + .file = "drivers/vhost/vhost.c",
87868 + .param3 = 1,
87869 +};
87870 +struct size_overflow_hash _001296_hash = {
87871 + .next = NULL,
87872 + .name = "__videobuf_alloc_vb",
87873 + .file = "drivers/media/video/videobuf-dma-sg.c",
87874 + .param1 = 1,
87875 +};
87876 +struct size_overflow_hash _001297_hash = {
87877 + .next = NULL,
87878 + .name = "__videobuf_alloc_vb",
87879 + .file = "drivers/media/video/videobuf-dma-contig.c",
87880 + .param1 = 1,
87881 +};
87882 +struct size_overflow_hash _001298_hash = {
87883 + .next = NULL,
87884 + .name = "__videobuf_alloc_vb",
87885 + .file = "drivers/media/video/videobuf-vmalloc.c",
87886 + .param1 = 1,
87887 +};
87888 +struct size_overflow_hash _001299_hash = {
87889 + .next = NULL,
87890 + .name = "__videobuf_copy_to_user",
87891 + .file = "drivers/media/video/videobuf-core.c",
87892 + .param4 = 1,
87893 +};
87894 +struct size_overflow_hash _001300_hash = {
87895 + .next = NULL,
87896 + .name = "video_proc_write",
87897 + .file = "drivers/platform/x86/toshiba_acpi.c",
87898 + .param3 = 1,
87899 +};
87900 +struct size_overflow_hash _001301_hash = {
87901 + .next = NULL,
87902 + .name = "vifs_state_read",
87903 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
87904 + .param3 = 1,
87905 +};
87906 +struct size_overflow_hash _001302_hash = {
87907 + .next = NULL,
87908 + .name = "vlsi_alloc_ring",
87909 + .file = "drivers/net/irda/vlsi_ir.c",
87910 + .param3 = 1,
87911 + .param4 = 1,
87912 +};
87913 +struct size_overflow_hash _001304_hash = {
87914 + .next = NULL,
87915 + .name = "vol_cdev_direct_write",
87916 + .file = "drivers/mtd/ubi/cdev.c",
87917 + .param3 = 1,
87918 +};
87919 +struct size_overflow_hash _001305_hash = {
87920 + .next = NULL,
87921 + .name = "vol_cdev_read",
87922 + .file = "drivers/mtd/ubi/cdev.c",
87923 + .param3 = 1,
87924 +};
87925 +struct size_overflow_hash _001306_hash = {
87926 + .next = NULL,
87927 + .name = "vring_add_indirect",
87928 + .file = "drivers/virtio/virtio_ring.c",
87929 + .param3 = 1,
87930 + .param4 = 1,
87931 +};
87932 +struct size_overflow_hash _001308_hash = {
87933 + .next = NULL,
87934 + .name = "vring_new_virtqueue",
87935 + .file = "include/linux/virtio_ring.h",
87936 + .param1 = 1,
87937 +};
87938 +struct size_overflow_hash _001309_hash = {
87939 + .next = NULL,
87940 + .name = "__vxge_hw_channel_allocate",
87941 + .file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
87942 + .param3 = 1,
87943 +};
87944 +struct size_overflow_hash _001310_hash = {
87945 + .next = NULL,
87946 + .name = "vxge_os_dma_malloc",
87947 + .file = "drivers/net/ethernet/neterion/vxge/vxge-config.h",
87948 + .param2 = 1,
87949 +};
87950 +struct size_overflow_hash _001311_hash = {
87951 + .next = NULL,
87952 + .name = "vxge_os_dma_malloc_async",
87953 + .file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
87954 + .param3 = 1,
87955 +};
87956 +struct size_overflow_hash _001312_hash = {
87957 + .next = NULL,
87958 + .name = "w9966_v4l_read",
87959 + .file = "drivers/media/video/w9966.c",
87960 + .param3 = 1,
87961 +};
87962 +struct size_overflow_hash _001313_hash = {
87963 + .next = NULL,
87964 + .name = "waiters_read",
87965 + .file = "fs/dlm/debug_fs.c",
87966 + .param3 = 1,
87967 +};
87968 +struct size_overflow_hash _001314_hash = {
87969 + .next = NULL,
87970 + .name = "wa_nep_queue",
87971 + .file = "drivers/usb/wusbcore/wa-nep.c",
87972 + .param2 = 1,
87973 +};
87974 +struct size_overflow_hash _001315_hash = {
87975 + .next = NULL,
87976 + .name = "__wa_xfer_setup_segs",
87977 + .file = "drivers/usb/wusbcore/wa-xfer.c",
87978 + .param2 = 1,
87979 +};
87980 +struct size_overflow_hash _001316_hash = {
87981 + .next = NULL,
87982 + .name = "wdm_read",
87983 + .file = "drivers/usb/class/cdc-wdm.c",
87984 + .param3 = 1,
87985 +};
87986 +struct size_overflow_hash _001317_hash = {
87987 + .next = NULL,
87988 + .name = "wdm_write",
87989 + .file = "drivers/usb/class/cdc-wdm.c",
87990 + .param3 = 1,
87991 +};
87992 +struct size_overflow_hash _001318_hash = {
87993 + .next = NULL,
87994 + .name = "wep_addr_key_count_read",
87995 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87996 + .param3 = 1,
87997 +};
87998 +struct size_overflow_hash _001319_hash = {
87999 + .next = &_000480_hash,
88000 + .name = "wep_decrypt_fail_read",
88001 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88002 + .param3 = 1,
88003 +};
88004 +struct size_overflow_hash _001320_hash = {
88005 + .next = NULL,
88006 + .name = "wep_default_key_count_read",
88007 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88008 + .param3 = 1,
88009 +};
88010 +struct size_overflow_hash _001321_hash = {
88011 + .next = NULL,
88012 + .name = "wep_interrupt_read",
88013 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88014 + .param3 = 1,
88015 +};
88016 +struct size_overflow_hash _001322_hash = {
88017 + .next = NULL,
88018 + .name = "wep_key_not_found_read",
88019 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88020 + .param3 = 1,
88021 +};
88022 +struct size_overflow_hash _001323_hash = {
88023 + .next = NULL,
88024 + .name = "wep_packets_read",
88025 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88026 + .param3 = 1,
88027 +};
88028 +struct size_overflow_hash _001324_hash = {
88029 + .next = NULL,
88030 + .name = "wiimote_hid_send",
88031 + .file = "drivers/hid/hid-wiimote-core.c",
88032 + .param3 = 1,
88033 +};
88034 +struct size_overflow_hash _001325_hash = {
88035 + .next = NULL,
88036 + .name = "wl1271_format_buffer",
88037 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88038 + .param2 = 1,
88039 +};
88040 +struct size_overflow_hash _001326_hash = {
88041 + .next = NULL,
88042 + .name = "wl1273_fm_fops_write",
88043 + .file = "drivers/media/radio/radio-wl1273.c",
88044 + .param3 = 1,
88045 +};
88046 +struct size_overflow_hash _001327_hash = {
88047 + .next = NULL,
88048 + .name = "wlc_phy_loadsampletable_nphy",
88049 + .file = "drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c",
88050 + .param3 = 1,
88051 +};
88052 +struct size_overflow_hash _001328_hash = {
88053 + .next = NULL,
88054 + .name = "wpan_phy_alloc",
88055 + .file = "include/net/wpan-phy.h",
88056 + .param1 = 1,
88057 +};
88058 +struct size_overflow_hash _001329_hash = {
88059 + .next = NULL,
88060 + .name = "write_flush",
88061 + .file = "net/sunrpc/cache.c",
88062 + .param3 = 1,
88063 +};
88064 +struct size_overflow_hash _001330_hash = {
88065 + .next = NULL,
88066 + .name = "write_rio",
88067 + .file = "drivers/usb/misc/rio500.c",
88068 + .param3 = 1,
88069 +};
88070 +struct size_overflow_hash _001331_hash = {
88071 + .next = NULL,
88072 + .name = "wusb_ccm_mac",
88073 + .file = "drivers/usb/wusbcore/crypto.c",
88074 + .param7 = 1,
88075 +};
88076 +struct size_overflow_hash _001332_hash = {
88077 + .next = NULL,
88078 + .name = "xfs_attrmulti_attr_set",
88079 + .file = "fs/xfs/xfs_ioctl.c",
88080 + .param4 = 1,
88081 +};
88082 +struct size_overflow_hash _001333_hash = {
88083 + .next = NULL,
88084 + .name = "xfs_handle_to_dentry",
88085 + .file = "fs/xfs/xfs_ioctl.c",
88086 + .param3 = 1,
88087 +};
88088 +struct size_overflow_hash _001334_hash = {
88089 + .next = NULL,
88090 + .name = "xhci_alloc_stream_info",
88091 + .file = "drivers/usb/host/xhci-mem.c",
88092 + .param3 = 1,
88093 +};
88094 +struct size_overflow_hash _001335_hash = {
88095 + .next = NULL,
88096 + .name = "xprt_alloc",
88097 + .file = "include/linux/sunrpc/xprt.h",
88098 + .param2 = 1,
88099 +};
88100 +struct size_overflow_hash _001336_hash = {
88101 + .next = NULL,
88102 + .name = "xprt_rdma_allocate",
88103 + .file = "net/sunrpc/xprtrdma/transport.c",
88104 + .param2 = 1,
88105 +};
88106 +struct size_overflow_hash _001337_hash = {
88107 + .next = NULL,
88108 + .name = "xt_alloc_table_info",
88109 + .file = "include/linux/netfilter/x_tables.h",
88110 + .param1 = 1,
88111 +};
88112 +struct size_overflow_hash _001338_hash = {
88113 + .next = NULL,
88114 + .name = "zd_usb_iowrite16v_async",
88115 + .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
88116 + .param3 = 1,
88117 +};
88118 +struct size_overflow_hash _001339_hash = {
88119 + .next = NULL,
88120 + .name = "zd_usb_read_fw",
88121 + .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
88122 + .param4 = 1,
88123 +};
88124 +struct size_overflow_hash _001340_hash = {
88125 + .next = NULL,
88126 + .name = "zoran_write",
88127 + .file = "drivers/media/video/zoran/zoran_procfs.c",
88128 + .param3 = 1,
88129 +};
88130 +struct size_overflow_hash _001341_hash = {
88131 + .next = NULL,
88132 + .name = "ad7879_spi_multi_read",
88133 + .file = "drivers/input/touchscreen/ad7879-spi.c",
88134 + .param3 = 1,
88135 +};
88136 +struct size_overflow_hash _001342_hash = {
88137 + .next = NULL,
88138 + .name = "aes_decrypt_fail_read",
88139 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88140 + .param3 = 1,
88141 +};
88142 +struct size_overflow_hash _001343_hash = {
88143 + .next = NULL,
88144 + .name = "aes_decrypt_interrupt_read",
88145 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88146 + .param3 = 1,
88147 +};
88148 +struct size_overflow_hash _001344_hash = {
88149 + .next = NULL,
88150 + .name = "aes_decrypt_packets_read",
88151 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88152 + .param3 = 1,
88153 +};
88154 +struct size_overflow_hash _001345_hash = {
88155 + .next = NULL,
88156 + .name = "aes_encrypt_fail_read",
88157 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88158 + .param3 = 1,
88159 +};
88160 +struct size_overflow_hash _001346_hash = {
88161 + .next = NULL,
88162 + .name = "aes_encrypt_interrupt_read",
88163 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88164 + .param3 = 1,
88165 +};
88166 +struct size_overflow_hash _001347_hash = {
88167 + .next = NULL,
88168 + .name = "aes_encrypt_packets_read",
88169 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88170 + .param3 = 1,
88171 +};
88172 +struct size_overflow_hash _001348_hash = {
88173 + .next = NULL,
88174 + .name = "afs_cell_create",
88175 + .file = "fs/afs/cell.c",
88176 + .param2 = 1,
88177 +};
88178 +struct size_overflow_hash _001349_hash = {
88179 + .next = NULL,
88180 + .name = "agp_create_user_memory",
88181 + .file = "drivers/char/agp/generic.c",
88182 + .param1 = 1,
88183 +};
88184 +struct size_overflow_hash _001350_hash = {
88185 + .next = NULL,
88186 + .name = "alg_setsockopt",
88187 + .file = "crypto/af_alg.c",
88188 + .param5 = 1,
88189 +};
88190 +struct size_overflow_hash _001351_hash = {
88191 + .next = NULL,
88192 + .name = "alloc_targets",
88193 + .file = "drivers/md/dm-table.c",
88194 + .param2 = 1,
88195 +};
88196 +struct size_overflow_hash _001352_hash = {
88197 + .next = NULL,
88198 + .name = "aoechr_write",
88199 + .file = "drivers/block/aoe/aoechr.c",
88200 + .param3 = 1,
88201 +};
88202 +struct size_overflow_hash _001353_hash = {
88203 + .next = NULL,
88204 + .name = "ath6kl_cfg80211_connect_event",
88205 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
88206 + .param7 = 1,
88207 + .param9 = 1,
88208 + .param8 = 1,
88209 +};
88210 +struct size_overflow_hash _001356_hash = {
88211 + .next = NULL,
88212 + .name = "ath6kl_mgmt_tx",
88213 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
88214 + .param9 = 1,
88215 +};
88216 +struct size_overflow_hash _001357_hash = {
88217 + .next = NULL,
88218 + .name = "atomic_read_file",
88219 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
88220 + .param3 = 1,
88221 +};
88222 +struct size_overflow_hash _001358_hash = {
88223 + .next = NULL,
88224 + .name = "beacon_interval_read",
88225 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88226 + .param3 = 1,
88227 +};
88228 +struct size_overflow_hash _001359_hash = {
88229 + .next = NULL,
88230 + .name = "bm_entry_write",
88231 + .file = "fs/binfmt_misc.c",
88232 + .param3 = 1,
88233 +};
88234 +struct size_overflow_hash _001360_hash = {
88235 + .next = NULL,
88236 + .name = "bm_init",
88237 + .file = "lib/ts_bm.c",
88238 + .param2 = 1,
88239 +};
88240 +struct size_overflow_hash _001361_hash = {
88241 + .next = NULL,
88242 + .name = "bm_register_write",
88243 + .file = "fs/binfmt_misc.c",
88244 + .param3 = 1,
88245 +};
88246 +struct size_overflow_hash _001362_hash = {
88247 + .next = NULL,
88248 + .name = "bm_status_write",
88249 + .file = "fs/binfmt_misc.c",
88250 + .param3 = 1,
88251 +};
88252 +struct size_overflow_hash _001363_hash = {
88253 + .next = NULL,
88254 + .name = "brn_proc_write",
88255 + .file = "drivers/platform/x86/asus_acpi.c",
88256 + .param3 = 1,
88257 +};
88258 +struct size_overflow_hash _001364_hash = {
88259 + .next = NULL,
88260 + .name = "btrfs_map_block",
88261 + .file = "fs/btrfs/volumes.c",
88262 + .param3 = 1,
88263 +};
88264 +struct size_overflow_hash _001365_hash = {
88265 + .next = NULL,
88266 + .name = "cache_downcall",
88267 + .file = "net/sunrpc/cache.c",
88268 + .param3 = 1,
88269 +};
88270 +struct size_overflow_hash _001366_hash = {
88271 + .next = NULL,
88272 + .name = "cache_slow_downcall",
88273 + .file = "net/sunrpc/cache.c",
88274 + .param2 = 1,
88275 +};
88276 +struct size_overflow_hash _001367_hash = {
88277 + .next = NULL,
88278 + .name = "ceph_dns_resolve_name",
88279 + .file = "net/ceph/messenger.c",
88280 + .param1 = 1,
88281 +};
88282 +struct size_overflow_hash _001368_hash = {
88283 + .next = NULL,
88284 + .name = "cfg80211_roamed",
88285 + .file = "include/net/cfg80211.h",
88286 + .param5 = 1,
88287 + .param7 = 1,
88288 +};
88289 +struct size_overflow_hash _001370_hash = {
88290 + .next = NULL,
88291 + .name = "cifs_readv_from_socket",
88292 + .file = "fs/cifs/connect.c",
88293 + .param3 = 1,
88294 +};
88295 +struct size_overflow_hash _001371_hash = {
88296 + .next = NULL,
88297 + .name = "configfs_write_file",
88298 + .file = "fs/configfs/file.c",
88299 + .param3 = 1,
88300 +};
88301 +struct size_overflow_hash _001372_hash = {
88302 + .next = &_001370_hash,
88303 + .name = "cpu_type_read",
88304 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
88305 + .param3 = 1,
88306 +};
88307 +struct size_overflow_hash _001373_hash = {
88308 + .next = NULL,
88309 + .name = "cx18_copy_mdl_to_user",
88310 + .file = "drivers/media/video/cx18/cx18-fileops.c",
88311 + .param4 = 1,
88312 +};
88313 +struct size_overflow_hash _001374_hash = {
88314 + .next = NULL,
88315 + .name = "cxgbi_ddp_reserve",
88316 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
88317 + .param4 = 1,
88318 +};
88319 +struct size_overflow_hash _001375_hash = {
88320 + .next = NULL,
88321 + .name = "cxgbi_device_portmap_create",
88322 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
88323 + .param3 = 1,
88324 +};
88325 +struct size_overflow_hash _001376_hash = {
88326 + .next = NULL,
88327 + .name = "datablob_hmac_append",
88328 + .file = "security/keys/encrypted-keys/encrypted.c",
88329 + .param3 = 1,
88330 +};
88331 +struct size_overflow_hash _001377_hash = {
88332 + .next = NULL,
88333 + .name = "datablob_hmac_verify",
88334 + .file = "security/keys/encrypted-keys/encrypted.c",
88335 + .param4 = 1,
88336 +};
88337 +struct size_overflow_hash _001378_hash = {
88338 + .next = NULL,
88339 + .name = "dataflash_read_fact_otp",
88340 + .file = "drivers/mtd/devices/mtd_dataflash.c",
88341 + .param3 = 1,
88342 +};
88343 +struct size_overflow_hash _001379_hash = {
88344 + .next = NULL,
88345 + .name = "dataflash_read_user_otp",
88346 + .file = "drivers/mtd/devices/mtd_dataflash.c",
88347 + .param3 = 1,
88348 +};
88349 +struct size_overflow_hash _001380_hash = {
88350 + .next = NULL,
88351 + .name = "depth_read",
88352 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
88353 + .param3 = 1,
88354 +};
88355 +struct size_overflow_hash _001381_hash = {
88356 + .next = NULL,
88357 + .name = "depth_write",
88358 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
88359 + .param3 = 1,
88360 +};
88361 +struct size_overflow_hash _001382_hash = {
88362 + .next = NULL,
88363 + .name = "dev_irnet_write",
88364 + .file = "net/irda/irnet/irnet_ppp.c",
88365 + .param3 = 1,
88366 +};
88367 +struct size_overflow_hash _001383_hash = {
88368 + .next = NULL,
88369 + .name = "dev_write",
88370 + .file = "sound/oss/msnd_pinnacle.c",
88371 + .param3 = 1,
88372 +};
88373 +struct size_overflow_hash _001384_hash = {
88374 + .next = NULL,
88375 + .name = "dfs_file_read",
88376 + .file = "fs/ubifs/debug.c",
88377 + .param3 = 1,
88378 +};
88379 +struct size_overflow_hash _001385_hash = {
88380 + .next = NULL,
88381 + .name = "dfs_file_write",
88382 + .file = "fs/ubifs/debug.c",
88383 + .param3 = 1,
88384 +};
88385 +struct size_overflow_hash _001386_hash = {
88386 + .next = NULL,
88387 + .name = "dfs_global_file_read",
88388 + .file = "fs/ubifs/debug.c",
88389 + .param3 = 1,
88390 +};
88391 +struct size_overflow_hash _001387_hash = {
88392 + .next = NULL,
88393 + .name = "dfs_global_file_write",
88394 + .file = "fs/ubifs/debug.c",
88395 + .param3 = 1,
88396 +};
88397 +struct size_overflow_hash _001388_hash = {
88398 + .next = NULL,
88399 + .name = "disconnect",
88400 + .file = "net/bluetooth/mgmt.c",
88401 + .param4 = 1,
88402 +};
88403 +struct size_overflow_hash _001389_hash = {
88404 + .next = NULL,
88405 + .name = "disp_proc_write",
88406 + .file = "drivers/platform/x86/asus_acpi.c",
88407 + .param3 = 1,
88408 +};
88409 +struct size_overflow_hash _001390_hash = {
88410 + .next = NULL,
88411 + .name = "dma_rx_errors_read",
88412 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88413 + .param3 = 1,
88414 +};
88415 +struct size_overflow_hash _001391_hash = {
88416 + .next = NULL,
88417 + .name = "dma_rx_requested_read",
88418 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88419 + .param3 = 1,
88420 +};
88421 +struct size_overflow_hash _001392_hash = {
88422 + .next = NULL,
88423 + .name = "dma_tx_errors_read",
88424 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88425 + .param3 = 1,
88426 +};
88427 +struct size_overflow_hash _001393_hash = {
88428 + .next = NULL,
88429 + .name = "dma_tx_requested_read",
88430 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88431 + .param3 = 1,
88432 +};
88433 +struct size_overflow_hash _001394_hash = {
88434 + .next = NULL,
88435 + .name = "dm_exception_table_init",
88436 + .file = "drivers/md/dm-snap.c",
88437 + .param2 = 1,
88438 +};
88439 +struct size_overflow_hash _001395_hash = {
88440 + .next = NULL,
88441 + .name = "do_dccp_setsockopt",
88442 + .file = "net/dccp/proto.c",
88443 + .param5 = 1,
88444 +};
88445 +struct size_overflow_hash _001396_hash = {
88446 + .next = NULL,
88447 + .name = "dtim_interval_read",
88448 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88449 + .param3 = 1,
88450 +};
88451 +struct size_overflow_hash _001397_hash = {
88452 + .next = NULL,
88453 + .name = "dvb_audio_write",
88454 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
88455 + .param3 = 1,
88456 +};
88457 +struct size_overflow_hash _001398_hash = {
88458 + .next = NULL,
88459 + .name = "dvb_demux_do_ioctl",
88460 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
88461 + .param3 = 1,
88462 +};
88463 +struct size_overflow_hash _001399_hash = {
88464 + .next = NULL,
88465 + .name = "dvb_dvr_do_ioctl",
88466 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
88467 + .param3 = 1,
88468 +};
88469 +struct size_overflow_hash _001400_hash = {
88470 + .next = NULL,
88471 + .name = "dvb_video_write",
88472 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
88473 + .param3 = 1,
88474 +};
88475 +struct size_overflow_hash _001401_hash = {
88476 + .next = NULL,
88477 + .name = "ecryptfs_decode_and_decrypt_filename",
88478 + .file = "fs/ecryptfs/crypto.c",
88479 + .param5 = 1,
88480 +};
88481 +struct size_overflow_hash _001402_hash = {
88482 + .next = NULL,
88483 + .name = "ecryptfs_encrypt_and_encode_filename",
88484 + .file = "fs/ecryptfs/crypto.c",
88485 + .param6 = 1,
88486 +};
88487 +struct size_overflow_hash _001403_hash = {
88488 + .next = NULL,
88489 + .name = "enable_read",
88490 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
88491 + .param3 = 1,
88492 +};
88493 +struct size_overflow_hash _001404_hash = {
88494 + .next = NULL,
88495 + .name = "enable_write",
88496 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
88497 + .param3 = 1,
88498 +};
88499 +struct size_overflow_hash _001405_hash = {
88500 + .next = NULL,
88501 + .name = "event_calibration_read",
88502 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88503 + .param3 = 1,
88504 +};
88505 +struct size_overflow_hash _001406_hash = {
88506 + .next = NULL,
88507 + .name = "event_heart_beat_read",
88508 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88509 + .param3 = 1,
88510 +};
88511 +struct size_overflow_hash _001407_hash = {
88512 + .next = NULL,
88513 + .name = "event_oom_late_read",
88514 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88515 + .param3 = 1,
88516 +};
88517 +struct size_overflow_hash _001408_hash = {
88518 + .next = NULL,
88519 + .name = "event_phy_transmit_error_read",
88520 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88521 + .param3 = 1,
88522 +};
88523 +struct size_overflow_hash _001409_hash = {
88524 + .next = NULL,
88525 + .name = "event_rx_mem_empty_read",
88526 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88527 + .param3 = 1,
88528 +};
88529 +struct size_overflow_hash _001410_hash = {
88530 + .next = NULL,
88531 + .name = "event_rx_mismatch_read",
88532 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88533 + .param3 = 1,
88534 +};
88535 +struct size_overflow_hash _001411_hash = {
88536 + .next = NULL,
88537 + .name = "event_rx_pool_read",
88538 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88539 + .param3 = 1,
88540 +};
88541 +struct size_overflow_hash _001412_hash = {
88542 + .next = NULL,
88543 + .name = "event_tx_stuck_read",
88544 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88545 + .param3 = 1,
88546 +};
88547 +struct size_overflow_hash _001413_hash = {
88548 + .next = NULL,
88549 + .name = "excessive_retries_read",
88550 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88551 + .param3 = 1,
88552 +};
88553 +struct size_overflow_hash _001414_hash = {
88554 + .next = NULL,
88555 + .name = "exofs_read_kern",
88556 + .file = "fs/exofs/super.c",
88557 + .param6 = 1,
88558 +};
88559 +struct size_overflow_hash _001415_hash = {
88560 + .next = NULL,
88561 + .name = "fallback_on_nodma_alloc",
88562 + .file = "drivers/block/floppy.c",
88563 + .param2 = 1,
88564 +};
88565 +struct size_overflow_hash _001416_hash = {
88566 + .next = NULL,
88567 + .name = "__feat_register_sp",
88568 + .file = "net/dccp/feat.c",
88569 + .param6 = 1,
88570 +};
88571 +struct size_overflow_hash _001417_hash = {
88572 + .next = NULL,
88573 + .name = "ffs_ep0_write",
88574 + .file = "drivers/usb/gadget/f_fs.c",
88575 + .param3 = 1,
88576 +};
88577 +struct size_overflow_hash _001418_hash = {
88578 + .next = NULL,
88579 + .name = "ffs_epfile_read",
88580 + .file = "drivers/usb/gadget/f_fs.c",
88581 + .param3 = 1,
88582 +};
88583 +struct size_overflow_hash _001419_hash = {
88584 + .next = NULL,
88585 + .name = "ffs_epfile_write",
88586 + .file = "drivers/usb/gadget/f_fs.c",
88587 + .param3 = 1,
88588 +};
88589 +struct size_overflow_hash _001420_hash = {
88590 + .next = NULL,
88591 + .name = "frequency_read",
88592 + .file = "net/mac80211/debugfs.c",
88593 + .param3 = 1,
88594 +};
88595 +struct size_overflow_hash _001421_hash = {
88596 + .next = NULL,
88597 + .name = "fsm_init",
88598 + .file = "lib/ts_fsm.c",
88599 + .param2 = 1,
88600 +};
88601 +struct size_overflow_hash _001422_hash = {
88602 + .next = NULL,
88603 + .name = "garmin_read_process",
88604 + .file = "drivers/usb/serial/garmin_gps.c",
88605 + .param3 = 1,
88606 +};
88607 +struct size_overflow_hash _001423_hash = {
88608 + .next = NULL,
88609 + .name = "garp_request_join",
88610 + .file = "include/net/garp.h",
88611 + .param4 = 1,
88612 +};
88613 +struct size_overflow_hash _001424_hash = {
88614 + .next = NULL,
88615 + .name = "hcd_alloc_coherent",
88616 + .file = "drivers/usb/core/hcd.c",
88617 + .param5 = 1,
88618 +};
88619 +struct size_overflow_hash _001425_hash = {
88620 + .next = NULL,
88621 + .name = "hci_sock_sendmsg",
88622 + .file = "net/bluetooth/hci_sock.c",
88623 + .param4 = 1,
88624 +};
88625 +struct size_overflow_hash _001426_hash = {
88626 + .next = NULL,
88627 + .name = "__hwahc_op_set_gtk",
88628 + .file = "drivers/usb/host/hwa-hc.c",
88629 + .param4 = 1,
88630 +};
88631 +struct size_overflow_hash _001427_hash = {
88632 + .next = NULL,
88633 + .name = "__hwahc_op_set_ptk",
88634 + .file = "drivers/usb/host/hwa-hc.c",
88635 + .param5 = 1,
88636 +};
88637 +struct size_overflow_hash _001428_hash = {
88638 + .next = NULL,
88639 + .name = "ib_send_cm_drep",
88640 + .file = "include/rdma/ib_cm.h",
88641 + .param3 = 1,
88642 +};
88643 +struct size_overflow_hash _001429_hash = {
88644 + .next = NULL,
88645 + .name = "ib_send_cm_mra",
88646 + .file = "include/rdma/ib_cm.h",
88647 + .param4 = 1,
88648 +};
88649 +struct size_overflow_hash _001430_hash = {
88650 + .next = NULL,
88651 + .name = "ib_send_cm_rtu",
88652 + .file = "include/rdma/ib_cm.h",
88653 + .param3 = 1,
88654 +};
88655 +struct size_overflow_hash _001431_hash = {
88656 + .next = NULL,
88657 + .name = "ieee80211_bss_info_update",
88658 + .file = "net/mac80211/scan.c",
88659 + .param4 = 1,
88660 +};
88661 +struct size_overflow_hash _001432_hash = {
88662 + .next = NULL,
88663 + .name = "ieee80211_if_read_aid",
88664 + .file = "net/mac80211/debugfs_netdev.c",
88665 + .param3 = 1,
88666 +};
88667 +struct size_overflow_hash _001433_hash = {
88668 + .next = NULL,
88669 + .name = "ieee80211_if_read_auto_open_plinks",
88670 + .file = "net/mac80211/debugfs_netdev.c",
88671 + .param3 = 1,
88672 +};
88673 +struct size_overflow_hash _001434_hash = {
88674 + .next = NULL,
88675 + .name = "ieee80211_if_read_ave_beacon",
88676 + .file = "net/mac80211/debugfs_netdev.c",
88677 + .param3 = 1,
88678 +};
88679 +struct size_overflow_hash _001435_hash = {
88680 + .next = NULL,
88681 + .name = "ieee80211_if_read_bssid",
88682 + .file = "net/mac80211/debugfs_netdev.c",
88683 + .param3 = 1,
88684 +};
88685 +struct size_overflow_hash _001436_hash = {
88686 + .next = NULL,
88687 + .name = "ieee80211_if_read_channel_type",
88688 + .file = "net/mac80211/debugfs_netdev.c",
88689 + .param3 = 1,
88690 +};
88691 +struct size_overflow_hash _001437_hash = {
88692 + .next = NULL,
88693 + .name = "ieee80211_if_read_dot11MeshConfirmTimeout",
88694 + .file = "net/mac80211/debugfs_netdev.c",
88695 + .param3 = 1,
88696 +};
88697 +struct size_overflow_hash _001438_hash = {
88698 + .next = NULL,
88699 + .name = "ieee80211_if_read_dot11MeshGateAnnouncementProtocol",
88700 + .file = "net/mac80211/debugfs_netdev.c",
88701 + .param3 = 1,
88702 +};
88703 +struct size_overflow_hash _001439_hash = {
88704 + .next = NULL,
88705 + .name = "ieee80211_if_read_dot11MeshHoldingTimeout",
88706 + .file = "net/mac80211/debugfs_netdev.c",
88707 + .param3 = 1,
88708 +};
88709 +struct size_overflow_hash _001440_hash = {
88710 + .next = NULL,
88711 + .name = "ieee80211_if_read_dot11MeshHWMPactivePathTimeout",
88712 + .file = "net/mac80211/debugfs_netdev.c",
88713 + .param3 = 1,
88714 +};
88715 +struct size_overflow_hash _001441_hash = {
88716 + .next = NULL,
88717 + .name = "ieee80211_if_read_dot11MeshHWMPmaxPREQretries",
88718 + .file = "net/mac80211/debugfs_netdev.c",
88719 + .param3 = 1,
88720 +};
88721 +struct size_overflow_hash _001442_hash = {
88722 + .next = NULL,
88723 + .name = "ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime",
88724 + .file = "net/mac80211/debugfs_netdev.c",
88725 + .param3 = 1,
88726 +};
88727 +struct size_overflow_hash _001443_hash = {
88728 + .next = NULL,
88729 + .name = "ieee80211_if_read_dot11MeshHWMPperrMinInterval",
88730 + .file = "net/mac80211/debugfs_netdev.c",
88731 + .param3 = 1,
88732 +};
88733 +struct size_overflow_hash _001444_hash = {
88734 + .next = NULL,
88735 + .name = "ieee80211_if_read_dot11MeshHWMPpreqMinInterval",
88736 + .file = "net/mac80211/debugfs_netdev.c",
88737 + .param3 = 1,
88738 +};
88739 +struct size_overflow_hash _001445_hash = {
88740 + .next = NULL,
88741 + .name = "ieee80211_if_read_dot11MeshHWMPRannInterval",
88742 + .file = "net/mac80211/debugfs_netdev.c",
88743 + .param3 = 1,
88744 +};
88745 +struct size_overflow_hash _001446_hash = {
88746 + .next = NULL,
88747 + .name = "ieee80211_if_read_dot11MeshHWMPRootMode",
88748 + .file = "net/mac80211/debugfs_netdev.c",
88749 + .param3 = 1,
88750 +};
88751 +struct size_overflow_hash _001447_hash = {
88752 + .next = NULL,
88753 + .name = "ieee80211_if_read_dot11MeshMaxPeerLinks",
88754 + .file = "net/mac80211/debugfs_netdev.c",
88755 + .param3 = 1,
88756 +};
88757 +struct size_overflow_hash _001448_hash = {
88758 + .next = NULL,
88759 + .name = "ieee80211_if_read_dot11MeshMaxRetries",
88760 + .file = "net/mac80211/debugfs_netdev.c",
88761 + .param3 = 1,
88762 +};
88763 +struct size_overflow_hash _001449_hash = {
88764 + .next = NULL,
88765 + .name = "ieee80211_if_read_dot11MeshRetryTimeout",
88766 + .file = "net/mac80211/debugfs_netdev.c",
88767 + .param3 = 1,
88768 +};
88769 +struct size_overflow_hash _001450_hash = {
88770 + .next = NULL,
88771 + .name = "ieee80211_if_read_dot11MeshTTL",
88772 + .file = "net/mac80211/debugfs_netdev.c",
88773 + .param3 = 1,
88774 +};
88775 +struct size_overflow_hash _001451_hash = {
88776 + .next = NULL,
88777 + .name = "ieee80211_if_read_dropped_frames_congestion",
88778 + .file = "net/mac80211/debugfs_netdev.c",
88779 + .param3 = 1,
88780 +};
88781 +struct size_overflow_hash _001452_hash = {
88782 + .next = NULL,
88783 + .name = "ieee80211_if_read_dropped_frames_no_route",
88784 + .file = "net/mac80211/debugfs_netdev.c",
88785 + .param3 = 1,
88786 +};
88787 +struct size_overflow_hash _001453_hash = {
88788 + .next = NULL,
88789 + .name = "ieee80211_if_read_dropped_frames_ttl",
88790 + .file = "net/mac80211/debugfs_netdev.c",
88791 + .param3 = 1,
88792 +};
88793 +struct size_overflow_hash _001454_hash = {
88794 + .next = NULL,
88795 + .name = "ieee80211_if_read_drop_unencrypted",
88796 + .file = "net/mac80211/debugfs_netdev.c",
88797 + .param3 = 1,
88798 +};
88799 +struct size_overflow_hash _001455_hash = {
88800 + .next = NULL,
88801 + .name = "ieee80211_if_read_dtim_count",
88802 + .file = "net/mac80211/debugfs_netdev.c",
88803 + .param3 = 1,
88804 +};
88805 +struct size_overflow_hash _001456_hash = {
88806 + .next = NULL,
88807 + .name = "ieee80211_if_read_element_ttl",
88808 + .file = "net/mac80211/debugfs_netdev.c",
88809 + .param3 = 1,
88810 +};
88811 +struct size_overflow_hash _001457_hash = {
88812 + .next = NULL,
88813 + .name = "ieee80211_if_read_estab_plinks",
88814 + .file = "net/mac80211/debugfs_netdev.c",
88815 + .param3 = 1,
88816 +};
88817 +struct size_overflow_hash _001458_hash = {
88818 + .next = NULL,
88819 + .name = "ieee80211_if_read_flags",
88820 + .file = "net/mac80211/debugfs_netdev.c",
88821 + .param3 = 1,
88822 +};
88823 +struct size_overflow_hash _001459_hash = {
88824 + .next = NULL,
88825 + .name = "ieee80211_if_read_fwded_frames",
88826 + .file = "net/mac80211/debugfs_netdev.c",
88827 + .param3 = 1,
88828 +};
88829 +struct size_overflow_hash _001460_hash = {
88830 + .next = NULL,
88831 + .name = "ieee80211_if_read_fwded_mcast",
88832 + .file = "net/mac80211/debugfs_netdev.c",
88833 + .param3 = 1,
88834 +};
88835 +struct size_overflow_hash _001461_hash = {
88836 + .next = NULL,
88837 + .name = "ieee80211_if_read_fwded_unicast",
88838 + .file = "net/mac80211/debugfs_netdev.c",
88839 + .param3 = 1,
88840 +};
88841 +struct size_overflow_hash _001462_hash = {
88842 + .next = NULL,
88843 + .name = "ieee80211_if_read_last_beacon",
88844 + .file = "net/mac80211/debugfs_netdev.c",
88845 + .param3 = 1,
88846 +};
88847 +struct size_overflow_hash _001463_hash = {
88848 + .next = NULL,
88849 + .name = "ieee80211_if_read_min_discovery_timeout",
88850 + .file = "net/mac80211/debugfs_netdev.c",
88851 + .param3 = 1,
88852 +};
88853 +struct size_overflow_hash _001464_hash = {
88854 + .next = NULL,
88855 + .name = "ieee80211_if_read_num_buffered_multicast",
88856 + .file = "net/mac80211/debugfs_netdev.c",
88857 + .param3 = 1,
88858 +};
88859 +struct size_overflow_hash _001465_hash = {
88860 + .next = NULL,
88861 + .name = "ieee80211_if_read_num_sta_authorized",
88862 + .file = "net/mac80211/debugfs_netdev.c",
88863 + .param3 = 1,
88864 +};
88865 +struct size_overflow_hash _001466_hash = {
88866 + .next = NULL,
88867 + .name = "ieee80211_if_read_num_sta_ps",
88868 + .file = "net/mac80211/debugfs_netdev.c",
88869 + .param3 = 1,
88870 +};
88871 +struct size_overflow_hash _001467_hash = {
88872 + .next = NULL,
88873 + .name = "ieee80211_if_read_path_refresh_time",
88874 + .file = "net/mac80211/debugfs_netdev.c",
88875 + .param3 = 1,
88876 +};
88877 +struct size_overflow_hash _001468_hash = {
88878 + .next = NULL,
88879 + .name = "ieee80211_if_read_peer",
88880 + .file = "net/mac80211/debugfs_netdev.c",
88881 + .param3 = 1,
88882 +};
88883 +struct size_overflow_hash _001469_hash = {
88884 + .next = NULL,
88885 + .name = "ieee80211_if_read_rc_rateidx_mask_2ghz",
88886 + .file = "net/mac80211/debugfs_netdev.c",
88887 + .param3 = 1,
88888 +};
88889 +struct size_overflow_hash _001470_hash = {
88890 + .next = NULL,
88891 + .name = "ieee80211_if_read_rc_rateidx_mask_5ghz",
88892 + .file = "net/mac80211/debugfs_netdev.c",
88893 + .param3 = 1,
88894 +};
88895 +struct size_overflow_hash _001471_hash = {
88896 + .next = NULL,
88897 + .name = "ieee80211_if_read_smps",
88898 + .file = "net/mac80211/debugfs_netdev.c",
88899 + .param3 = 1,
88900 +};
88901 +struct size_overflow_hash _001472_hash = {
88902 + .next = NULL,
88903 + .name = "ieee80211_if_read_state",
88904 + .file = "net/mac80211/debugfs_netdev.c",
88905 + .param3 = 1,
88906 +};
88907 +struct size_overflow_hash _001473_hash = {
88908 + .next = NULL,
88909 + .name = "ieee80211_if_read_tkip_mic_test",
88910 + .file = "net/mac80211/debugfs_netdev.c",
88911 + .param3 = 1,
88912 +};
88913 +struct size_overflow_hash _001474_hash = {
88914 + .next = NULL,
88915 + .name = "ieee80211_if_read_tsf",
88916 + .file = "net/mac80211/debugfs_netdev.c",
88917 + .param3 = 1,
88918 +};
88919 +struct size_overflow_hash _001475_hash = {
88920 + .next = NULL,
88921 + .name = "ieee80211_send_probe_req",
88922 + .file = "net/mac80211/util.c",
88923 + .param6 = 1,
88924 +};
88925 +struct size_overflow_hash _001476_hash = {
88926 + .next = NULL,
88927 + .name = "init_map_ipmac",
88928 + .file = "net/netfilter/ipset/ip_set_bitmap_ipmac.c",
88929 + .param3 = 1,
88930 + .param4 = 1,
88931 +};
88932 +struct size_overflow_hash _001478_hash = {
88933 + .next = NULL,
88934 + .name = "init_tid_tabs",
88935 + .file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
88936 + .param2 = 1,
88937 + .param4 = 1,
88938 + .param3 = 1,
88939 +};
88940 +struct size_overflow_hash _001481_hash = {
88941 + .next = NULL,
88942 + .name = "isr_cmd_cmplt_read",
88943 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88944 + .param3 = 1,
88945 +};
88946 +struct size_overflow_hash _001482_hash = {
88947 + .next = NULL,
88948 + .name = "isr_commands_read",
88949 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88950 + .param3 = 1,
88951 +};
88952 +struct size_overflow_hash _001483_hash = {
88953 + .next = NULL,
88954 + .name = "isr_decrypt_done_read",
88955 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88956 + .param3 = 1,
88957 +};
88958 +struct size_overflow_hash _001484_hash = {
88959 + .next = NULL,
88960 + .name = "isr_dma0_done_read",
88961 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88962 + .param3 = 1,
88963 +};
88964 +struct size_overflow_hash _001485_hash = {
88965 + .next = NULL,
88966 + .name = "isr_dma1_done_read",
88967 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88968 + .param3 = 1,
88969 +};
88970 +struct size_overflow_hash _001486_hash = {
88971 + .next = NULL,
88972 + .name = "isr_fiqs_read",
88973 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88974 + .param3 = 1,
88975 +};
88976 +struct size_overflow_hash _001487_hash = {
88977 + .next = NULL,
88978 + .name = "isr_host_acknowledges_read",
88979 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88980 + .param3 = 1,
88981 +};
88982 +struct size_overflow_hash _001488_hash = {
88983 + .next = &_001393_hash,
88984 + .name = "isr_hw_pm_mode_changes_read",
88985 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88986 + .param3 = 1,
88987 +};
88988 +struct size_overflow_hash _001489_hash = {
88989 + .next = &_001205_hash,
88990 + .name = "isr_irqs_read",
88991 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88992 + .param3 = 1,
88993 +};
88994 +struct size_overflow_hash _001490_hash = {
88995 + .next = NULL,
88996 + .name = "isr_low_rssi_read",
88997 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
88998 + .param3 = 1,
88999 +};
89000 +struct size_overflow_hash _001491_hash = {
89001 + .next = NULL,
89002 + .name = "isr_pci_pm_read",
89003 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89004 + .param3 = 1,
89005 +};
89006 +struct size_overflow_hash _001492_hash = {
89007 + .next = NULL,
89008 + .name = "isr_rx_headers_read",
89009 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89010 + .param3 = 1,
89011 +};
89012 +struct size_overflow_hash _001493_hash = {
89013 + .next = NULL,
89014 + .name = "isr_rx_mem_overflow_read",
89015 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89016 + .param3 = 1,
89017 +};
89018 +struct size_overflow_hash _001494_hash = {
89019 + .next = NULL,
89020 + .name = "isr_rx_procs_read",
89021 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89022 + .param3 = 1,
89023 +};
89024 +struct size_overflow_hash _001495_hash = {
89025 + .next = NULL,
89026 + .name = "isr_rx_rdys_read",
89027 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89028 + .param3 = 1,
89029 +};
89030 +struct size_overflow_hash _001496_hash = {
89031 + .next = NULL,
89032 + .name = "isr_tx_exch_complete_read",
89033 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89034 + .param3 = 1,
89035 +};
89036 +struct size_overflow_hash _001497_hash = {
89037 + .next = NULL,
89038 + .name = "isr_tx_procs_read",
89039 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89040 + .param3 = 1,
89041 +};
89042 +struct size_overflow_hash _001498_hash = {
89043 + .next = NULL,
89044 + .name = "isr_wakeups_read",
89045 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89046 + .param3 = 1,
89047 +};
89048 +struct size_overflow_hash _001499_hash = {
89049 + .next = NULL,
89050 + .name = "ivtv_read",
89051 + .file = "drivers/media/video/ivtv/ivtv-fileops.c",
89052 + .param3 = 1,
89053 +};
89054 +struct size_overflow_hash _001500_hash = {
89055 + .next = NULL,
89056 + .name = "kmem_realloc",
89057 + .file = "fs/xfs/kmem.c",
89058 + .param2 = 1,
89059 +};
89060 +struct size_overflow_hash _001501_hash = {
89061 + .next = NULL,
89062 + .name = "kmem_zalloc",
89063 + .file = "fs/xfs/kmem.c",
89064 + .param1 = 1,
89065 +};
89066 +struct size_overflow_hash _001502_hash = {
89067 + .next = NULL,
89068 + .name = "kmem_zalloc_greedy",
89069 + .file = "fs/xfs/kmem.c",
89070 + .param2 = 1,
89071 + .param3 = 1,
89072 +};
89073 +struct size_overflow_hash _001504_hash = {
89074 + .next = NULL,
89075 + .name = "kmp_init",
89076 + .file = "lib/ts_kmp.c",
89077 + .param2 = 1,
89078 +};
89079 +struct size_overflow_hash _001505_hash = {
89080 + .next = NULL,
89081 + .name = "lcd_proc_write",
89082 + .file = "drivers/platform/x86/asus_acpi.c",
89083 + .param3 = 1,
89084 +};
89085 +struct size_overflow_hash _001506_hash = {
89086 + .next = NULL,
89087 + .name = "ledd_proc_write",
89088 + .file = "drivers/platform/x86/asus_acpi.c",
89089 + .param3 = 1,
89090 +};
89091 +struct size_overflow_hash _001507_hash = {
89092 + .next = NULL,
89093 + .name = "mic_calc_failure_read",
89094 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89095 + .param3 = 1,
89096 +};
89097 +struct size_overflow_hash _001508_hash = {
89098 + .next = NULL,
89099 + .name = "mic_rx_pkts_read",
89100 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89101 + .param3 = 1,
89102 +};
89103 +struct size_overflow_hash _001509_hash = {
89104 + .next = NULL,
89105 + .name = "nfs4_realloc_slot_table",
89106 + .file = "fs/nfs/nfs4proc.c",
89107 + .param2 = 1,
89108 +};
89109 +struct size_overflow_hash _001510_hash = {
89110 + .next = NULL,
89111 + .name = "nfs_idmap_request_key",
89112 + .file = "fs/nfs/idmap.c",
89113 + .param2 = 1,
89114 + .param3 = 1,
89115 +};
89116 +struct size_overflow_hash _001511_hash = {
89117 + .next = NULL,
89118 + .name = "nsm_get_handle",
89119 + .file = "include/linux/lockd/lockd.h",
89120 + .param4 = 1,
89121 +};
89122 +struct size_overflow_hash _001512_hash = {
89123 + .next = NULL,
89124 + .name = "ntfs_copy_from_user_iovec",
89125 + .file = "fs/ntfs/file.c",
89126 + .param3 = 1,
89127 + .param6 = 1,
89128 +};
89129 +struct size_overflow_hash _001514_hash = {
89130 + .next = NULL,
89131 + .name = "ntfs_file_buffered_write",
89132 + .file = "fs/ntfs/file.c",
89133 + .param6 = 1,
89134 +};
89135 +struct size_overflow_hash _001515_hash = {
89136 + .next = NULL,
89137 + .name = "ntfs_malloc_nofs",
89138 + .file = "fs/ntfs/malloc.h",
89139 + .param1 = 1,
89140 +};
89141 +struct size_overflow_hash _001516_hash = {
89142 + .next = NULL,
89143 + .name = "ntfs_malloc_nofs_nofail",
89144 + .file = "fs/ntfs/malloc.h",
89145 + .param1 = 1,
89146 +};
89147 +struct size_overflow_hash _001517_hash = {
89148 + .next = NULL,
89149 + .name = "ocfs2_control_message",
89150 + .file = "fs/ocfs2/stack_user.c",
89151 + .param3 = 1,
89152 +};
89153 +struct size_overflow_hash _001518_hash = {
89154 + .next = NULL,
89155 + .name = "opera1_usb_i2c_msgxfer",
89156 + .file = "drivers/media/dvb/dvb-usb/opera1.c",
89157 + .param4 = 1,
89158 +};
89159 +struct size_overflow_hash _001519_hash = {
89160 + .next = NULL,
89161 + .name = "orinoco_add_extscan_result",
89162 + .file = "drivers/net/wireless/orinoco/scan.c",
89163 + .param3 = 1,
89164 +};
89165 +struct size_overflow_hash _001520_hash = {
89166 + .next = NULL,
89167 + .name = "osd_req_list_collection_objects",
89168 + .file = "include/scsi/osd_initiator.h",
89169 + .param5 = 1,
89170 +};
89171 +struct size_overflow_hash _001521_hash = {
89172 + .next = NULL,
89173 + .name = "osd_req_list_partition_objects",
89174 + .file = "include/scsi/osd_initiator.h",
89175 + .param5 = 1,
89176 +};
89177 +struct size_overflow_hash _001522_hash = {
89178 + .next = NULL,
89179 + .name = "pair_device",
89180 + .file = "net/bluetooth/mgmt.c",
89181 + .param4 = 1,
89182 +};
89183 +struct size_overflow_hash _001523_hash = {
89184 + .next = NULL,
89185 + .name = "pccard_store_cis",
89186 + .file = "drivers/pcmcia/cistpl.c",
89187 + .param6 = 1,
89188 +};
89189 +struct size_overflow_hash _001524_hash = {
89190 + .next = NULL,
89191 + .name = "pin_code_reply",
89192 + .file = "net/bluetooth/mgmt.c",
89193 + .param4 = 1,
89194 +};
89195 +struct size_overflow_hash _001525_hash = {
89196 + .next = NULL,
89197 + .name = "play_iframe",
89198 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
89199 + .param3 = 1,
89200 +};
89201 +struct size_overflow_hash _001526_hash = {
89202 + .next = NULL,
89203 + .name = "pointer_size_read",
89204 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
89205 + .param3 = 1,
89206 +};
89207 +struct size_overflow_hash _001527_hash = {
89208 + .next = NULL,
89209 + .name = "power_read",
89210 + .file = "net/mac80211/debugfs.c",
89211 + .param3 = 1,
89212 +};
89213 +struct size_overflow_hash _001528_hash = {
89214 + .next = NULL,
89215 + .name = "ps_pspoll_max_apturn_read",
89216 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89217 + .param3 = 1,
89218 +};
89219 +struct size_overflow_hash _001529_hash = {
89220 + .next = NULL,
89221 + .name = "ps_pspoll_timeouts_read",
89222 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89223 + .param3 = 1,
89224 +};
89225 +struct size_overflow_hash _001530_hash = {
89226 + .next = NULL,
89227 + .name = "ps_pspoll_utilization_read",
89228 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89229 + .param3 = 1,
89230 +};
89231 +struct size_overflow_hash _001531_hash = {
89232 + .next = NULL,
89233 + .name = "ps_upsd_max_apturn_read",
89234 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89235 + .param3 = 1,
89236 +};
89237 +struct size_overflow_hash _001532_hash = {
89238 + .next = NULL,
89239 + .name = "ps_upsd_max_sptime_read",
89240 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89241 + .param3 = 1,
89242 +};
89243 +struct size_overflow_hash _001533_hash = {
89244 + .next = NULL,
89245 + .name = "ps_upsd_timeouts_read",
89246 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89247 + .param3 = 1,
89248 +};
89249 +struct size_overflow_hash _001534_hash = {
89250 + .next = NULL,
89251 + .name = "ps_upsd_utilization_read",
89252 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89253 + .param3 = 1,
89254 +};
89255 +struct size_overflow_hash _001535_hash = {
89256 + .next = NULL,
89257 + .name = "pwr_disable_ps_read",
89258 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89259 + .param3 = 1,
89260 +};
89261 +struct size_overflow_hash _001536_hash = {
89262 + .next = NULL,
89263 + .name = "pwr_elp_enter_read",
89264 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89265 + .param3 = 1,
89266 +};
89267 +struct size_overflow_hash _001537_hash = {
89268 + .next = NULL,
89269 + .name = "pwr_enable_ps_read",
89270 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89271 + .param3 = 1,
89272 +};
89273 +struct size_overflow_hash _001538_hash = {
89274 + .next = NULL,
89275 + .name = "pwr_fix_tsf_ps_read",
89276 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89277 + .param3 = 1,
89278 +};
89279 +struct size_overflow_hash _001539_hash = {
89280 + .next = NULL,
89281 + .name = "pwr_missing_bcns_read",
89282 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89283 + .param3 = 1,
89284 +};
89285 +struct size_overflow_hash _001540_hash = {
89286 + .next = NULL,
89287 + .name = "pwr_power_save_off_read",
89288 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89289 + .param3 = 1,
89290 +};
89291 +struct size_overflow_hash _001541_hash = {
89292 + .next = NULL,
89293 + .name = "pwr_ps_enter_read",
89294 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89295 + .param3 = 1,
89296 +};
89297 +struct size_overflow_hash _001542_hash = {
89298 + .next = NULL,
89299 + .name = "pwr_rcvd_awake_beacons_read",
89300 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89301 + .param3 = 1,
89302 +};
89303 +struct size_overflow_hash _001543_hash = {
89304 + .next = NULL,
89305 + .name = "pwr_rcvd_beacons_read",
89306 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89307 + .param3 = 1,
89308 +};
89309 +struct size_overflow_hash _001544_hash = {
89310 + .next = NULL,
89311 + .name = "pwr_tx_without_ps_read",
89312 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89313 + .param3 = 1,
89314 +};
89315 +struct size_overflow_hash _001545_hash = {
89316 + .next = NULL,
89317 + .name = "pwr_tx_with_ps_read",
89318 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89319 + .param3 = 1,
89320 +};
89321 +struct size_overflow_hash _001546_hash = {
89322 + .next = NULL,
89323 + .name = "pwr_wake_on_host_read",
89324 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89325 + .param3 = 1,
89326 +};
89327 +struct size_overflow_hash _001547_hash = {
89328 + .next = NULL,
89329 + .name = "pwr_wake_on_timer_exp_read",
89330 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89331 + .param3 = 1,
89332 +};
89333 +struct size_overflow_hash _001548_hash = {
89334 + .next = NULL,
89335 + .name = "qcam_read",
89336 + .file = "drivers/media/video/c-qcam.c",
89337 + .param3 = 1,
89338 +};
89339 +struct size_overflow_hash _001549_hash = {
89340 + .next = NULL,
89341 + .name = "retry_count_read",
89342 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89343 + .param3 = 1,
89344 +};
89345 +struct size_overflow_hash _001550_hash = {
89346 + .next = NULL,
89347 + .name = "rx_dropped_read",
89348 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89349 + .param3 = 1,
89350 +};
89351 +struct size_overflow_hash _001551_hash = {
89352 + .next = NULL,
89353 + .name = "rx_fcs_err_read",
89354 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89355 + .param3 = 1,
89356 +};
89357 +struct size_overflow_hash _001552_hash = {
89358 + .next = NULL,
89359 + .name = "rx_hdr_overflow_read",
89360 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89361 + .param3 = 1,
89362 +};
89363 +struct size_overflow_hash _001553_hash = {
89364 + .next = NULL,
89365 + .name = "rx_hw_stuck_read",
89366 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89367 + .param3 = 1,
89368 +};
89369 +struct size_overflow_hash _001554_hash = {
89370 + .next = NULL,
89371 + .name = "rx_out_of_mem_read",
89372 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89373 + .param3 = 1,
89374 +};
89375 +struct size_overflow_hash _001555_hash = {
89376 + .next = NULL,
89377 + .name = "rx_path_reset_read",
89378 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89379 + .param3 = 1,
89380 +};
89381 +struct size_overflow_hash _001556_hash = {
89382 + .next = NULL,
89383 + .name = "rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read",
89384 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89385 + .param3 = 1,
89386 +};
89387 +struct size_overflow_hash _001557_hash = {
89388 + .next = NULL,
89389 + .name = "rxpipe_descr_host_int_trig_rx_data_read",
89390 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89391 + .param3 = 1,
89392 +};
89393 +struct size_overflow_hash _001558_hash = {
89394 + .next = NULL,
89395 + .name = "rxpipe_missed_beacon_host_int_trig_rx_data_read",
89396 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89397 + .param3 = 1,
89398 +};
89399 +struct size_overflow_hash _001559_hash = {
89400 + .next = NULL,
89401 + .name = "rxpipe_rx_prep_beacon_drop_read",
89402 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89403 + .param3 = 1,
89404 +};
89405 +struct size_overflow_hash _001560_hash = {
89406 + .next = NULL,
89407 + .name = "rxpipe_tx_xfr_host_int_trig_rx_data_read",
89408 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89409 + .param3 = 1,
89410 +};
89411 +struct size_overflow_hash _001561_hash = {
89412 + .next = NULL,
89413 + .name = "rx_reset_counter_read",
89414 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89415 + .param3 = 1,
89416 +};
89417 +struct size_overflow_hash _001562_hash = {
89418 + .next = NULL,
89419 + .name = "rx_streaming_always_read",
89420 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89421 + .param3 = 1,
89422 +};
89423 +struct size_overflow_hash _001563_hash = {
89424 + .next = NULL,
89425 + .name = "rx_streaming_interval_read",
89426 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89427 + .param3 = 1,
89428 +};
89429 +struct size_overflow_hash _001564_hash = {
89430 + .next = NULL,
89431 + .name = "rx_xfr_hint_trig_read",
89432 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89433 + .param3 = 1,
89434 +};
89435 +struct size_overflow_hash _001565_hash = {
89436 + .next = NULL,
89437 + .name = "scsi_execute_req",
89438 + .file = "include/scsi/scsi_device.h",
89439 + .param5 = 1,
89440 +};
89441 +struct size_overflow_hash _001566_hash = {
89442 + .next = NULL,
89443 + .name = "scsi_tgt_kspace_exec",
89444 + .file = "drivers/scsi/scsi_tgt_lib.c",
89445 + .param8 = 1,
89446 +};
89447 +struct size_overflow_hash _001567_hash = {
89448 + .next = NULL,
89449 + .name = "sctp_sendmsg",
89450 + .file = "net/sctp/socket.c",
89451 + .param4 = 1,
89452 +};
89453 +struct size_overflow_hash _001568_hash = {
89454 + .next = NULL,
89455 + .name = "sctp_setsockopt",
89456 + .file = "net/sctp/socket.c",
89457 + .param5 = 1,
89458 +};
89459 +struct size_overflow_hash _001569_hash = {
89460 + .next = NULL,
89461 + .name = "set_connectable",
89462 + .file = "net/bluetooth/mgmt.c",
89463 + .param4 = 1,
89464 +};
89465 +struct size_overflow_hash _001570_hash = {
89466 + .next = NULL,
89467 + .name = "set_discoverable",
89468 + .file = "net/bluetooth/mgmt.c",
89469 + .param4 = 1,
89470 +};
89471 +struct size_overflow_hash _001571_hash = {
89472 + .next = NULL,
89473 + .name = "set_local_name",
89474 + .file = "net/bluetooth/mgmt.c",
89475 + .param4 = 1,
89476 +};
89477 +struct size_overflow_hash _001572_hash = {
89478 + .next = NULL,
89479 + .name = "set_powered",
89480 + .file = "net/bluetooth/mgmt.c",
89481 + .param4 = 1,
89482 +};
89483 +struct size_overflow_hash _001573_hash = {
89484 + .next = NULL,
89485 + .name = "simple_alloc_urb",
89486 + .file = "drivers/usb/misc/usbtest.c",
89487 + .param3 = 1,
89488 +};
89489 +struct size_overflow_hash _001574_hash = {
89490 + .next = NULL,
89491 + .name = "sm_checker_extend",
89492 + .file = "drivers/md/persistent-data/dm-space-map-checker.c",
89493 + .param2 = 1,
89494 +};
89495 +struct size_overflow_hash _001575_hash = {
89496 + .next = NULL,
89497 + .name = "snd_cs4281_BA0_read",
89498 + .file = "sound/pci/cs4281.c",
89499 + .param5 = 1,
89500 +};
89501 +struct size_overflow_hash _001576_hash = {
89502 + .next = NULL,
89503 + .name = "snd_cs4281_BA1_read",
89504 + .file = "sound/pci/cs4281.c",
89505 + .param5 = 1,
89506 +};
89507 +struct size_overflow_hash _001577_hash = {
89508 + .next = NULL,
89509 + .name = "snd_cs46xx_io_read",
89510 + .file = "sound/pci/cs46xx/cs46xx_lib.c",
89511 + .param5 = 1,
89512 +};
89513 +struct size_overflow_hash _001578_hash = {
89514 + .next = NULL,
89515 + .name = "snd_gus_dram_read",
89516 + .file = "include/sound/gus.h",
89517 + .param4 = 1,
89518 +};
89519 +struct size_overflow_hash _001579_hash = {
89520 + .next = NULL,
89521 + .name = "snd_gus_dram_write",
89522 + .file = "include/sound/gus.h",
89523 + .param4 = 1,
89524 +};
89525 +struct size_overflow_hash _001580_hash = {
89526 + .next = NULL,
89527 + .name = "snd_mem_proc_write",
89528 + .file = "sound/core/memalloc.c",
89529 + .param3 = 1,
89530 +};
89531 +struct size_overflow_hash _001581_hash = {
89532 + .next = NULL,
89533 + .name = "snd_pcm_oss_read",
89534 + .file = "sound/core/oss/pcm_oss.c",
89535 + .param3 = 1,
89536 +};
89537 +struct size_overflow_hash _001582_hash = {
89538 + .next = NULL,
89539 + .name = "snd_pcm_oss_sync1",
89540 + .file = "sound/core/oss/pcm_oss.c",
89541 + .param2 = 1,
89542 +};
89543 +struct size_overflow_hash _001583_hash = {
89544 + .next = NULL,
89545 + .name = "snd_pcm_oss_write",
89546 + .file = "sound/core/oss/pcm_oss.c",
89547 + .param3 = 1,
89548 +};
89549 +struct size_overflow_hash _001584_hash = {
89550 + .next = NULL,
89551 + .name = "snd_rme32_capture_copy",
89552 + .file = "sound/pci/rme32.c",
89553 + .param5 = 1,
89554 +};
89555 +struct size_overflow_hash _001585_hash = {
89556 + .next = NULL,
89557 + .name = "snd_rme32_playback_copy",
89558 + .file = "sound/pci/rme32.c",
89559 + .param5 = 1,
89560 +};
89561 +struct size_overflow_hash _001586_hash = {
89562 + .next = NULL,
89563 + .name = "snd_rme96_capture_copy",
89564 + .file = "sound/pci/rme96.c",
89565 + .param5 = 1,
89566 +};
89567 +struct size_overflow_hash _001587_hash = {
89568 + .next = NULL,
89569 + .name = "snd_rme96_playback_copy",
89570 + .file = "sound/pci/rme96.c",
89571 + .param5 = 1,
89572 +};
89573 +struct size_overflow_hash _001588_hash = {
89574 + .next = NULL,
89575 + .name = "spi_execute",
89576 + .file = "drivers/scsi/scsi_transport_spi.c",
89577 + .param5 = 1,
89578 +};
89579 +struct size_overflow_hash _001589_hash = {
89580 + .next = NULL,
89581 + .name = "srp_target_alloc",
89582 + .file = "include/scsi/libsrp.h",
89583 + .param3 = 1,
89584 +};
89585 +struct size_overflow_hash _001590_hash = {
89586 + .next = NULL,
89587 + .name = "stats_dot11ACKFailureCount_read",
89588 + .file = "net/mac80211/debugfs.c",
89589 + .param3 = 1,
89590 +};
89591 +struct size_overflow_hash _001591_hash = {
89592 + .next = NULL,
89593 + .name = "stats_dot11FCSErrorCount_read",
89594 + .file = "net/mac80211/debugfs.c",
89595 + .param3 = 1,
89596 +};
89597 +struct size_overflow_hash _001592_hash = {
89598 + .next = NULL,
89599 + .name = "stats_dot11RTSFailureCount_read",
89600 + .file = "net/mac80211/debugfs.c",
89601 + .param3 = 1,
89602 +};
89603 +struct size_overflow_hash _001593_hash = {
89604 + .next = NULL,
89605 + .name = "stats_dot11RTSSuccessCount_read",
89606 + .file = "net/mac80211/debugfs.c",
89607 + .param3 = 1,
89608 +};
89609 +struct size_overflow_hash _001594_hash = {
89610 + .next = NULL,
89611 + .name = "stk_allocate_buffers",
89612 + .file = "drivers/media/video/stk-webcam.c",
89613 + .param2 = 1,
89614 +};
89615 +struct size_overflow_hash _001595_hash = {
89616 + .next = NULL,
89617 + .name = "submit_inquiry",
89618 + .file = "drivers/scsi/device_handler/scsi_dh_rdac.c",
89619 + .param3 = 1,
89620 +};
89621 +struct size_overflow_hash _001596_hash = {
89622 + .next = NULL,
89623 + .name = "team_options_register",
89624 + .file = "include/linux/if_team.h",
89625 + .param3 = 1,
89626 +};
89627 +struct size_overflow_hash _001597_hash = {
89628 + .next = NULL,
89629 + .name = "test_unaligned_bulk",
89630 + .file = "drivers/usb/misc/usbtest.c",
89631 + .param3 = 1,
89632 +};
89633 +struct size_overflow_hash _001598_hash = {
89634 + .next = NULL,
89635 + .name = "timeout_read",
89636 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
89637 + .param3 = 1,
89638 +};
89639 +struct size_overflow_hash _001599_hash = {
89640 + .next = NULL,
89641 + .name = "timeout_write",
89642 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
89643 + .param3 = 1,
89644 +};
89645 +struct size_overflow_hash _001600_hash = {
89646 + .next = NULL,
89647 + .name = "tipc_link_send_sections_fast",
89648 + .file = "net/tipc/link.c",
89649 + .param4 = 1,
89650 +};
89651 +struct size_overflow_hash _001601_hash = {
89652 + .next = NULL,
89653 + .name = "total_ps_buffered_read",
89654 + .file = "net/mac80211/debugfs.c",
89655 + .param3 = 1,
89656 +};
89657 +struct size_overflow_hash _001602_hash = {
89658 + .next = NULL,
89659 + .name = "ts_read",
89660 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
89661 + .param3 = 1,
89662 +};
89663 +struct size_overflow_hash _001603_hash = {
89664 + .next = NULL,
89665 + .name = "TSS_authhmac",
89666 + .file = "security/keys/trusted.c",
89667 + .param3 = 1,
89668 +};
89669 +struct size_overflow_hash _001604_hash = {
89670 + .next = NULL,
89671 + .name = "TSS_checkhmac1",
89672 + .file = "security/keys/trusted.c",
89673 + .param5 = 1,
89674 +};
89675 +struct size_overflow_hash _001605_hash = {
89676 + .next = NULL,
89677 + .name = "TSS_checkhmac2",
89678 + .file = "security/keys/trusted.c",
89679 + .param5 = 1,
89680 + .param7 = 1,
89681 +};
89682 +struct size_overflow_hash _001607_hash = {
89683 + .next = NULL,
89684 + .name = "ts_write",
89685 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
89686 + .param3 = 1,
89687 +};
89688 +struct size_overflow_hash _001608_hash = {
89689 + .next = NULL,
89690 + .name = "tx_internal_desc_overflow_read",
89691 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89692 + .param3 = 1,
89693 +};
89694 +struct size_overflow_hash _001609_hash = {
89695 + .next = NULL,
89696 + .name = "uapsd_max_sp_len_read",
89697 + .file = "net/mac80211/debugfs.c",
89698 + .param3 = 1,
89699 +};
89700 +struct size_overflow_hash _001610_hash = {
89701 + .next = NULL,
89702 + .name = "uapsd_queues_read",
89703 + .file = "net/mac80211/debugfs.c",
89704 + .param3 = 1,
89705 +};
89706 +struct size_overflow_hash _001611_hash = {
89707 + .next = NULL,
89708 + .name = "ulong_read_file",
89709 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
89710 + .param3 = 1,
89711 +};
89712 +struct size_overflow_hash _001612_hash = {
89713 + .next = NULL,
89714 + .name = "ulong_write_file",
89715 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
89716 + .param3 = 1,
89717 +};
89718 +struct size_overflow_hash _001613_hash = {
89719 + .next = NULL,
89720 + .name = "usb_alloc_coherent",
89721 + .file = "include/linux/usb.h",
89722 + .param2 = 1,
89723 +};
89724 +struct size_overflow_hash _001614_hash = {
89725 + .next = NULL,
89726 + .name = "user_power_read",
89727 + .file = "net/mac80211/debugfs.c",
89728 + .param3 = 1,
89729 +};
89730 +struct size_overflow_hash _001615_hash = {
89731 + .next = NULL,
89732 + .name = "vb2_read",
89733 + .file = "include/media/videobuf2-core.h",
89734 + .param3 = 1,
89735 +};
89736 +struct size_overflow_hash _001616_hash = {
89737 + .next = NULL,
89738 + .name = "vb2_write",
89739 + .file = "include/media/videobuf2-core.h",
89740 + .param3 = 1,
89741 +};
89742 +struct size_overflow_hash _001617_hash = {
89743 + .next = NULL,
89744 + .name = "vhost_add_used_n",
89745 + .file = "drivers/vhost/vhost.c",
89746 + .param3 = 1,
89747 +};
89748 +struct size_overflow_hash _001618_hash = {
89749 + .next = NULL,
89750 + .name = "virtqueue_add_buf",
89751 + .file = "include/linux/virtio.h",
89752 + .param3 = 1,
89753 + .param4 = 1,
89754 +};
89755 +struct size_overflow_hash _001620_hash = {
89756 + .next = NULL,
89757 + .name = "vmbus_establish_gpadl",
89758 + .file = "include/linux/hyperv.h",
89759 + .param3 = 1,
89760 +};
89761 +struct size_overflow_hash _001621_hash = {
89762 + .next = NULL,
89763 + .name = "wep_addr_key_count_read",
89764 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89765 + .param3 = 1,
89766 +};
89767 +struct size_overflow_hash _001622_hash = {
89768 + .next = NULL,
89769 + .name = "wep_decrypt_fail_read",
89770 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89771 + .param3 = 1,
89772 +};
89773 +struct size_overflow_hash _001623_hash = {
89774 + .next = NULL,
89775 + .name = "wep_default_key_count_read",
89776 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89777 + .param3 = 1,
89778 +};
89779 +struct size_overflow_hash _001624_hash = {
89780 + .next = NULL,
89781 + .name = "wep_interrupt_read",
89782 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89783 + .param3 = 1,
89784 +};
89785 +struct size_overflow_hash _001625_hash = {
89786 + .next = NULL,
89787 + .name = "wep_iv_read",
89788 + .file = "net/mac80211/debugfs.c",
89789 + .param3 = 1,
89790 +};
89791 +struct size_overflow_hash _001626_hash = {
89792 + .next = NULL,
89793 + .name = "wep_key_not_found_read",
89794 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89795 + .param3 = 1,
89796 +};
89797 +struct size_overflow_hash _001627_hash = {
89798 + .next = NULL,
89799 + .name = "wep_packets_read",
89800 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89801 + .param3 = 1,
89802 +};
89803 +struct size_overflow_hash _001628_hash = {
89804 + .next = NULL,
89805 + .name = "write_led",
89806 + .file = "drivers/platform/x86/asus_acpi.c",
89807 + .param2 = 1,
89808 +};
89809 +struct size_overflow_hash _001629_hash = {
89810 + .next = NULL,
89811 + .name = "wusb_prf",
89812 + .file = "include/linux/usb/wusb.h",
89813 + .param7 = 1,
89814 +};
89815 +struct size_overflow_hash _001630_hash = {
89816 + .next = NULL,
89817 + .name = "zd_usb_iowrite16v",
89818 + .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
89819 + .param3 = 1,
89820 +};
89821 +struct size_overflow_hash _001631_hash = {
89822 + .next = NULL,
89823 + .name = "afs_cell_lookup",
89824 + .file = "fs/afs/cell.c",
89825 + .param2 = 1,
89826 +};
89827 +struct size_overflow_hash _001632_hash = {
89828 + .next = NULL,
89829 + .name = "agp_generic_alloc_user",
89830 + .file = "drivers/char/agp/generic.c",
89831 + .param1 = 1,
89832 +};
89833 +struct size_overflow_hash _001634_hash = {
89834 + .next = NULL,
89835 + .name = "bluetooth_proc_write",
89836 + .file = "drivers/platform/x86/asus_acpi.c",
89837 + .param3 = 1,
89838 +};
89839 +struct size_overflow_hash _001635_hash = {
89840 + .next = NULL,
89841 + .name = "cache_write",
89842 + .file = "net/sunrpc/cache.c",
89843 + .param3 = 1,
89844 +};
89845 +struct size_overflow_hash _001636_hash = {
89846 + .next = NULL,
89847 + .name = "ch_do_scsi",
89848 + .file = "drivers/scsi/ch.c",
89849 + .param4 = 1,
89850 +};
89851 +struct size_overflow_hash _001637_hash = {
89852 + .next = NULL,
89853 + .name = "cx18_read",
89854 + .file = "drivers/media/video/cx18/cx18-fileops.c",
89855 + .param3 = 1,
89856 +};
89857 +struct size_overflow_hash _001638_hash = {
89858 + .next = NULL,
89859 + .name = "dccp_feat_register_sp",
89860 + .file = "net/dccp/feat.c",
89861 + .param5 = 1,
89862 +};
89863 +struct size_overflow_hash _001640_hash = {
89864 + .next = NULL,
89865 + .name = "iso_alloc_urb",
89866 + .file = "drivers/usb/misc/usbtest.c",
89867 + .param5 = 1,
89868 +};
89869 +struct size_overflow_hash _001641_hash = {
89870 + .next = NULL,
89871 + .name = "ivtv_read_pos",
89872 + .file = "drivers/media/video/ivtv/ivtv-fileops.c",
89873 + .param3 = 1,
89874 +};
89875 +struct size_overflow_hash _001642_hash = {
89876 + .next = NULL,
89877 + .name = "mcam_v4l_read",
89878 + .file = "drivers/media/video/marvell-ccic/mcam-core.c",
89879 + .param3 = 1,
89880 +};
89881 +struct size_overflow_hash _001643_hash = {
89882 + .next = NULL,
89883 + .name = "mled_proc_write",
89884 + .file = "drivers/platform/x86/asus_acpi.c",
89885 + .param3 = 1,
89886 +};
89887 +struct size_overflow_hash _001644_hash = {
89888 + .next = NULL,
89889 + .name = "nfs_idmap_lookup_id",
89890 + .file = "fs/nfs/idmap.c",
89891 + .param2 = 1,
89892 +};
89893 +struct size_overflow_hash _001645_hash = {
89894 + .next = NULL,
89895 + .name = "ocfs2_control_write",
89896 + .file = "fs/ocfs2/stack_user.c",
89897 + .param3 = 1,
89898 +};
89899 +struct size_overflow_hash _001646_hash = {
89900 + .next = NULL,
89901 + .name = "osd_req_list_dev_partitions",
89902 + .file = "include/scsi/osd_initiator.h",
89903 + .param4 = 1,
89904 +};
89905 +struct size_overflow_hash _001647_hash = {
89906 + .next = NULL,
89907 + .name = "osd_req_list_partition_collections",
89908 + .file = "include/scsi/osd_initiator.h",
89909 + .param5 = 1,
89910 +};
89911 +struct size_overflow_hash _001648_hash = {
89912 + .next = NULL,
89913 + .name = "pwc_video_read",
89914 + .file = "drivers/media/video/pwc/pwc-if.c",
89915 + .param3 = 1,
89916 +};
89917 +struct size_overflow_hash _001649_hash = {
89918 + .next = NULL,
89919 + .name = "scsi_vpd_inquiry",
89920 + .file = "drivers/scsi/scsi.c",
89921 + .param4 = 1,
89922 +};
89923 +struct size_overflow_hash _001650_hash = {
89924 + .next = NULL,
89925 + .name = "snd_gf1_mem_proc_dump",
89926 + .file = "sound/isa/gus/gus_mem_proc.c",
89927 + .param5 = 1,
89928 +};
89929 +struct size_overflow_hash _001651_hash = {
89930 + .next = NULL,
89931 + .name = "spi_dv_device_echo_buffer",
89932 + .file = "drivers/scsi/scsi_transport_spi.c",
89933 + .param2 = 1,
89934 + .param3 = 1,
89935 +};
89936 +struct size_overflow_hash _001653_hash = {
89937 + .next = NULL,
89938 + .name = "tled_proc_write",
89939 + .file = "drivers/platform/x86/asus_acpi.c",
89940 + .param3 = 1,
89941 +};
89942 +struct size_overflow_hash _001655_hash = {
89943 + .next = NULL,
89944 + .name = "usb_allocate_stream_buffers",
89945 + .file = "drivers/media/dvb/dvb-usb/usb-urb.c",
89946 + .param3 = 1,
89947 +};
89948 +struct size_overflow_hash _001656_hash = {
89949 + .next = NULL,
89950 + .name = "_usb_writeN_sync",
89951 + .file = "drivers/net/wireless/rtlwifi/usb.c",
89952 + .param4 = 1,
89953 +};
89954 +struct size_overflow_hash _001657_hash = {
89955 + .next = NULL,
89956 + .name = "vhost_add_used_and_signal_n",
89957 + .file = "drivers/vhost/vhost.c",
89958 + .param4 = 1,
89959 +};
89960 +struct size_overflow_hash _001658_hash = {
89961 + .next = NULL,
89962 + .name = "vmbus_open",
89963 + .file = "include/linux/hyperv.h",
89964 + .param2 = 1,
89965 + .param3 = 1,
89966 +};
89967 +struct size_overflow_hash _001660_hash = {
89968 + .next = NULL,
89969 + .name = "wled_proc_write",
89970 + .file = "drivers/platform/x86/asus_acpi.c",
89971 + .param3 = 1,
89972 +};
89973 +struct size_overflow_hash _001661_hash = {
89974 + .next = NULL,
89975 + .name = "wusb_prf_256",
89976 + .file = "include/linux/usb/wusb.h",
89977 + .param7 = 1,
89978 +};
89979 +struct size_overflow_hash _001662_hash = {
89980 + .next = NULL,
89981 + .name = "wusb_prf_64",
89982 + .file = "include/linux/usb/wusb.h",
89983 + .param7 = 1,
89984 +};
89985 +struct size_overflow_hash _001663_hash = {
89986 + .next = NULL,
89987 + .name = "agp_allocate_memory",
89988 + .file = "include/linux/agp_backend.h",
89989 + .param2 = 1,
89990 +};
89991 +struct size_overflow_hash _001664_hash = {
89992 + .next = NULL,
89993 + .name = "cx18_read_pos",
89994 + .file = "drivers/media/video/cx18/cx18-fileops.c",
89995 + .param3 = 1,
89996 +};
89997 +struct size_overflow_hash _001665_hash = {
89998 + .next = NULL,
89999 + .name = "nfs_map_group_to_gid",
90000 + .file = "include/linux/nfs_idmap.h",
90001 + .param3 = 1,
90002 +};
90003 +struct size_overflow_hash _001666_hash = {
90004 + .next = NULL,
90005 + .name = "nfs_map_name_to_uid",
90006 + .file = "include/linux/nfs_idmap.h",
90007 + .param3 = 1,
90008 +};
90009 +struct size_overflow_hash _001667_hash = {
90010 + .next = NULL,
90011 + .name = "test_iso_queue",
90012 + .file = "drivers/usb/misc/usbtest.c",
90013 + .param5 = 1,
90014 +};
90015 +struct size_overflow_hash _001668_hash = {
90016 + .next = NULL,
90017 + .name = "agp_allocate_memory_wrap",
90018 + .file = "drivers/char/agp/frontend.c",
90019 + .param1 = 1,
90020 +};
90021 +struct size_overflow_hash _001669_hash = {
90022 + .next = NULL,
90023 + .name = "alloc_irq_cpu_rmap",
90024 + .file = "include/linux/cpu_rmap.h",
90025 + .param1 = 1,
90026 +};
90027 +struct size_overflow_hash _001670_hash = {
90028 + .next = NULL,
90029 + .name = "alloc_ring",
90030 + .file = "drivers/net/ethernet/chelsio/cxgb4/sge.c",
90031 + .param2 = 1,
90032 + .param4 = 1,
90033 +};
90034 +struct size_overflow_hash _001672_hash = {
90035 + .next = &_001124_hash,
90036 + .name = "atomic_counters_read",
90037 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
90038 + .param3 = 1,
90039 +};
90040 +struct size_overflow_hash _001673_hash = {
90041 + .next = NULL,
90042 + .name = "atomic_stats_read",
90043 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
90044 + .param3 = 1,
90045 +};
90046 +struct size_overflow_hash _001674_hash = {
90047 + .next = NULL,
90048 + .name = "c4iw_init_resource_fifo",
90049 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
90050 + .param3 = 1,
90051 +};
90052 +struct size_overflow_hash _001675_hash = {
90053 + .next = NULL,
90054 + .name = "c4iw_init_resource_fifo_random",
90055 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
90056 + .param3 = 1,
90057 +};
90058 +struct size_overflow_hash _001676_hash = {
90059 + .next = NULL,
90060 + .name = "compat_do_arpt_set_ctl",
90061 + .file = "net/ipv4/netfilter/arp_tables.c",
90062 + .param4 = 1,
90063 +};
90064 +struct size_overflow_hash _001677_hash = {
90065 + .next = NULL,
90066 + .name = "compat_do_ip6t_set_ctl",
90067 + .file = "net/ipv6/netfilter/ip6_tables.c",
90068 + .param4 = 1,
90069 +};
90070 +struct size_overflow_hash _001678_hash = {
90071 + .next = NULL,
90072 + .name = "compat_do_ipt_set_ctl",
90073 + .file = "net/ipv4/netfilter/ip_tables.c",
90074 + .param4 = 1,
90075 +};
90076 +struct size_overflow_hash _001679_hash = {
90077 + .next = NULL,
90078 + .name = "cxio_init_resource_fifo",
90079 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
90080 + .param3 = 1,
90081 +};
90082 +struct size_overflow_hash _001680_hash = {
90083 + .next = NULL,
90084 + .name = "cxio_init_resource_fifo_random",
90085 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
90086 + .param3 = 1,
90087 +};
90088 +struct size_overflow_hash _001681_hash = {
90089 + .next = NULL,
90090 + .name = "dev_counters_read",
90091 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90092 + .param3 = 1,
90093 +};
90094 +struct size_overflow_hash _001682_hash = {
90095 + .next = NULL,
90096 + .name = "dev_names_read",
90097 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90098 + .param3 = 1,
90099 +};
90100 +struct size_overflow_hash _001683_hash = {
90101 + .next = &_001468_hash,
90102 + .name = "do_arpt_set_ctl",
90103 + .file = "net/ipv4/netfilter/arp_tables.c",
90104 + .param4 = 1,
90105 +};
90106 +struct size_overflow_hash _001684_hash = {
90107 + .next = NULL,
90108 + .name = "do_ip6t_set_ctl",
90109 + .file = "net/ipv6/netfilter/ip6_tables.c",
90110 + .param4 = 1,
90111 +};
90112 +struct size_overflow_hash _001685_hash = {
90113 + .next = NULL,
90114 + .name = "do_ipt_set_ctl",
90115 + .file = "net/ipv4/netfilter/ip_tables.c",
90116 + .param4 = 1,
90117 +};
90118 +struct size_overflow_hash _001686_hash = {
90119 + .next = NULL,
90120 + .name = "drbd_bm_resize",
90121 + .file = "drivers/block/drbd/drbd_bitmap.c",
90122 + .param2 = 1,
90123 +};
90124 +struct size_overflow_hash _001687_hash = {
90125 + .next = NULL,
90126 + .name = "driver_names_read",
90127 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90128 + .param3 = 1,
90129 +};
90130 +struct size_overflow_hash _001688_hash = {
90131 + .next = NULL,
90132 + .name = "driver_stats_read",
90133 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90134 + .param3 = 1,
90135 +};
90136 +struct size_overflow_hash _001689_hash = {
90137 + .next = NULL,
90138 + .name = "flash_read",
90139 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90140 + .param3 = 1,
90141 +};
90142 +struct size_overflow_hash _001690_hash = {
90143 + .next = NULL,
90144 + .name = "flash_read",
90145 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
90146 + .param3 = 1,
90147 +};
90148 +struct size_overflow_hash _001691_hash = {
90149 + .next = NULL,
90150 + .name = "flash_write",
90151 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90152 + .param3 = 1,
90153 +};
90154 +struct size_overflow_hash _001692_hash = {
90155 + .next = NULL,
90156 + .name = "flash_write",
90157 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
90158 + .param3 = 1,
90159 +};
90160 +struct size_overflow_hash _001693_hash = {
90161 + .next = NULL,
90162 + .name = "ghash_async_setkey",
90163 + .file = "arch/x86/crypto/ghash-clmulni-intel_glue.c",
90164 + .param3 = 1,
90165 +};
90166 +struct size_overflow_hash _001694_hash = {
90167 + .next = NULL,
90168 + .name = "handle_eviocgbit",
90169 + .file = "drivers/input/evdev.c",
90170 + .param3 = 1,
90171 +};
90172 +struct size_overflow_hash _001695_hash = {
90173 + .next = NULL,
90174 + .name = "hid_parse_report",
90175 + .file = "include/linux/hid.h",
90176 + .param3 = 1,
90177 +};
90178 +struct size_overflow_hash _001696_hash = {
90179 + .next = NULL,
90180 + .name = "ipath_get_base_info",
90181 + .file = "drivers/infiniband/hw/ipath/ipath_file_ops.c",
90182 + .param3 = 1,
90183 +};
90184 +struct size_overflow_hash _001697_hash = {
90185 + .next = NULL,
90186 + .name = "options_write",
90187 + .file = "drivers/misc/sgi-gru/gruprocfs.c",
90188 + .param3 = 1,
90189 +};
90190 +struct size_overflow_hash _001698_hash = {
90191 + .next = NULL,
90192 + .name = "portcntrs_1_read",
90193 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90194 + .param3 = 1,
90195 +};
90196 +struct size_overflow_hash _001699_hash = {
90197 + .next = NULL,
90198 + .name = "portcntrs_2_read",
90199 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90200 + .param3 = 1,
90201 +};
90202 +struct size_overflow_hash _001700_hash = {
90203 + .next = NULL,
90204 + .name = "portnames_read",
90205 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90206 + .param3 = 1,
90207 +};
90208 +struct size_overflow_hash _001701_hash = {
90209 + .next = NULL,
90210 + .name = "qib_alloc_devdata",
90211 + .file = "drivers/infiniband/hw/qib/qib_init.c",
90212 + .param2 = 1,
90213 +};
90214 +struct size_overflow_hash _001702_hash = {
90215 + .next = NULL,
90216 + .name = "qib_diag_write",
90217 + .file = "drivers/infiniband/hw/qib/qib_diag.c",
90218 + .param3 = 1,
90219 +};
90220 +struct size_overflow_hash _001703_hash = {
90221 + .next = NULL,
90222 + .name = "qib_get_base_info",
90223 + .file = "drivers/infiniband/hw/qib/qib_file_ops.c",
90224 + .param3 = 1,
90225 +};
90226 +struct size_overflow_hash _001704_hash = {
90227 + .next = NULL,
90228 + .name = "qsfp_1_read",
90229 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90230 + .param3 = 1,
90231 +};
90232 +struct size_overflow_hash _001705_hash = {
90233 + .next = NULL,
90234 + .name = "qsfp_2_read",
90235 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
90236 + .param3 = 1,
90237 +};
90238 +struct size_overflow_hash _001706_hash = {
90239 + .next = NULL,
90240 + .name = "rfc4106_set_key",
90241 + .file = "arch/x86/crypto/aesni-intel_glue.c",
90242 + .param3 = 1,
90243 +};
90244 +struct size_overflow_hash _001707_hash = {
90245 + .next = &_000258_hash,
90246 + .name = "stats_read_ul",
90247 + .file = "drivers/idle/i7300_idle.c",
90248 + .param3 = 1,
90249 +};
90250 +struct size_overflow_hash _001708_hash = {
90251 + .next = NULL,
90252 + .name = "xpc_kmalloc_cacheline_aligned",
90253 + .file = "drivers/misc/sgi-xp/xpc_partition.c",
90254 + .param1 = 1,
90255 +};
90256 +struct size_overflow_hash _001709_hash = {
90257 + .next = NULL,
90258 + .name = "xpc_kzalloc_cacheline_aligned",
90259 + .file = "drivers/misc/sgi-xp/xpc_main.c",
90260 + .param1 = 1,
90261 +};
90262 +struct size_overflow_hash _001710_hash = {
90263 + .next = NULL,
90264 + .name = "c4iw_init_resource",
90265 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
90266 + .param2 = 1,
90267 + .param3 = 1,
90268 +};
90269 +struct size_overflow_hash _001712_hash = {
90270 + .next = NULL,
90271 + .name = "cxio_hal_init_resource",
90272 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
90273 + .param2 = 1,
90274 + .param7 = 1,
90275 + .param6 = 1,
90276 +};
90277 +struct size_overflow_hash _001715_hash = {
90278 + .next = &_000734_hash,
90279 + .name = "cxio_hal_init_rhdl_resource",
90280 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
90281 + .param1 = 1,
90282 +};
90283 +struct size_overflow_hash _001716_hash = {
90284 + .next = NULL,
90285 + .name = "amthi_read",
90286 + .file = "drivers/staging/mei/iorw.c",
90287 + .param4 = 1,
90288 +};
90289 +struct size_overflow_hash _001717_hash = {
90290 + .next = NULL,
90291 + .name = "bcm_char_read",
90292 + .file = "drivers/staging/bcm/Bcmchar.c",
90293 + .param3 = 1,
90294 +};
90295 +struct size_overflow_hash _001718_hash = {
90296 + .next = NULL,
90297 + .name = "BcmCopySection",
90298 + .file = "drivers/staging/bcm/nvm.c",
90299 + .param5 = 1,
90300 +};
90301 +struct size_overflow_hash _001719_hash = {
90302 + .next = NULL,
90303 + .name = "buffer_from_user",
90304 + .file = "drivers/staging/vme/devices/vme_user.c",
90305 + .param3 = 1,
90306 +};
90307 +struct size_overflow_hash _001720_hash = {
90308 + .next = NULL,
90309 + .name = "buffer_to_user",
90310 + .file = "drivers/staging/vme/devices/vme_user.c",
90311 + .param3 = 1,
90312 +};
90313 +struct size_overflow_hash _001721_hash = {
90314 + .next = NULL,
90315 + .name = "capabilities_read",
90316 + .file = "drivers/xen/xenfs/super.c",
90317 + .param3 = 1,
90318 +};
90319 +struct size_overflow_hash _001722_hash = {
90320 + .next = NULL,
90321 + .name = "chd_dec_fetch_cdata",
90322 + .file = "drivers/staging/crystalhd/crystalhd_lnx.c",
90323 + .param3 = 1,
90324 +};
90325 +struct size_overflow_hash _001723_hash = {
90326 + .next = NULL,
90327 + .name = "create_bounce_buffer",
90328 + .file = "drivers/staging/hv/storvsc_drv.c",
90329 + .param3 = 1,
90330 +};
90331 +struct size_overflow_hash _001724_hash = {
90332 + .next = NULL,
90333 + .name = "crystalhd_create_dio_pool",
90334 + .file = "drivers/staging/crystalhd/crystalhd_misc.c",
90335 + .param2 = 1,
90336 +};
90337 +struct size_overflow_hash _001725_hash = {
90338 + .next = NULL,
90339 + .name = "do_read_log_to_user",
90340 + .file = "drivers/staging/android/logger.c",
90341 + .param4 = 1,
90342 +};
90343 +struct size_overflow_hash _001726_hash = {
90344 + .next = NULL,
90345 + .name = "do_write_log_from_user",
90346 + .file = "drivers/staging/android/logger.c",
90347 + .param3 = 1,
90348 +};
90349 +struct size_overflow_hash _001727_hash = {
90350 + .next = NULL,
90351 + .name = "dt3155_read",
90352 + .file = "drivers/staging/media/dt3155v4l/dt3155v4l.c",
90353 + .param3 = 1,
90354 +};
90355 +struct size_overflow_hash _001728_hash = {
90356 + .next = NULL,
90357 + .name = "easycap_alsa_vmalloc",
90358 + .file = "drivers/staging/media/easycap/easycap_sound.c",
90359 + .param2 = 1,
90360 +};
90361 +struct size_overflow_hash _001729_hash = {
90362 + .next = NULL,
90363 + .name = "evm_read_key",
90364 + .file = "security/integrity/evm/evm_secfs.c",
90365 + .param3 = 1,
90366 +};
90367 +struct size_overflow_hash _001730_hash = {
90368 + .next = NULL,
90369 + .name = "evm_write_key",
90370 + .file = "security/integrity/evm/evm_secfs.c",
90371 + .param3 = 1,
90372 +};
90373 +struct size_overflow_hash _001731_hash = {
90374 + .next = NULL,
90375 + .name = "evtchn_read",
90376 + .file = "drivers/xen/evtchn.c",
90377 + .param3 = 1,
90378 +};
90379 +struct size_overflow_hash _001732_hash = {
90380 + .next = NULL,
90381 + .name = "gather_array",
90382 + .file = "drivers/xen/privcmd.c",
90383 + .param3 = 1,
90384 +};
90385 +struct size_overflow_hash _001733_hash = {
90386 + .next = NULL,
90387 + .name = "gnttab_map",
90388 + .file = "drivers/xen/grant-table.c",
90389 + .param2 = 1,
90390 +};
90391 +struct size_overflow_hash _001734_hash = {
90392 + .next = NULL,
90393 + .name = "iio_read_first_n_kfifo",
90394 + .file = "drivers/staging/iio/kfifo_buf.c",
90395 + .param2 = 1,
90396 +};
90397 +struct size_overflow_hash _001735_hash = {
90398 + .next = NULL,
90399 + .name = "iio_read_first_n_sw_rb",
90400 + .file = "drivers/staging/iio/ring_sw.c",
90401 + .param2 = 1,
90402 +};
90403 +struct size_overflow_hash _001736_hash = {
90404 + .next = NULL,
90405 + .name = "keymap_store",
90406 + .file = "drivers/staging/speakup/kobjects.c",
90407 + .param4 = 1,
90408 +};
90409 +struct size_overflow_hash _001737_hash = {
90410 + .next = NULL,
90411 + .name = "line6_dumpreq_initbuf",
90412 + .file = "drivers/staging/line6/dumprequest.c",
90413 + .param3 = 1,
90414 +};
90415 +struct size_overflow_hash _001738_hash = {
90416 + .next = NULL,
90417 + .name = "lirc_write",
90418 + .file = "drivers/staging/media/lirc/lirc_parallel.c",
90419 + .param3 = 1,
90420 +};
90421 +struct size_overflow_hash _001739_hash = {
90422 + .next = NULL,
90423 + .name = "lirc_write",
90424 + .file = "drivers/staging/media/lirc/lirc_sir.c",
90425 + .param3 = 1,
90426 +};
90427 +struct size_overflow_hash _001740_hash = {
90428 + .next = &_000815_hash,
90429 + .name = "lirc_write",
90430 + .file = "drivers/staging/media/lirc/lirc_serial.c",
90431 + .param3 = 1,
90432 +};
90433 +struct size_overflow_hash _001741_hash = {
90434 + .next = &_001021_hash,
90435 + .name = "_malloc",
90436 + .file = "drivers/staging/rtl8712/osdep_service.h",
90437 + .param1 = 1,
90438 +};
90439 +struct size_overflow_hash _001742_hash = {
90440 + .next = NULL,
90441 + .name = "mei_read",
90442 + .file = "drivers/staging/mei/main.c",
90443 + .param3 = 1,
90444 +};
90445 +struct size_overflow_hash _001743_hash = {
90446 + .next = NULL,
90447 + .name = "mei_write",
90448 + .file = "drivers/staging/mei/main.c",
90449 + .param3 = 1,
90450 +};
90451 +struct size_overflow_hash _001744_hash = {
90452 + .next = NULL,
90453 + .name = "msg_set",
90454 + .file = "drivers/staging/speakup/i18n.c",
90455 + .param3 = 1,
90456 +};
90457 +struct size_overflow_hash _001745_hash = {
90458 + .next = NULL,
90459 + .name = "OS_kmalloc",
90460 + .file = "drivers/staging/cxt1e1/sbecom_inline_linux.h",
90461 + .param1 = 1,
90462 +};
90463 +struct size_overflow_hash _001746_hash = {
90464 + .next = NULL,
90465 + .name = "queue_reply",
90466 + .file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
90467 + .param3 = 1,
90468 +};
90469 +struct size_overflow_hash _001747_hash = {
90470 + .next = &_000841_hash,
90471 + .name = "resource_from_user",
90472 + .file = "drivers/staging/vme/devices/vme_user.c",
90473 + .param3 = 1,
90474 +};
90475 +struct size_overflow_hash _001748_hash = {
90476 + .next = NULL,
90477 + .name = "sca3000_read_first_n_hw_rb",
90478 + .file = "drivers/staging/iio/accel/sca3000_ring.c",
90479 + .param2 = 1,
90480 +};
90481 +struct size_overflow_hash _001749_hash = {
90482 + .next = NULL,
90483 + .name = "sep_lock_user_pages",
90484 + .file = "drivers/staging/sep/sep_driver.c",
90485 + .param2 = 1,
90486 + .param3 = 1,
90487 +};
90488 +struct size_overflow_hash _001751_hash = {
90489 + .next = NULL,
90490 + .name = "sep_prepare_input_output_dma_table_in_dcb",
90491 + .file = "drivers/staging/sep/sep_driver.c",
90492 + .param4 = 1,
90493 + .param5 = 1,
90494 + .param2 = 1,
90495 + .param3 = 1,
90496 +};
90497 +struct size_overflow_hash _001753_hash = {
90498 + .next = NULL,
90499 + .name = "split",
90500 + .file = "drivers/xen/xenbus/xenbus_xs.c",
90501 + .param2 = 1,
90502 +};
90503 +struct size_overflow_hash _001754_hash = {
90504 + .next = NULL,
90505 + .name = "storvsc_connect_to_vsp",
90506 + .file = "drivers/staging/hv/storvsc_drv.c",
90507 + .param2 = 1,
90508 +};
90509 +struct size_overflow_hash _001755_hash = {
90510 + .next = NULL,
90511 + .name = "u32_array_read",
90512 + .file = "arch/x86/xen/debugfs.c",
90513 + .param3 = 1,
90514 +};
90515 +struct size_overflow_hash _001756_hash = {
90516 + .next = NULL,
90517 + .name = "ValidateDSDParamsChecksum",
90518 + .file = "drivers/staging/bcm/led_control.c",
90519 + .param3 = 1,
90520 +};
90521 +struct size_overflow_hash _001757_hash = {
90522 + .next = NULL,
90523 + .name = "vfd_write",
90524 + .file = "drivers/staging/media/lirc/lirc_sasem.c",
90525 + .param3 = 1,
90526 +};
90527 +struct size_overflow_hash _001758_hash = {
90528 + .next = NULL,
90529 + .name = "vfd_write",
90530 + .file = "drivers/staging/media/lirc/lirc_imon.c",
90531 + .param3 = 1,
90532 +};
90533 +struct size_overflow_hash _001759_hash = {
90534 + .next = NULL,
90535 + .name = "Wb35Reg_BurstWrite",
90536 + .file = "drivers/staging/winbond/wb35reg.c",
90537 + .param4 = 1,
90538 +};
90539 +struct size_overflow_hash _001760_hash = {
90540 + .next = NULL,
90541 + .name = "xenbus_file_write",
90542 + .file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
90543 + .param3 = 1,
90544 +};
90545 +struct size_overflow_hash _001761_hash = {
90546 + .next = NULL,
90547 + .name = "xsd_read",
90548 + .file = "drivers/xen/xenfs/xenstored.c",
90549 + .param3 = 1,
90550 +};
90551 +struct size_overflow_hash _001762_hash = {
90552 + .next = NULL,
90553 + .name = "line6_dumpreq_init",
90554 + .file = "drivers/staging/line6/dumprequest.c",
90555 + .param3 = 1,
90556 +};
90557 +struct size_overflow_hash _001763_hash = {
90558 + .next = NULL,
90559 + .name = "r8712_usbctrl_vendorreq",
90560 + .file = "drivers/staging/rtl8712/usb_ops_linux.c",
90561 + .param6 = 1,
90562 +};
90563 +struct size_overflow_hash _001764_hash = {
90564 + .next = NULL,
90565 + .name = "r871x_set_wpa_ie",
90566 + .file = "drivers/staging/rtl8712/rtl871x_ioctl_linux.c",
90567 + .param3 = 1,
90568 +};
90569 +struct size_overflow_hash _001765_hash = {
90570 + .next = NULL,
90571 + .name = "sep_prepare_input_dma_table",
90572 + .file = "drivers/staging/sep/sep_driver.c",
90573 + .param2 = 1,
90574 + .param3 = 1,
90575 +};
90576 +struct size_overflow_hash _001767_hash = {
90577 + .next = NULL,
90578 + .name = "sep_prepare_input_output_dma_table",
90579 + .file = "drivers/staging/sep/sep_driver.c",
90580 + .param2 = 1,
90581 + .param4 = 1,
90582 + .param3 = 1,
90583 +};
90584 +struct size_overflow_hash _001770_hash = {
90585 + .next = NULL,
90586 + .name = "vme_user_write",
90587 + .file = "drivers/staging/vme/devices/vme_user.c",
90588 + .param3 = 1,
90589 +};
90590 +struct size_overflow_hash _001771_hash = {
90591 + .next = NULL,
90592 + .name = "alloc_ebda_hpc",
90593 + .file = "drivers/pci/hotplug/ibmphp_ebda.c",
90594 + .param1 = 1,
90595 + .param2 = 1,
90596 +};
90597 +struct size_overflow_hash _001772_hash = {
90598 + .next = NULL,
90599 + .name = "add_uuid",
90600 + .file = "net/bluetooth/mgmt.c",
90601 + .param4 = 1,
90602 +};
90603 +struct size_overflow_hash _001773_hash = {
90604 + .next = NULL,
90605 + .name = "__alloc_extent_buffer",
90606 + .file = "fs/btrfs/extent_io.c",
90607 + .param3 = 1,
90608 +};
90609 +struct size_overflow_hash _001774_hash = {
90610 + .next = NULL,
90611 + .name = "array_zalloc",
90612 + .file = "drivers/target/target_core_tpg.c",
90613 + .param2 = 1,
90614 +};
90615 +struct size_overflow_hash _001775_hash = {
90616 + .next = NULL,
90617 + .name = "ath6kl_fwlog_block_read",
90618 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
90619 + .param3 = 1,
90620 +};
90621 +struct size_overflow_hash _001776_hash = {
90622 + .next = NULL,
90623 + .name = "ath6kl_listen_int_read",
90624 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
90625 + .param3 = 1,
90626 +};
90627 +struct size_overflow_hash _001777_hash = {
90628 + .next = NULL,
90629 + .name = "ath6kl_mgmt_powersave_ap",
90630 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
90631 + .param6 = 1,
90632 +};
90633 +struct size_overflow_hash _001778_hash = {
90634 + .next = NULL,
90635 + .name = "__ath6kl_wmi_send_mgmt_cmd",
90636 + .file = "drivers/net/wireless/ath/ath6kl/wmi.c",
90637 + .param7 = 1,
90638 +};
90639 +struct size_overflow_hash _001779_hash = {
90640 + .next = NULL,
90641 + .name = "cld_pipe_downcall",
90642 + .file = "fs/nfsd/nfs4recover.c",
90643 + .param3 = 1,
90644 +};
90645 +struct size_overflow_hash _001780_hash = {
90646 + .next = NULL,
90647 + .name = "create_bounce_buffer",
90648 + .file = "drivers/scsi/storvsc_drv.c",
90649 + .param3 = 1,
90650 +};
90651 +struct size_overflow_hash _001781_hash = {
90652 + .next = NULL,
90653 + .name = "dwc3_link_state_write",
90654 + .file = "drivers/usb/dwc3/debugfs.c",
90655 + .param3 = 1,
90656 +};
90657 +struct size_overflow_hash _001782_hash = {
90658 + .next = NULL,
90659 + .name = "dwc3_testmode_write",
90660 + .file = "drivers/usb/dwc3/debugfs.c",
90661 + .param3 = 1,
90662 +};
90663 +struct size_overflow_hash _001783_hash = {
90664 + .next = NULL,
90665 + .name = "dynamic_ps_timeout_read",
90666 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90667 + .param3 = 1,
90668 +};
90669 +struct size_overflow_hash _001784_hash = {
90670 + .next = NULL,
90671 + .name = "forced_ps_read",
90672 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90673 + .param3 = 1,
90674 +};
90675 +struct size_overflow_hash _001785_hash = {
90676 + .next = NULL,
90677 + .name = "idmap_pipe_downcall",
90678 + .file = "fs/nfs/idmap.c",
90679 + .param3 = 1,
90680 +};
90681 +struct size_overflow_hash _001786_hash = {
90682 + .next = NULL,
90683 + .name = "ieee80211_if_read_rc_rateidx_mcs_mask_2ghz",
90684 + .file = "net/mac80211/debugfs_netdev.c",
90685 + .param3 = 1,
90686 +};
90687 +struct size_overflow_hash _001787_hash = {
90688 + .next = NULL,
90689 + .name = "ieee80211_if_read_rc_rateidx_mcs_mask_5ghz",
90690 + .file = "net/mac80211/debugfs_netdev.c",
90691 + .param3 = 1,
90692 +};
90693 +struct size_overflow_hash _001788_hash = {
90694 + .next = NULL,
90695 + .name = "ieee80211_if_read_rssi_threshold",
90696 + .file = "net/mac80211/debugfs_netdev.c",
90697 + .param3 = 1,
90698 +};
90699 +struct size_overflow_hash _001789_hash = {
90700 + .next = NULL,
90701 + .name = "ieee80211_if_read_uapsd_max_sp_len",
90702 + .file = "net/mac80211/debugfs_netdev.c",
90703 + .param3 = 1,
90704 +};
90705 +struct size_overflow_hash _001790_hash = {
90706 + .next = NULL,
90707 + .name = "ieee80211_if_read_uapsd_queues",
90708 + .file = "net/mac80211/debugfs_netdev.c",
90709 + .param3 = 1,
90710 +};
90711 +struct size_overflow_hash _001791_hash = {
90712 + .next = NULL,
90713 + .name = "irq_domain_add_linear",
90714 + .file = "include/linux/irqdomain.h",
90715 + .param2 = 1,
90716 +};
90717 +struct size_overflow_hash _001792_hash = {
90718 + .next = NULL,
90719 + .name = "kmalloc_array",
90720 + .file = "include/linux/slab.h",
90721 + .param1 = 1,
90722 + .param2 = 1,
90723 +};
90724 +struct size_overflow_hash _001794_hash = {
90725 + .next = NULL,
90726 + .name = "nfc_llcp_send_i_frame",
90727 + .file = "net/nfc/llcp/commands.c",
90728 + .param3 = 1,
90729 +};
90730 +struct size_overflow_hash _001797_hash = {
90731 + .next = NULL,
90732 + .name = "pn533_dep_link_up",
90733 + .file = "drivers/nfc/pn533.c",
90734 + .param5 = 1,
90735 +};
90736 +struct size_overflow_hash _001798_hash = {
90737 + .next = NULL,
90738 + .name = "port_show_regs",
90739 + .file = "drivers/tty/serial/pch_uart.c",
90740 + .param3 = 1,
90741 +};
90742 +struct size_overflow_hash _001799_hash = {
90743 + .next = NULL,
90744 + .name = "qla4xxx_alloc_work",
90745 + .file = "drivers/scsi/qla4xxx/ql4_os.c",
90746 + .param2 = 1,
90747 +};
90748 +struct size_overflow_hash _001800_hash = {
90749 + .next = NULL,
90750 + .name = "rbd_add",
90751 + .file = "drivers/block/rbd.c",
90752 + .param3 = 1,
90753 +};
90754 +struct size_overflow_hash _001801_hash = {
90755 + .next = NULL,
90756 + .name = "read_file_reset",
90757 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
90758 + .param3 = 1,
90759 +};
90760 +struct size_overflow_hash _001802_hash = {
90761 + .next = NULL,
90762 + .name = "regmap_bulk_write",
90763 + .file = "include/linux/regmap.h",
90764 + .param4 = 1,
90765 +};
90766 +struct size_overflow_hash _001803_hash = {
90767 + .next = NULL,
90768 + .name = "regmap_name_read_file",
90769 + .file = "drivers/base/regmap/regmap-debugfs.c",
90770 + .param3 = 1,
90771 +};
90772 +struct size_overflow_hash _001804_hash = {
90773 + .next = NULL,
90774 + .name = "reiserfs_allocate_list_bitmaps",
90775 + .file = "fs/reiserfs/journal.c",
90776 + .param3 = 1,
90777 +};
90778 +struct size_overflow_hash _001805_hash = {
90779 + .next = NULL,
90780 + .name = "reiserfs_resize",
90781 + .file = "fs/reiserfs/resize.c",
90782 + .param2 = 1,
90783 +};
90784 +struct size_overflow_hash _001806_hash = {
90785 + .next = NULL,
90786 + .name = "remove_uuid",
90787 + .file = "net/bluetooth/mgmt.c",
90788 + .param4 = 1,
90789 +};
90790 +struct size_overflow_hash _001807_hash = {
90791 + .next = NULL,
90792 + .name = "set_dev_class",
90793 + .file = "net/bluetooth/mgmt.c",
90794 + .param4 = 1,
90795 +};
90796 +struct size_overflow_hash _001808_hash = {
90797 + .next = NULL,
90798 + .name = "set_le",
90799 + .file = "net/bluetooth/mgmt.c",
90800 + .param4 = 1,
90801 +};
90802 +struct size_overflow_hash _001809_hash = {
90803 + .next = NULL,
90804 + .name = "set_link_security",
90805 + .file = "net/bluetooth/mgmt.c",
90806 + .param4 = 1,
90807 +};
90808 +struct size_overflow_hash _001810_hash = {
90809 + .next = NULL,
90810 + .name = "set_ssp",
90811 + .file = "net/bluetooth/mgmt.c",
90812 + .param4 = 1,
90813 +};
90814 +struct size_overflow_hash _001811_hash = {
90815 + .next = NULL,
90816 + .name = "shmem_setxattr",
90817 + .file = "mm/shmem.c",
90818 + .param4 = 1,
90819 +};
90820 +struct size_overflow_hash _001812_hash = {
90821 + .next = NULL,
90822 + .name = "shmem_xattr_alloc",
90823 + .file = "mm/shmem.c",
90824 + .param2 = 1,
90825 +};
90826 +struct size_overflow_hash _001813_hash = {
90827 + .next = NULL,
90828 + .name = "split_scan_timeout_read",
90829 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90830 + .param3 = 1,
90831 +};
90832 +struct size_overflow_hash _001814_hash = {
90833 + .next = NULL,
90834 + .name = "storvsc_connect_to_vsp",
90835 + .file = "drivers/scsi/storvsc_drv.c",
90836 + .param2 = 1,
90837 +};
90838 +struct size_overflow_hash _001815_hash = {
90839 + .next = NULL,
90840 + .name = "suspend_dtim_interval_read",
90841 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90842 + .param3 = 1,
90843 +};
90844 +struct size_overflow_hash _001816_hash = {
90845 + .next = NULL,
90846 + .name = "alloc_extent_buffer",
90847 + .file = "fs/btrfs/extent_io.c",
90848 + .param3 = 1,
90849 +};
90850 +struct size_overflow_hash _001817_hash = {
90851 + .next = NULL,
90852 + .name = "nfs_idmap_get_key",
90853 + .file = "fs/nfs/idmap.c",
90854 + .param2 = 1,
90855 +};
90856 +struct size_overflow_hash _001818_hash = {
90857 + .next = NULL,
90858 + .name = "iio_debugfs_read_reg",
90859 + .file = "drivers/staging/iio/industrialio-core.c",
90860 + .param3 = 1,
90861 +};
90862 +struct size_overflow_hash _001819_hash = {
90863 + .next = NULL,
90864 + .name = "iio_debugfs_write_reg",
90865 + .file = "drivers/staging/iio/industrialio-core.c",
90866 + .param3 = 1,
90867 +};
90868 +struct size_overflow_hash _001820_hash = {
90869 + .next = NULL,
90870 + .name = "iio_event_chrdev_read",
90871 + .file = "drivers/staging/iio/industrialio-event.c",
90872 + .param3 = 1,
90873 +};
90874 +struct size_overflow_hash _001821_hash = {
90875 + .next = NULL,
90876 + .name = "sep_create_dcb_dmatables_context",
90877 + .file = "drivers/staging/sep/sep_main.c",
90878 + .param6 = 1,
90879 +};
90880 +struct size_overflow_hash _001822_hash = {
90881 + .next = NULL,
90882 + .name = "sep_create_dcb_dmatables_context_kernel",
90883 + .file = "drivers/staging/sep/sep_main.c",
90884 + .param6 = 1,
90885 +};
90886 +struct size_overflow_hash _001823_hash = {
90887 + .next = NULL,
90888 + .name = "sep_create_msgarea_context",
90889 + .file = "drivers/staging/sep/sep_main.c",
90890 + .param4 = 1,
90891 +};
90892 +struct size_overflow_hash _001824_hash = {
90893 + .next = NULL,
90894 + .name = "sep_lli_table_secure_dma",
90895 + .file = "drivers/staging/sep/sep_main.c",
90896 + .param2 = 1,
90897 + .param3 = 1,
90898 +};
90899 +struct size_overflow_hash _001826_hash = {
90900 + .next = NULL,
90901 + .name = "sep_lock_user_pages",
90902 + .file = "drivers/staging/sep/sep_main.c",
90903 + .param2 = 1,
90904 + .param3 = 1,
90905 +};
90906 +struct size_overflow_hash _001828_hash = {
90907 + .next = NULL,
90908 + .name = "sep_prepare_input_output_dma_table_in_dcb",
90909 + .file = "drivers/staging/sep/sep_main.c",
90910 + .param4 = 1,
90911 + .param5 = 1,
90912 +};
90913 +struct size_overflow_hash _001830_hash = {
90914 + .next = NULL,
90915 + .name = "sep_read",
90916 + .file = "drivers/staging/sep/sep_main.c",
90917 + .param3 = 1,
90918 +};
90919 +struct size_overflow_hash _001831_hash = {
90920 + .next = NULL,
90921 + .name = "alloc_rx_desc_ring",
90922 + .file = "drivers/staging/rtl8187se/r8180_core.c",
90923 + .param2 = 1,
90924 +};
90925 +struct size_overflow_hash _001832_hash = {
90926 + .next = NULL,
90927 + .name = "alloc_subdevices",
90928 + .file = "drivers/staging/comedi/drivers/../comedidev.h",
90929 + .param2 = 1,
90930 +};
90931 +struct size_overflow_hash _001833_hash = {
90932 + .next = NULL,
90933 + .name = "alloc_subdevices",
90934 + .file = "drivers/staging/comedi/drivers/addi-data/../../comedidev.h",
90935 + .param2 = 1,
90936 +};
90937 +struct size_overflow_hash _001834_hash = {
90938 + .next = NULL,
90939 + .name = "comedi_read",
90940 + .file = "drivers/staging/comedi/comedi_fops.c",
90941 + .param3 = 1,
90942 +};
90943 +struct size_overflow_hash _001835_hash = {
90944 + .next = NULL,
90945 + .name = "comedi_write",
90946 + .file = "drivers/staging/comedi/comedi_fops.c",
90947 + .param3 = 1,
90948 +};
90949 +struct size_overflow_hash _001836_hash = {
90950 + .next = NULL,
90951 + .name = "compat_sys_preadv64",
90952 + .file = "fs/compat.c",
90953 + .param3 = 1,
90954 +};
90955 +struct size_overflow_hash _001837_hash = {
90956 + .next = NULL,
90957 + .name = "compat_sys_pwritev64",
90958 + .file = "fs/compat.c",
90959 + .param3 = 1,
90960 +};
90961 +struct size_overflow_hash _001838_hash = {
90962 + .next = NULL,
90963 + .name = "ext_sd_execute_read_data",
90964 + .file = "drivers/staging/rts5139/sd_cprm.c",
90965 + .param9 = 1,
90966 +};
90967 +struct size_overflow_hash _001839_hash = {
90968 + .next = NULL,
90969 + .name = "ext_sd_execute_write_data",
90970 + .file = "drivers/staging/rts5139/sd_cprm.c",
90971 + .param9 = 1,
90972 +};
90973 +struct size_overflow_hash _001840_hash = {
90974 + .next = NULL,
90975 + .name = "ieee80211_wx_set_gen_ie",
90976 + .file = "drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c",
90977 + .param3 = 1,
90978 +};
90979 +struct size_overflow_hash _001841_hash = {
90980 + .next = NULL,
90981 + .name = "ieee80211_wx_set_gen_ie_rsl",
90982 + .file = "drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c",
90983 + .param3 = 1,
90984 +};
90985 +struct size_overflow_hash _001842_hash = {
90986 + .next = NULL,
90987 + .name = "ni_gpct_device_construct",
90988 + .file = "drivers/staging/comedi/drivers/ni_tio.c",
90989 + .param5 = 1,
90990 +};
90991 +struct size_overflow_hash _001843_hash = {
90992 + .next = NULL,
90993 + .name = "Realloc",
90994 + .file = "drivers/staging/comedi/drivers/comedi_bond.c",
90995 + .param2 = 1,
90996 +};
90997 +struct size_overflow_hash _001844_hash = {
90998 + .next = NULL,
90999 + .name = "rtllib_wx_set_gen_ie",
91000 + .file = "drivers/staging/rtl8192e/rtllib_wx.c",
91001 + .param3 = 1,
91002 +};
91003 +struct size_overflow_hash _001845_hash = {
91004 + .next = NULL,
91005 + .name = "rts51x_transfer_data_partial",
91006 + .file = "drivers/staging/rts5139/rts51x_transport.c",
91007 + .param6 = 1,
91008 +};
91009 +struct size_overflow_hash _001846_hash = {
91010 + .next = NULL,
91011 + .name = "store_debug_level",
91012 + .file = "drivers/staging/rtl8192u/ieee80211/ieee80211_module.c",
91013 + .param3 = 1,
91014 +};
91015 +struct size_overflow_hash _001847_hash = {
91016 + .next = NULL,
91017 + .name = "usb_buffer_alloc",
91018 + .file = "drivers/staging/rts5139/rts51x.h",
91019 + .param2 = 1,
91020 +};
91021 +struct size_overflow_hash _001848_hash = {
91022 + .next = NULL,
91023 + .name = "alloc_apertures",
91024 + .file = "include/linux/fb.h",
91025 + .param1 = 1,
91026 +};
91027 +struct size_overflow_hash _001849_hash = {
91028 + .next = NULL,
91029 + .name = "bin_uuid",
91030 + .file = "kernel/sysctl_binary.c",
91031 + .param3 = 1,
91032 +};
91033 +struct size_overflow_hash _001850_hash = {
91034 + .next = &_000640_hash,
91035 + .name = "__copy_from_user_inatomic_nocache",
91036 + .file = "arch/x86/include/asm/uaccess_64.h",
91037 + .param3 = 1,
91038 +};
91039 +struct size_overflow_hash _001851_hash = {
91040 + .next = NULL,
91041 + .name = "do_dmabuf_dirty_sou",
91042 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
91043 + .param7 = 1,
91044 +};
91045 +struct size_overflow_hash _001852_hash = {
91046 + .next = NULL,
91047 + .name = "do_surface_dirty_sou",
91048 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
91049 + .param7 = 1,
91050 +};
91051 +struct size_overflow_hash _001853_hash = {
91052 + .next = NULL,
91053 + .name = "drm_agp_bind_pages",
91054 + .file = "drivers/gpu/drm/drm_agpsupport.c",
91055 + .param3 = 1,
91056 +};
91057 +struct size_overflow_hash _001854_hash = {
91058 + .next = NULL,
91059 + .name = "drm_calloc_large",
91060 + .file = "include/drm/drm_mem_util.h",
91061 + .param1 = 1,
91062 + .param2 = 1,
91063 +};
91064 +struct size_overflow_hash _001856_hash = {
91065 + .next = NULL,
91066 + .name = "drm_ht_create",
91067 + .file = "drivers/gpu/drm/drm_hashtab.c",
91068 + .param2 = 1,
91069 +};
91070 +struct size_overflow_hash _001857_hash = {
91071 + .next = NULL,
91072 + .name = "drm_malloc_ab",
91073 + .file = "include/drm/drm_mem_util.h",
91074 + .param1 = 1,
91075 + .param2 = 1,
91076 +};
91077 +struct size_overflow_hash _001859_hash = {
91078 + .next = NULL,
91079 + .name = "drm_plane_init",
91080 + .file = "drivers/gpu/drm/drm_crtc.c",
91081 + .param6 = 1,
91082 +};
91083 +struct size_overflow_hash _001860_hash = {
91084 + .next = NULL,
91085 + .name = "drm_vmalloc_dma",
91086 + .file = "drivers/gpu/drm/drm_scatter.c",
91087 + .param1 = 1,
91088 +};
91089 +struct size_overflow_hash _001861_hash = {
91090 + .next = NULL,
91091 + .name = "fb_read",
91092 + .file = "drivers/video/fbmem.c",
91093 + .param3 = 1,
91094 +};
91095 +struct size_overflow_hash _001862_hash = {
91096 + .next = NULL,
91097 + .name = "fb_write",
91098 + .file = "drivers/video/fbmem.c",
91099 + .param3 = 1,
91100 +};
91101 +struct size_overflow_hash _001863_hash = {
91102 + .next = NULL,
91103 + .name = "framebuffer_alloc",
91104 + .file = "include/linux/fb.h",
91105 + .param1 = 1,
91106 +};
91107 +struct size_overflow_hash _001864_hash = {
91108 + .next = NULL,
91109 + .name = "i915_cache_sharing_read",
91110 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91111 + .param3 = 1,
91112 +};
91113 +struct size_overflow_hash _001865_hash = {
91114 + .next = NULL,
91115 + .name = "i915_cache_sharing_write",
91116 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91117 + .param3 = 1,
91118 +};
91119 +struct size_overflow_hash _001866_hash = {
91120 + .next = NULL,
91121 + .name = "i915_max_freq_read",
91122 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91123 + .param3 = 1,
91124 +};
91125 +struct size_overflow_hash _001867_hash = {
91126 + .next = NULL,
91127 + .name = "i915_max_freq_write",
91128 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91129 + .param3 = 1,
91130 +};
91131 +struct size_overflow_hash _001868_hash = {
91132 + .next = NULL,
91133 + .name = "i915_wedged_read",
91134 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91135 + .param3 = 1,
91136 +};
91137 +struct size_overflow_hash _001869_hash = {
91138 + .next = NULL,
91139 + .name = "i915_wedged_write",
91140 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
91141 + .param3 = 1,
91142 +};
91143 +struct size_overflow_hash _001870_hash = {
91144 + .next = NULL,
91145 + .name = "__module_alloc",
91146 + .file = "arch/x86/kernel/module.c",
91147 + .param1 = 1,
91148 +};
91149 +struct size_overflow_hash _001871_hash = {
91150 + .next = NULL,
91151 + .name = "module_alloc_update_bounds_rw",
91152 + .file = "kernel/module.c",
91153 + .param1 = 1,
91154 +};
91155 +struct size_overflow_hash _001872_hash = {
91156 + .next = NULL,
91157 + .name = "module_alloc_update_bounds_rx",
91158 + .file = "kernel/module.c",
91159 + .param1 = 1,
91160 +};
91161 +struct size_overflow_hash _001873_hash = {
91162 + .next = NULL,
91163 + .name = "p9_client_read",
91164 + .file = "include/net/9p/client.h",
91165 + .param5 = 1,
91166 +};
91167 +struct size_overflow_hash _001874_hash = {
91168 + .next = NULL,
91169 + .name = "probe_kernel_write",
91170 + .file = "include/linux/uaccess.h",
91171 + .param3 = 1,
91172 +};
91173 +struct size_overflow_hash _001875_hash = {
91174 + .next = NULL,
91175 + .name = "sched_feat_write",
91176 + .file = "kernel/sched/core.c",
91177 + .param3 = 1,
91178 +};
91179 +struct size_overflow_hash _001876_hash = {
91180 + .next = NULL,
91181 + .name = "tstats_write",
91182 + .file = "kernel/time/timer_stats.c",
91183 + .param3 = 1,
91184 +};
91185 +struct size_overflow_hash _001877_hash = {
91186 + .next = NULL,
91187 + .name = "ttm_bo_fbdev_io",
91188 + .file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
91189 + .param4 = 1,
91190 +};
91191 +struct size_overflow_hash _001878_hash = {
91192 + .next = NULL,
91193 + .name = "ttm_bo_io",
91194 + .file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
91195 + .param5 = 1,
91196 +};
91197 +struct size_overflow_hash _001879_hash = {
91198 + .next = NULL,
91199 + .name = "ttm_dma_page_pool_free",
91200 + .file = "drivers/gpu/drm/ttm/ttm_page_alloc_dma.c",
91201 + .param2 = 1,
91202 +};
91203 +struct size_overflow_hash _001880_hash = {
91204 + .next = NULL,
91205 + .name = "ttm_page_pool_free",
91206 + .file = "drivers/gpu/drm/ttm/ttm_page_alloc.c",
91207 + .param2 = 1,
91208 +};
91209 +struct size_overflow_hash _001881_hash = {
91210 + .next = NULL,
91211 + .name = "vmw_execbuf_process",
91212 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c",
91213 + .param5 = 1,
91214 +};
91215 +struct size_overflow_hash _001882_hash = {
91216 + .next = NULL,
91217 + .name = "vmw_fifo_reserve",
91218 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c",
91219 + .param2 = 1,
91220 +};
91221 +struct size_overflow_hash _001883_hash = {
91222 + .next = NULL,
91223 + .name = "vmw_kms_present",
91224 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
91225 + .param9 = 1,
91226 +};
91227 +struct size_overflow_hash _001884_hash = {
91228 + .next = NULL,
91229 + .name = "vmw_kms_readback",
91230 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
91231 + .param6 = 1,
91232 +};
91233 +struct size_overflow_hash _001885_hash = {
91234 + .next = NULL,
91235 + .name = "__copy_from_user_inatomic_nocache",
91236 + .file = "arch/x86/include/asm/uaccess_32.h",
91237 + .param3 = 1,
91238 +};
91239 +struct size_overflow_hash _001886_hash = {
91240 + .next = NULL,
91241 + .name = "arcfb_write",
91242 + .file = "drivers/video/arcfb.c",
91243 + .param3 = 1,
91244 +};
91245 +struct size_overflow_hash _001887_hash = {
91246 + .next = NULL,
91247 + .name = "ath6kl_usb_submit_ctrl_in",
91248 + .file = "drivers/net/wireless/ath/ath6kl/usb.c",
91249 + .param6 = 1,
91250 +};
91251 +struct size_overflow_hash _001888_hash = {
91252 + .next = NULL,
91253 + .name = "ath6kl_usb_submit_ctrl_out",
91254 + .file = "drivers/net/wireless/ath/ath6kl/usb.c",
91255 + .param6 = 1,
91256 +};
91257 +struct size_overflow_hash _001889_hash = {
91258 + .next = NULL,
91259 + .name = "blk_dropped_read",
91260 + .file = "kernel/trace/blktrace.c",
91261 + .param3 = 1,
91262 +};
91263 +struct size_overflow_hash _001890_hash = {
91264 + .next = NULL,
91265 + .name = "blk_msg_write",
91266 + .file = "kernel/trace/blktrace.c",
91267 + .param3 = 1,
91268 +};
91269 +struct size_overflow_hash _001891_hash = {
91270 + .next = NULL,
91271 + .name = "broadsheetfb_write",
91272 + .file = "drivers/video/broadsheetfb.c",
91273 + .param3 = 1,
91274 +};
91275 +struct size_overflow_hash _001892_hash = {
91276 + .next = NULL,
91277 + .name = "cyttsp_probe",
91278 + .file = "drivers/input/touchscreen/cyttsp_core.c",
91279 + .param4 = 1,
91280 +};
91281 +struct size_overflow_hash _001893_hash = {
91282 + .next = NULL,
91283 + .name = "da9052_group_write",
91284 + .file = "include/linux/mfd/da9052/da9052.h",
91285 + .param3 = 1,
91286 +};
91287 +struct size_overflow_hash _001894_hash = {
91288 + .next = NULL,
91289 + .name = "dccpprobe_read",
91290 + .file = "net/dccp/probe.c",
91291 + .param3 = 1,
91292 +};
91293 +struct size_overflow_hash _001895_hash = {
91294 + .next = NULL,
91295 + .name = "__devres_alloc",
91296 + .file = "include/linux/device.h",
91297 + .param2 = 1,
91298 +};
91299 +struct size_overflow_hash _001896_hash = {
91300 + .next = NULL,
91301 + .name = "event_enable_read",
91302 + .file = "kernel/trace/trace_events.c",
91303 + .param3 = 1,
91304 +};
91305 +struct size_overflow_hash _001897_hash = {
91306 + .next = NULL,
91307 + .name = "event_filter_read",
91308 + .file = "kernel/trace/trace_events.c",
91309 + .param3 = 1,
91310 +};
91311 +struct size_overflow_hash _001898_hash = {
91312 + .next = NULL,
91313 + .name = "event_filter_write",
91314 + .file = "kernel/trace/trace_events.c",
91315 + .param3 = 1,
91316 +};
91317 +struct size_overflow_hash _001899_hash = {
91318 + .next = NULL,
91319 + .name = "event_id_read",
91320 + .file = "kernel/trace/trace_events.c",
91321 + .param3 = 1,
91322 +};
91323 +struct size_overflow_hash _001900_hash = {
91324 + .next = NULL,
91325 + .name = "fb_sys_read",
91326 + .file = "include/linux/fb.h",
91327 + .param3 = 1,
91328 +};
91329 +struct size_overflow_hash _001901_hash = {
91330 + .next = NULL,
91331 + .name = "fb_sys_write",
91332 + .file = "include/linux/fb.h",
91333 + .param3 = 1,
91334 +};
91335 +struct size_overflow_hash _001902_hash = {
91336 + .next = NULL,
91337 + .name = "ftrace_pid_write",
91338 + .file = "kernel/trace/ftrace.c",
91339 + .param3 = 1,
91340 +};
91341 +struct size_overflow_hash _001903_hash = {
91342 + .next = NULL,
91343 + .name = "ftrace_profile_read",
91344 + .file = "kernel/trace/ftrace.c",
91345 + .param3 = 1,
91346 +};
91347 +struct size_overflow_hash _001904_hash = {
91348 + .next = NULL,
91349 + .name = "hecubafb_write",
91350 + .file = "drivers/video/hecubafb.c",
91351 + .param3 = 1,
91352 +};
91353 +struct size_overflow_hash _001905_hash = {
91354 + .next = NULL,
91355 + .name = "hsc_msg_alloc",
91356 + .file = "drivers/hsi/clients/hsi_char.c",
91357 + .param1 = 1,
91358 +};
91359 +struct size_overflow_hash _001906_hash = {
91360 + .next = NULL,
91361 + .name = "hsc_write",
91362 + .file = "drivers/hsi/clients/hsi_char.c",
91363 + .param3 = 1,
91364 +};
91365 +struct size_overflow_hash _001907_hash = {
91366 + .next = NULL,
91367 + .name = "hsi_alloc_controller",
91368 + .file = "include/linux/hsi/hsi.h",
91369 + .param1 = 1,
91370 +};
91371 +struct size_overflow_hash _001908_hash = {
91372 + .next = NULL,
91373 + .name = "hsi_register_board_info",
91374 + .file = "include/linux/hsi/hsi.h",
91375 + .param2 = 1,
91376 +};
91377 +struct size_overflow_hash _001909_hash = {
91378 + .next = NULL,
91379 + .name = "ivtvfb_write",
91380 + .file = "drivers/media/video/ivtv/ivtvfb.c",
91381 + .param3 = 1,
91382 +};
91383 +struct size_overflow_hash _001910_hash = {
91384 + .next = NULL,
91385 + .name = "metronomefb_write",
91386 + .file = "drivers/video/metronomefb.c",
91387 + .param3 = 1,
91388 +};
91389 +struct size_overflow_hash _001911_hash = {
91390 + .next = NULL,
91391 + .name = "odev_update",
91392 + .file = "drivers/video/via/viafbdev.c",
91393 + .param2 = 1,
91394 +};
91395 +struct size_overflow_hash _001912_hash = {
91396 + .next = NULL,
91397 + .name = "oz_add_farewell",
91398 + .file = "drivers/staging/ozwpan/ozproto.c",
91399 + .param5 = 1,
91400 +};
91401 +struct size_overflow_hash _001913_hash = {
91402 + .next = NULL,
91403 + .name = "oz_cdev_read",
91404 + .file = "drivers/staging/ozwpan/ozcdev.c",
91405 + .param3 = 1,
91406 +};
91407 +struct size_overflow_hash _001914_hash = {
91408 + .next = NULL,
91409 + .name = "oz_cdev_write",
91410 + .file = "drivers/staging/ozwpan/ozcdev.c",
91411 + .param3 = 1,
91412 +};
91413 +struct size_overflow_hash _001915_hash = {
91414 + .next = NULL,
91415 + .name = "pmcraid_copy_sglist",
91416 + .file = "drivers/scsi/pmcraid.c",
91417 + .param3 = 1,
91418 +};
91419 +struct size_overflow_hash _001916_hash = {
91420 + .next = NULL,
91421 + .name = "probes_write",
91422 + .file = "kernel/trace/trace_kprobe.c",
91423 + .param3 = 1,
91424 +};
91425 +struct size_overflow_hash _001917_hash = {
91426 + .next = NULL,
91427 + .name = "proc_fault_inject_read",
91428 + .file = "fs/proc/base.c",
91429 + .param3 = 1,
91430 +};
91431 +struct size_overflow_hash _001918_hash = {
91432 + .next = NULL,
91433 + .name = "proc_fault_inject_write",
91434 + .file = "fs/proc/base.c",
91435 + .param3 = 1,
91436 +};
91437 +struct size_overflow_hash _001919_hash = {
91438 + .next = NULL,
91439 + .name = "rb_simple_read",
91440 + .file = "kernel/trace/trace.c",
91441 + .param3 = 1,
91442 +};
91443 +struct size_overflow_hash _001920_hash = {
91444 + .next = NULL,
91445 + .name = "read_file_dfs",
91446 + .file = "drivers/net/wireless/ath/ath9k/dfs_debug.c",
91447 + .param3 = 1,
91448 +};
91449 +struct size_overflow_hash _001921_hash = {
91450 + .next = NULL,
91451 + .name = "show_header",
91452 + .file = "kernel/trace/trace_events.c",
91453 + .param3 = 1,
91454 +};
91455 +struct size_overflow_hash _001922_hash = {
91456 + .next = NULL,
91457 + .name = "stack_max_size_read",
91458 + .file = "kernel/trace/trace_stack.c",
91459 + .param3 = 1,
91460 +};
91461 +struct size_overflow_hash _001923_hash = {
91462 + .next = NULL,
91463 + .name = "subsystem_filter_read",
91464 + .file = "kernel/trace/trace_events.c",
91465 + .param3 = 1,
91466 +};
91467 +struct size_overflow_hash _001924_hash = {
91468 + .next = NULL,
91469 + .name = "subsystem_filter_write",
91470 + .file = "kernel/trace/trace_events.c",
91471 + .param3 = 1,
91472 +};
91473 +struct size_overflow_hash _001925_hash = {
91474 + .next = NULL,
91475 + .name = "system_enable_read",
91476 + .file = "kernel/trace/trace_events.c",
91477 + .param3 = 1,
91478 +};
91479 +struct size_overflow_hash _001926_hash = {
91480 + .next = NULL,
91481 + .name = "trace_options_core_read",
91482 + .file = "kernel/trace/trace.c",
91483 + .param3 = 1,
91484 +};
91485 +struct size_overflow_hash _001927_hash = {
91486 + .next = NULL,
91487 + .name = "trace_options_read",
91488 + .file = "kernel/trace/trace.c",
91489 + .param3 = 1,
91490 +};
91491 +struct size_overflow_hash _001928_hash = {
91492 + .next = NULL,
91493 + .name = "trace_seq_to_user",
91494 + .file = "include/linux/trace_seq.h",
91495 + .param3 = 1,
91496 +};
91497 +struct size_overflow_hash _001929_hash = {
91498 + .next = NULL,
91499 + .name = "tracing_buffers_read",
91500 + .file = "kernel/trace/trace.c",
91501 + .param3 = 1,
91502 +};
91503 +struct size_overflow_hash _001930_hash = {
91504 + .next = NULL,
91505 + .name = "tracing_clock_write",
91506 + .file = "kernel/trace/trace.c",
91507 + .param3 = 1,
91508 +};
91509 +struct size_overflow_hash _001931_hash = {
91510 + .next = NULL,
91511 + .name = "tracing_cpumask_read",
91512 + .file = "kernel/trace/trace.c",
91513 + .param3 = 1,
91514 +};
91515 +struct size_overflow_hash _001932_hash = {
91516 + .next = NULL,
91517 + .name = "tracing_ctrl_read",
91518 + .file = "kernel/trace/trace.c",
91519 + .param3 = 1,
91520 +};
91521 +struct size_overflow_hash _001933_hash = {
91522 + .next = NULL,
91523 + .name = "tracing_entries_read",
91524 + .file = "kernel/trace/trace.c",
91525 + .param3 = 1,
91526 +};
91527 +struct size_overflow_hash _001934_hash = {
91528 + .next = NULL,
91529 + .name = "tracing_max_lat_read",
91530 + .file = "kernel/trace/trace.c",
91531 + .param3 = 1,
91532 +};
91533 +struct size_overflow_hash _001935_hash = {
91534 + .next = NULL,
91535 + .name = "tracing_read_dyn_info",
91536 + .file = "kernel/trace/trace.c",
91537 + .param3 = 1,
91538 +};
91539 +struct size_overflow_hash _001936_hash = {
91540 + .next = NULL,
91541 + .name = "tracing_readme_read",
91542 + .file = "kernel/trace/trace.c",
91543 + .param3 = 1,
91544 +};
91545 +struct size_overflow_hash _001937_hash = {
91546 + .next = NULL,
91547 + .name = "tracing_saved_cmdlines_read",
91548 + .file = "kernel/trace/trace.c",
91549 + .param3 = 1,
91550 +};
91551 +struct size_overflow_hash _001938_hash = {
91552 + .next = NULL,
91553 + .name = "tracing_set_trace_read",
91554 + .file = "kernel/trace/trace.c",
91555 + .param3 = 1,
91556 +};
91557 +struct size_overflow_hash _001939_hash = {
91558 + .next = NULL,
91559 + .name = "tracing_set_trace_write",
91560 + .file = "kernel/trace/trace.c",
91561 + .param3 = 1,
91562 +};
91563 +struct size_overflow_hash _001940_hash = {
91564 + .next = NULL,
91565 + .name = "tracing_stats_read",
91566 + .file = "kernel/trace/trace.c",
91567 + .param3 = 1,
91568 +};
91569 +struct size_overflow_hash _001941_hash = {
91570 + .next = NULL,
91571 + .name = "tracing_total_entries_read",
91572 + .file = "kernel/trace/trace.c",
91573 + .param3 = 1,
91574 +};
91575 +struct size_overflow_hash _001942_hash = {
91576 + .next = NULL,
91577 + .name = "tracing_trace_options_write",
91578 + .file = "kernel/trace/trace.c",
91579 + .param3 = 1,
91580 +};
91581 +struct size_overflow_hash _001943_hash = {
91582 + .next = NULL,
91583 + .name = "ufx_alloc_urb_list",
91584 + .file = "drivers/video/smscufx.c",
91585 + .param3 = 1,
91586 +};
91587 +struct size_overflow_hash _001944_hash = {
91588 + .next = NULL,
91589 + .name = "u_memcpya",
91590 + .file = "drivers/gpu/drm/nouveau/nouveau_gem.c",
91591 + .param2 = 1,
91592 + .param3 = 1,
91593 +};
91594 +struct size_overflow_hash _001946_hash = {
91595 + .next = NULL,
91596 + .name = "v9fs_fid_readn",
91597 + .file = "fs/9p/vfs_file.c",
91598 + .param4 = 1,
91599 +};
91600 +struct size_overflow_hash _001947_hash = {
91601 + .next = NULL,
91602 + .name = "v9fs_file_read",
91603 + .file = "fs/9p/vfs_file.c",
91604 + .param3 = 1,
91605 +};
91606 +struct size_overflow_hash _001948_hash = {
91607 + .next = NULL,
91608 + .name = "viafb_dfph_proc_write",
91609 + .file = "drivers/video/via/viafbdev.c",
91610 + .param3 = 1,
91611 +};
91612 +struct size_overflow_hash _001949_hash = {
91613 + .next = NULL,
91614 + .name = "viafb_dfpl_proc_write",
91615 + .file = "drivers/video/via/viafbdev.c",
91616 + .param3 = 1,
91617 +};
91618 +struct size_overflow_hash _001950_hash = {
91619 + .next = NULL,
91620 + .name = "viafb_dvp0_proc_write",
91621 + .file = "drivers/video/via/viafbdev.c",
91622 + .param3 = 1,
91623 +};
91624 +struct size_overflow_hash _001951_hash = {
91625 + .next = NULL,
91626 + .name = "viafb_dvp1_proc_write",
91627 + .file = "drivers/video/via/viafbdev.c",
91628 + .param3 = 1,
91629 +};
91630 +struct size_overflow_hash _001952_hash = {
91631 + .next = NULL,
91632 + .name = "viafb_vt1636_proc_write",
91633 + .file = "drivers/video/via/viafbdev.c",
91634 + .param3 = 1,
91635 +};
91636 +struct size_overflow_hash _001953_hash = {
91637 + .next = NULL,
91638 + .name = "vivi_read",
91639 + .file = "drivers/media/video/vivi.c",
91640 + .param3 = 1,
91641 +};
91642 +struct size_overflow_hash *size_overflow_hash[65536] = {
91643 + [56878] = &_000001_hash,
91644 + [11151] = &_000002_hash,
91645 + [17854] = &_000003_hash,
91646 + [4132] = &_000004_hash,
91647 + [39070] = &_000005_hash,
91648 + [35447] = &_000007_hash,
91649 + [47830] = &_000008_hash,
91650 + [65254] = &_000009_hash,
91651 + [17521] = &_000011_hash,
91652 + [41425] = &_000012_hash,
91653 + [5785] = &_000013_hash,
91654 + [19960] = &_000014_hash,
91655 + [26729] = &_000015_hash,
91656 + [7954] = &_000016_hash,
91657 + [22403] = &_000017_hash,
91658 + [23258] = &_000018_hash,
91659 + [55695] = &_000019_hash,
91660 + [38964] = &_000020_hash,
91661 + [64250] = &_000021_hash,
91662 + [31825] = &_000022_hash,
91663 + [47446] = &_000023_hash,
91664 + [61521] = &_000024_hash,
91665 + [64227] = &_000025_hash,
91666 + [53378] = &_000026_hash,
91667 + [8885] = &_000027_hash,
91668 + [62101] = &_000028_hash,
91669 + [18152] = &_000029_hash,
91670 + [37525] = &_000030_hash,
91671 + [25827] = &_000031_hash,
91672 + [1169] = &_000032_hash,
91673 + [11925] = &_000033_hash,
91674 + [20558] = &_000034_hash,
91675 + [44019] = &_000035_hash,
91676 + [21909] = &_000036_hash,
91677 + [63679] = &_000037_hash,
91678 + [39450] = &_000038_hash,
91679 + [25085] = &_000039_hash,
91680 + [17830] = &_000040_hash,
91681 + [14329] = &_000041_hash,
91682 + [31235] = &_000042_hash,
91683 + [48207] = &_000043_hash,
91684 + [34918] = &_000044_hash,
91685 + [46839] = &_000045_hash,
91686 + [57930] = &_000046_hash,
91687 + [41364] = &_000047_hash,
91688 + [17581] = &_000048_hash,
91689 + [45922] = &_000049_hash,
91690 + [49567] = &_000050_hash,
91691 + [18248] = &_000051_hash,
91692 + [25528] = &_000052_hash,
91693 + [61874] = &_000053_hash,
91694 + [22591] = &_000054_hash,
91695 + [48456] = &_000055_hash,
91696 + [8743] = &_000056_hash,
91697 + [39131] = &_000057_hash,
91698 + [48328] = &_000058_hash,
91699 + [47136] = &_000059_hash,
91700 + [6358] = &_000060_hash,
91701 + [12252] = &_000061_hash,
91702 + [49340] = &_000062_hash,
91703 + [45875] = &_000063_hash,
91704 + [52182] = &_000065_hash,
91705 + [31149] = &_000067_hash,
91706 + [20455] = &_000068_hash,
91707 + [19917] = &_000070_hash,
91708 + [64771] = &_000071_hash,
91709 + [25140] = &_000072_hash,
91710 + [34097] = &_000073_hash,
91711 + [58131] = &_000074_hash,
91712 + [65311] = &_000075_hash,
91713 + [60609] = &_000076_hash,
91714 + [1917] = &_000077_hash,
91715 + [15337] = &_000078_hash,
91716 + [4732] = &_000079_hash,
91717 + [38783] = &_000080_hash,
91718 + [37249] = &_000081_hash,
91719 + [9234] = &_000082_hash,
91720 + [33309] = &_000083_hash,
91721 + [22389] = &_000084_hash,
91722 + [56319] = &_000085_hash,
91723 + [21496] = &_000086_hash,
91724 + [8163] = &_000087_hash,
91725 + [58766] = &_000088_hash,
91726 + [21048] = &_000089_hash,
91727 + [51221] = &_000090_hash,
91728 + [21498] = &_000091_hash,
91729 + [42627] = &_000092_hash,
91730 + [53059] = &_000094_hash,
91731 + [52870] = &_000095_hash,
91732 + [1567] = &_000096_hash,
91733 + [38330] = &_000097_hash,
91734 + [30892] = &_000098_hash,
91735 + [16927] = &_000099_hash,
91736 + [16461] = &_000100_hash,
91737 + [5634] = &_000101_hash,
91738 + [16496] = &_000103_hash,
91739 + [40012] = &_000104_hash,
91740 + [46014] = &_000105_hash,
91741 + [39600] = &_000106_hash,
91742 + [7435] = &_000107_hash,
91743 + [13332] = &_000109_hash,
91744 + [36665] = &_000110_hash,
91745 + [12413] = &_000111_hash,
91746 + [27279] = &_000112_hash,
91747 + [44774] = &_000113_hash,
91748 + [14479] = &_000114_hash,
91749 + [32447] = &_000115_hash,
91750 + [15439] = &_000116_hash,
91751 + [17932] = &_000117_hash,
91752 + [26096] = &_000118_hash,
91753 + [50814] = &_000119_hash,
91754 + [22598] = &_000120_hash,
91755 + [48287] = &_000121_hash,
91756 + [15611] = &_000122_hash,
91757 + [13414] = &_000123_hash,
91758 + [40371] = &_000124_hash,
91759 + [284] = &_000125_hash,
91760 + [6293] = &_000127_hash,
91761 + [60587] = &_000128_hash,
91762 + [8181] = &_000129_hash,
91763 + [27451] = &_000130_hash,
91764 + [29259] = &_000131_hash,
91765 + [41172] = &_000132_hash,
91766 + [3315] = &_000133_hash,
91767 + [37550] = &_000134_hash,
91768 + [40395] = &_000135_hash,
91769 + [24124] = &_000136_hash,
91770 + [63535] = &_000137_hash,
91771 + [14981] = &_000138_hash,
91772 + [52008] = &_000139_hash,
91773 + [22091] = &_000140_hash,
91774 + [64800] = &_000141_hash,
91775 + [14919] = &_000142_hash,
91776 + [60340] = &_000143_hash,
91777 + [34205] = &_000145_hash,
91778 + [65246] = &_000146_hash,
91779 + [1299] = &_000147_hash,
91780 + [33165] = &_000148_hash,
91781 + [22394] = &_000149_hash,
91782 + [49562] = &_000150_hash,
91783 + [56881] = &_000151_hash,
91784 + [13870] = &_000152_hash,
91785 + [65074] = &_000153_hash,
91786 + [11553] = &_000154_hash,
91787 + [43222] = &_000155_hash,
91788 + [17984] = &_000156_hash,
91789 + [26811] = &_000157_hash,
91790 + [30848] = &_000158_hash,
91791 + [15627] = &_000159_hash,
91792 + [43101] = &_000160_hash,
91793 + [4082] = &_000161_hash,
91794 + [43692] = &_000162_hash,
91795 + [21622] = &_000163_hash,
91796 + [50734] = &_000164_hash,
91797 + [803] = &_000166_hash,
91798 + [64674] = &_000168_hash,
91799 + [57538] = &_000170_hash,
91800 + [42442] = &_000171_hash,
91801 + [23031] = &_000172_hash,
91802 + [40663] = &_000173_hash,
91803 + [51180] = &_000174_hash,
91804 + [24173] = &_000175_hash,
91805 + [9286] = &_000176_hash,
91806 + [49517] = &_000177_hash,
91807 + [34878] = &_000180_hash,
91808 + [22819] = &_000181_hash,
91809 + [64314] = &_000182_hash,
91810 + [20494] = &_000183_hash,
91811 + [9483] = &_000184_hash,
91812 + [26518] = &_000185_hash,
91813 + [44651] = &_000186_hash,
91814 + [1188] = &_000187_hash,
91815 + [36031] = &_000188_hash,
91816 + [33469] = &_000189_hash,
91817 + [19672] = &_000190_hash,
91818 + [3216] = &_000191_hash,
91819 + [25071] = &_000192_hash,
91820 + [11744] = &_000194_hash,
91821 + [2358] = &_000196_hash,
91822 + [10146] = &_000198_hash,
91823 + [58709] = &_000199_hash,
91824 + [64773] = &_000200_hash,
91825 + [6159] = &_000201_hash,
91826 + [28617] = &_000202_hash,
91827 + [61067] = &_000203_hash,
91828 + [12884] = &_000204_hash,
91829 + [37308] = &_000205_hash,
91830 + [59973] = &_000206_hash,
91831 + [35895] = &_000207_hash,
91832 + [24951] = &_000208_hash,
91833 + [3070] = &_000209_hash,
91834 + [61023] = &_000210_hash,
91835 + [45702] = &_000211_hash,
91836 + [5533] = &_000212_hash,
91837 + [29186] = &_000213_hash,
91838 + [26311] = &_000214_hash,
91839 + [40182] = &_000215_hash,
91840 + [50505] = &_000216_hash,
91841 + [59061] = &_000217_hash,
91842 + [27511] = &_000218_hash,
91843 + [63286] = &_000219_hash,
91844 + [6678] = &_000220_hash,
91845 + [23065] = &_000222_hash,
91846 + [18156] = &_000223_hash,
91847 + [53757] = &_000224_hash,
91848 + [53720] = &_000225_hash,
91849 + [50241] = &_000226_hash,
91850 + [22498] = &_000227_hash,
91851 + [10991] = &_000228_hash,
91852 + [40026] = &_000229_hash,
91853 + [19995] = &_000230_hash,
91854 + [30445] = &_000231_hash,
91855 + [57691] = &_000232_hash,
91856 + [23150] = &_000233_hash,
91857 + [9960] = &_000234_hash,
91858 + [8736] = &_000235_hash,
91859 + [23750] = &_000237_hash,
91860 + [18393] = &_000238_hash,
91861 + [28541] = &_000240_hash,
91862 + [59944] = &_000241_hash,
91863 + [35042] = &_000242_hash,
91864 + [63488] = &_000243_hash,
91865 + [27286] = &_000244_hash,
91866 + [46922] = &_000245_hash,
91867 + [11860] = &_000246_hash,
91868 + [52928] = &_000247_hash,
91869 + [46714] = &_000248_hash,
91870 + [57313] = &_000249_hash,
91871 + [61978] = &_000250_hash,
91872 + [61063] = &_000251_hash,
91873 + [22271] = &_000252_hash,
91874 + [4214] = &_000253_hash,
91875 + [46247] = &_000254_hash,
91876 + [33246] = &_000255_hash,
91877 + [58325] = &_000257_hash,
91878 + [47399] = &_000259_hash,
91879 + [34963] = &_000260_hash,
91880 + [21221] = &_000261_hash,
91881 + [32211] = &_000262_hash,
91882 + [20854] = &_000263_hash,
91883 + [49351] = &_000264_hash,
91884 + [52341] = &_000265_hash,
91885 + [53533] = &_000266_hash,
91886 + [52267] = &_000267_hash,
91887 + [46753] = &_000268_hash,
91888 + [2115] = &_000269_hash,
91889 + [44017] = &_000271_hash,
91890 + [13495] = &_000272_hash,
91891 + [12988] = &_000273_hash,
91892 + [55227] = &_000274_hash,
91893 + [47762] = &_000276_hash,
91894 + [17613] = &_000277_hash,
91895 + [52037] = &_000278_hash,
91896 + [5994] = &_000279_hash,
91897 + [46818] = &_000280_hash,
91898 + [13467] = &_000281_hash,
91899 + [61848] = &_000282_hash,
91900 + [43082] = &_000284_hash,
91901 + [55732] = &_000286_hash,
91902 + [2543] = &_000287_hash,
91903 + [51694] = &_000288_hash,
91904 + [18402] = &_000289_hash,
91905 + [38282] = &_000290_hash,
91906 + [5456] = &_000291_hash,
91907 + [58261] = &_000292_hash,
91908 + [24792] = &_000293_hash,
91909 + [6422] = &_000294_hash,
91910 + [63953] = &_000295_hash,
91911 + [27384] = &_000296_hash,
91912 + [47213] = &_000297_hash,
91913 + [23548] = &_000298_hash,
91914 + [47858] = &_000299_hash,
91915 + [52501] = &_000300_hash,
91916 + [12475] = &_000301_hash,
91917 + [52921] = &_000302_hash,
91918 + [19120] = &_000303_hash,
91919 + [14355] = &_000304_hash,
91920 + [30563] = &_000305_hash,
91921 + [14942] = &_000306_hash,
91922 + [30969] = &_000307_hash,
91923 + [57776] = &_000308_hash,
91924 + [21956] = &_000309_hash,
91925 + [44050] = &_000310_hash,
91926 + [2193] = &_000311_hash,
91927 + [44818] = &_000312_hash,
91928 + [50616] = &_000313_hash,
91929 + [49299] = &_000314_hash,
91930 + [2796] = &_000315_hash,
91931 + [4190] = &_000316_hash,
91932 + [11548] = &_000317_hash,
91933 + [53798] = &_000318_hash,
91934 + [60370] = &_000319_hash,
91935 + [35863] = &_000320_hash,
91936 + [54595] = &_000322_hash,
91937 + [2808] = &_000323_hash,
91938 + [24656] = &_000324_hash,
91939 + [895] = &_000325_hash,
91940 + [32809] = &_000326_hash,
91941 + [55621] = &_000327_hash,
91942 + [1733] = &_000328_hash,
91943 + [36069] = &_000330_hash,
91944 + [23714] = &_000331_hash,
91945 + [26020] = &_000332_hash,
91946 + [63875] = &_000333_hash,
91947 + [8919] = &_000335_hash,
91948 + [23906] = &_000336_hash,
91949 + [59497] = &_000337_hash,
91950 + [34782] = &_000338_hash,
91951 + [40998] = &_000339_hash,
91952 + [33328] = &_000340_hash,
91953 + [17866] = &_000341_hash,
91954 + [38741] = &_000342_hash,
91955 + [53939] = &_000343_hash,
91956 + [14658] = &_000344_hash,
91957 + [42465] = &_000345_hash,
91958 + [49600] = &_000346_hash,
91959 + [7391] = &_000347_hash,
91960 + [43616] = &_000348_hash,
91961 + [16775] = &_000349_hash,
91962 + [41393] = &_000350_hash,
91963 + [10532] = &_000351_hash,
91964 + [50366] = &_000352_hash,
91965 + [33324] = &_000353_hash,
91966 + [38200] = &_000354_hash,
91967 + [59315] = &_000355_hash,
91968 + [33916] = &_000356_hash,
91969 + [36593] = &_000357_hash,
91970 + [63079] = &_000358_hash,
91971 + [379] = &_000359_hash,
91972 + [34248] = &_000360_hash,
91973 + [27251] = &_000361_hash,
91974 + [29460] = &_000362_hash,
91975 + [7461] = &_000363_hash,
91976 + [9870] = &_000364_hash,
91977 + [44596] = &_000365_hash,
91978 + [45157] = &_000366_hash,
91979 + [55069] = &_000367_hash,
91980 + [29452] = &_000368_hash,
91981 + [54888] = &_000369_hash,
91982 + [31885] = &_000370_hash,
91983 + [20206] = &_000371_hash,
91984 + [20325] = &_000373_hash,
91985 + [18488] = &_000374_hash,
91986 + [22017] = &_000375_hash,
91987 + [57485] = &_000376_hash,
91988 + [49827] = &_000377_hash,
91989 + [37770] = &_000379_hash,
91990 + [52668] = &_000380_hash,
91991 + [13724] = &_000381_hash,
91992 + [59701] = &_000382_hash,
91993 + [11954] = &_000383_hash,
91994 + [9890] = &_000384_hash,
91995 + [17684] = &_000385_hash,
91996 + [18158] = &_000386_hash,
91997 + [61318] = &_000387_hash,
91998 + [2760] = &_000388_hash,
91999 + [38444] = &_000390_hash,
92000 + [55856] = &_000392_hash,
92001 + [34762] = &_000393_hash,
92002 + [48360] = &_000394_hash,
92003 + [40885] = &_000395_hash,
92004 + [36032] = &_000396_hash,
92005 + [52057] = &_000397_hash,
92006 + [12463] = &_000398_hash,
92007 + [30616] = &_000399_hash,
92008 + [38680] = &_000400_hash,
92009 + [41742] = &_000401_hash,
92010 + [50662] = &_000402_hash,
92011 + [48440] = &_000403_hash,
92012 + [34418] = &_000404_hash,
92013 + [64275] = &_000405_hash,
92014 + [12231] = &_000406_hash,
92015 + [53530] = &_000407_hash,
92016 + [54723] = &_000408_hash,
92017 + [19490] = &_000409_hash,
92018 + [11595] = &_000410_hash,
92019 + [15277] = &_000411_hash,
92020 + [4811] = &_000412_hash,
92021 + [42017] = &_000413_hash,
92022 + [17238] = &_000414_hash,
92023 + [55439] = &_000415_hash,
92024 + [45794] = &_000416_hash,
92025 + [60027] = &_000417_hash,
92026 + [3750] = &_000418_hash,
92027 + [11091] = &_000419_hash,
92028 + [32935] = &_000420_hash,
92029 + [22809] = &_000422_hash,
92030 + [60193] = &_000423_hash,
92031 + [14396] = &_000424_hash,
92032 + [18101] = &_000425_hash,
92033 + [46395] = &_000426_hash,
92034 + [24339] = &_000427_hash,
92035 + [26065] = &_000428_hash,
92036 + [43016] = &_000429_hash,
92037 + [41996] = &_000430_hash,
92038 + [7371] = &_000431_hash,
92039 + [32968] = &_000432_hash,
92040 + [53082] = &_000433_hash,
92041 + [38798] = &_000434_hash,
92042 + [12726] = &_000435_hash,
92043 + [55018] = &_000436_hash,
92044 + [26114] = &_000437_hash,
92045 + [31697] = &_000438_hash,
92046 + [21401] = &_000441_hash,
92047 + [33193] = &_000442_hash,
92048 + [52271] = &_000443_hash,
92049 + [20847] = &_000444_hash,
92050 + [30754] = &_000445_hash,
92051 + [54440] = &_000446_hash,
92052 + [22059] = &_000447_hash,
92053 + [47566] = &_000448_hash,
92054 + [22926] = &_000449_hash,
92055 + [20788] = &_000450_hash,
92056 + [18162] = &_000451_hash,
92057 + [65006] = &_000452_hash,
92058 + [11523] = &_000453_hash,
92059 + [29207] = &_000454_hash,
92060 + [18071] = &_000455_hash,
92061 + [7601] = &_000456_hash,
92062 + [12773] = &_000457_hash,
92063 + [61543] = &_000458_hash,
92064 + [5578] = &_000460_hash,
92065 + [49050] = &_000461_hash,
92066 + [51965] = &_000462_hash,
92067 + [6807] = &_000463_hash,
92068 + [22982] = &_000464_hash,
92069 + [36769] = &_000465_hash,
92070 + [53892] = &_000466_hash,
92071 + [2547] = &_000467_hash,
92072 + [53678] = &_000468_hash,
92073 + [61439] = &_000469_hash,
92074 + [31287] = &_000470_hash,
92075 + [6125] = &_000471_hash,
92076 + [57511] = &_000472_hash,
92077 + [13001] = &_000473_hash,
92078 + [62932] = &_000474_hash,
92079 + [62284] = &_000475_hash,
92080 + [9472] = &_000476_hash,
92081 + [26260] = &_000477_hash,
92082 + [63065] = &_000478_hash,
92083 + [18949] = &_000479_hash,
92084 + [29891] = &_000481_hash,
92085 + [41916] = &_000482_hash,
92086 + [40474] = &_000483_hash,
92087 + [63551] = &_000484_hash,
92088 + [36557] = &_000485_hash,
92089 + [2994] = &_000486_hash,
92090 + [5521] = &_000487_hash,
92091 + [51016] = &_000488_hash,
92092 + [7644] = &_000489_hash,
92093 + [55103] = &_000490_hash,
92094 + [11488] = &_000491_hash,
92095 + [7184] = &_000492_hash,
92096 + [36934] = &_000493_hash,
92097 + [54855] = &_000494_hash,
92098 + [63193] = &_000495_hash,
92099 + [12369] = &_000496_hash,
92100 + [15828] = &_000497_hash,
92101 + [61322] = &_000498_hash,
92102 + [5412] = &_000499_hash,
92103 + [28089] = &_000500_hash,
92104 + [64306] = &_000502_hash,
92105 + [24071] = &_000503_hash,
92106 + [50308] = &_000504_hash,
92107 + [38790] = &_000505_hash,
92108 + [9838] = &_000506_hash,
92109 + [18983] = &_000507_hash,
92110 + [9656] = &_000508_hash,
92111 + [18950] = &_000509_hash,
92112 + [59749] = &_000510_hash,
92113 + [20465] = &_000511_hash,
92114 + [4765] = &_000512_hash,
92115 + [16169] = &_000513_hash,
92116 + [6930] = &_000514_hash,
92117 + [16926] = &_000515_hash,
92118 + [35218] = &_000516_hash,
92119 + [19956] = &_000517_hash,
92120 + [55255] = &_000518_hash,
92121 + [861] = &_000519_hash,
92122 + [26574] = &_000520_hash,
92123 + [26794] = &_000521_hash,
92124 + [2133] = &_000522_hash,
92125 + [44616] = &_000523_hash,
92126 + [12840] = &_000524_hash,
92127 + [60426] = &_000525_hash,
92128 + [18133] = &_000526_hash,
92129 + [30479] = &_000527_hash,
92130 + [3219] = &_000528_hash,
92131 + [36488] = &_000529_hash,
92132 + [62043] = &_000530_hash,
92133 + [21714] = &_000532_hash,
92134 + [48007] = &_000533_hash,
92135 + [49969] = &_000534_hash,
92136 + [7701] = &_000535_hash,
92137 + [11521] = &_000536_hash,
92138 + [4269] = &_000537_hash,
92139 + [37627] = &_000539_hash,
92140 + [33555] = &_000540_hash,
92141 + [25900] = &_000541_hash,
92142 + [31709] = &_000542_hash,
92143 + [44626] = &_000544_hash,
92144 + [1679] = &_000545_hash,
92145 + [18349] = &_000546_hash,
92146 + [15338] = &_000547_hash,
92147 + [57935] = &_000548_hash,
92148 + [55850] = &_000549_hash,
92149 + [36063] = &_000550_hash,
92150 + [56674] = &_000551_hash,
92151 + [21379] = &_000552_hash,
92152 + [18507] = &_000553_hash,
92153 + [55719] = &_000554_hash,
92154 + [31210] = &_000555_hash,
92155 + [36207] = &_000556_hash,
92156 + [64180] = &_000557_hash,
92157 + [41770] = &_000558_hash,
92158 + [11600] = &_000559_hash,
92159 + [36638] = &_000560_hash,
92160 + [25576] = &_000561_hash,
92161 + [7000] = &_000562_hash,
92162 + [34187] = &_000563_hash,
92163 + [58533] = &_000564_hash,
92164 + [5083] = &_000565_hash,
92165 + [62614] = &_000566_hash,
92166 + [20085] = &_000567_hash,
92167 + [1135] = &_000568_hash,
92168 + [25613] = &_000569_hash,
92169 + [9541] = &_000570_hash,
92170 + [30577] = &_000571_hash,
92171 + [35722] = &_000572_hash,
92172 + [60407] = &_000573_hash,
92173 + [29465] = &_000574_hash,
92174 + [46891] = &_000575_hash,
92175 + [43633] = &_000576_hash,
92176 + [53743] = &_000577_hash,
92177 + [16196] = &_000578_hash,
92178 + [34425] = &_000580_hash,
92179 + [9646] = &_000581_hash,
92180 + [59756] = &_000583_hash,
92181 + [45524] = &_000584_hash,
92182 + [36702] = &_000585_hash,
92183 + [36747] = &_000586_hash,
92184 + [33643] = &_000588_hash,
92185 + [29158] = &_000589_hash,
92186 + [49662] = &_000590_hash,
92187 + [51062] = &_000591_hash,
92188 + [64755] = &_000592_hash,
92189 + [4829] = &_000594_hash,
92190 + [16413] = &_000595_hash,
92191 + [36125] = &_000596_hash,
92192 + [36293] = &_000597_hash,
92193 + [39712] = &_000598_hash,
92194 + [32160] = &_000599_hash,
92195 + [22962] = &_000600_hash,
92196 + [32001] = &_000601_hash,
92197 + [35828] = &_000602_hash,
92198 + [3106] = &_000603_hash,
92199 + [34039] = &_000604_hash,
92200 + [22393] = &_000605_hash,
92201 + [3560] = &_000606_hash,
92202 + [28195] = &_000607_hash,
92203 + [2062] = &_000608_hash,
92204 + [64001] = &_000609_hash,
92205 + [42407] = &_000610_hash,
92206 + [6253] = &_000611_hash,
92207 + [58640] = &_000612_hash,
92208 + [32195] = &_000613_hash,
92209 + [26197] = &_000614_hash,
92210 + [58003] = &_000615_hash,
92211 + [21662] = &_000616_hash,
92212 + [45750] = &_000617_hash,
92213 + [25798] = &_000618_hash,
92214 + [41052] = &_000619_hash,
92215 + [14096] = &_000620_hash,
92216 + [1439] = &_000621_hash,
92217 + [29074] = &_000622_hash,
92218 + [2376] = &_000623_hash,
92219 + [24068] = &_000625_hash,
92220 + [59519] = &_000627_hash,
92221 + [9893] = &_000628_hash,
92222 + [39979] = &_000630_hash,
92223 + [41540] = &_000631_hash,
92224 + [43200] = &_000633_hash,
92225 + [33494] = &_000634_hash,
92226 + [2028] = &_000635_hash,
92227 + [27206] = &_000636_hash,
92228 + [24302] = &_000637_hash,
92229 + [38112] = &_000638_hash,
92230 + [46538] = &_000639_hash,
92231 + [35228] = &_000641_hash,
92232 + [8339] = &_000642_hash,
92233 + [45349] = &_000643_hash,
92234 + [48404] = &_000644_hash,
92235 + [37865] = &_000645_hash,
92236 + [45763] = &_000646_hash,
92237 + [62347] = &_000647_hash,
92238 + [21644] = &_000648_hash,
92239 + [53135] = &_000649_hash,
92240 + [25095] = &_000650_hash,
92241 + [11697] = &_000651_hash,
92242 + [27003] = &_000652_hash,
92243 + [32464] = &_000653_hash,
92244 + [65339] = &_000654_hash,
92245 + [44248] = &_000655_hash,
92246 + [16] = &_000656_hash,
92247 + [29933] = &_000657_hash,
92248 + [34359] = &_000658_hash,
92249 + [3154] = &_000659_hash,
92250 + [59308] = &_000660_hash,
92251 + [61661] = &_000661_hash,
92252 + [23959] = &_000662_hash,
92253 + [6724] = &_000663_hash,
92254 + [54587] = &_000664_hash,
92255 + [28479] = &_000665_hash,
92256 + [56583] = &_000666_hash,
92257 + [64644] = &_000667_hash,
92258 + [23284] = &_000668_hash,
92259 + [61655] = &_000669_hash,
92260 + [20980] = &_000670_hash,
92261 + [19794] = &_000671_hash,
92262 + [30036] = &_000672_hash,
92263 + [25649] = &_000673_hash,
92264 + [47428] = &_000674_hash,
92265 + [47737] = &_000675_hash,
92266 + [8367] = &_000676_hash,
92267 + [2987] = &_000677_hash,
92268 + [50962] = &_000678_hash,
92269 + [10760] = &_000679_hash,
92270 + [31678] = &_000680_hash,
92271 + [48558] = &_000681_hash,
92272 + [2274] = &_000682_hash,
92273 + [831] = &_000683_hash,
92274 + [61833] = &_000684_hash,
92275 + [56864] = &_000685_hash,
92276 + [31040] = &_000686_hash,
92277 + [22196] = &_000687_hash,
92278 + [20076] = &_000688_hash,
92279 + [52821] = &_000689_hash,
92280 + [21896] = &_000690_hash,
92281 + [49367] = &_000691_hash,
92282 + [64731] = &_000692_hash,
92283 + [37110] = &_000693_hash,
92284 + [53694] = &_000694_hash,
92285 + [6175] = &_000695_hash,
92286 + [33048] = &_000696_hash,
92287 + [34746] = &_000697_hash,
92288 + [23777] = &_000698_hash,
92289 + [53828] = &_000699_hash,
92290 + [26539] = &_000700_hash,
92291 + [42628] = &_000701_hash,
92292 + [59115] = &_000702_hash,
92293 + [4456] = &_000703_hash,
92294 + [63619] = &_000704_hash,
92295 + [47329] = &_000705_hash,
92296 + [13534] = &_000706_hash,
92297 + [36955] = &_000707_hash,
92298 + [9841] = &_000708_hash,
92299 + [19308] = &_000709_hash,
92300 + [52439] = &_000710_hash,
92301 + [24680] = &_000711_hash,
92302 + [55652] = &_000712_hash,
92303 + [7842] = &_000713_hash,
92304 + [6500] = &_000714_hash,
92305 + [33485] = &_000715_hash,
92306 + [49920] = &_000716_hash,
92307 + [50750] = &_000717_hash,
92308 + [22318] = &_000718_hash,
92309 + [44599] = &_000719_hash,
92310 + [46403] = &_000720_hash,
92311 + [44534] = &_000721_hash,
92312 + [303] = &_000722_hash,
92313 + [22960] = &_000723_hash,
92314 + [10544] = &_000724_hash,
92315 + [8236] = &_000725_hash,
92316 + [21239] = &_000726_hash,
92317 + [24712] = &_000727_hash,
92318 + [37974] = &_000728_hash,
92319 + [62082] = &_000729_hash,
92320 + [57054] = &_000730_hash,
92321 + [53265] = &_000731_hash,
92322 + [52239] = &_000732_hash,
92323 + [14753] = &_000733_hash,
92324 + [60221] = &_000736_hash,
92325 + [27142] = &_000737_hash,
92326 + [14295] = &_000738_hash,
92327 + [25923] = &_000739_hash,
92328 + [29213] = &_000740_hash,
92329 + [31865] = &_000741_hash,
92330 + [4764] = &_000742_hash,
92331 + [10574] = &_000743_hash,
92332 + [55766] = &_000744_hash,
92333 + [22483] = &_000745_hash,
92334 + [61047] = &_000746_hash,
92335 + [41044] = &_000747_hash,
92336 + [58978] = &_000748_hash,
92337 + [47578] = &_000749_hash,
92338 + [7730] = &_000750_hash,
92339 + [15904] = &_000751_hash,
92340 + [25081] = &_000752_hash,
92341 + [45743] = &_000753_hash,
92342 + [58830] = &_000754_hash,
92343 + [59081] = &_000755_hash,
92344 + [47533] = &_000756_hash,
92345 + [11305] = &_000757_hash,
92346 + [29096] = &_000758_hash,
92347 + [19749] = &_000759_hash,
92348 + [56290] = &_000760_hash,
92349 + [44963] = &_000761_hash,
92350 + [30026] = &_000762_hash,
92351 + [27694] = &_000763_hash,
92352 + [8089] = &_000764_hash,
92353 + [38583] = &_000765_hash,
92354 + [1144] = &_000766_hash,
92355 + [20939] = &_000767_hash,
92356 + [22231] = &_000768_hash,
92357 + [17486] = &_000769_hash,
92358 + [51811] = &_000770_hash,
92359 + [62746] = &_000771_hash,
92360 + [19181] = &_000772_hash,
92361 + [52661] = &_000773_hash,
92362 + [51148] = &_000774_hash,
92363 + [49864] = &_000775_hash,
92364 + [37978] = &_000776_hash,
92365 + [6280] = &_000777_hash,
92366 + [12961] = &_000778_hash,
92367 + [60541] = &_000779_hash,
92368 + [37021] = &_000780_hash,
92369 + [26028] = &_000781_hash,
92370 + [41363] = &_000782_hash,
92371 + [42016] = &_000783_hash,
92372 + [58540] = &_000784_hash,
92373 + [2326] = &_000785_hash,
92374 + [60981] = &_000786_hash,
92375 + [13313] = &_000787_hash,
92376 + [44188] = &_000788_hash,
92377 + [34638] = &_000789_hash,
92378 + [20304] = &_000790_hash,
92379 + [60975] = &_000791_hash,
92380 + [12244] = &_000792_hash,
92381 + [16266] = &_000793_hash,
92382 + [3395] = &_000794_hash,
92383 + [63321] = &_000795_hash,
92384 + [20509] = &_000796_hash,
92385 + [57365] = &_000797_hash,
92386 + [47449] = &_000798_hash,
92387 + [56693] = &_000799_hash,
92388 + [33936] = &_000800_hash,
92389 + [52548] = &_000801_hash,
92390 + [18733] = &_000802_hash,
92391 + [15560] = &_000803_hash,
92392 + [13231] = &_000804_hash,
92393 + [64518] = &_000806_hash,
92394 + [54551] = &_000807_hash,
92395 + [54359] = &_000809_hash,
92396 + [46503] = &_000810_hash,
92397 + [22258] = &_000811_hash,
92398 + [39434] = &_000812_hash,
92399 + [52887] = &_000813_hash,
92400 + [3079] = &_000814_hash,
92401 + [18813] = &_000816_hash,
92402 + [47614] = &_000817_hash,
92403 + [38186] = &_000818_hash,
92404 + [57652] = &_000819_hash,
92405 + [10078] = &_000820_hash,
92406 + [17910] = &_000821_hash,
92407 + [13567] = &_000822_hash,
92408 + [21531] = &_000823_hash,
92409 + [46135] = &_000824_hash,
92410 + [10582] = &_000825_hash,
92411 + [4662] = &_000826_hash,
92412 + [17969] = &_000827_hash,
92413 + [43943] = &_000828_hash,
92414 + [46740] = &_000829_hash,
92415 + [26716] = &_000830_hash,
92416 + [58230] = &_000831_hash,
92417 + [252] = &_000832_hash,
92418 + [15704] = &_000833_hash,
92419 + [59765] = &_000834_hash,
92420 + [7322] = &_000835_hash,
92421 + [43950] = &_000836_hash,
92422 + [53093] = &_000837_hash,
92423 + [21646] = &_000838_hash,
92424 + [57063] = &_000839_hash,
92425 + [17132] = &_000840_hash,
92426 + [53922] = &_000842_hash,
92427 + [49155] = &_000843_hash,
92428 + [16356] = &_000844_hash,
92429 + [60037] = &_000845_hash,
92430 + [17299] = &_000846_hash,
92431 + [25678] = &_000847_hash,
92432 + [15494] = &_000848_hash,
92433 + [15159] = &_000849_hash,
92434 + [28442] = &_000850_hash,
92435 + [3514] = &_000851_hash,
92436 + [38151] = &_000852_hash,
92437 + [4173] = &_000853_hash,
92438 + [7258] = &_000854_hash,
92439 + [65109] = &_000855_hash,
92440 + [58827] = &_000856_hash,
92441 + [33575] = &_000857_hash,
92442 + [33078] = &_000858_hash,
92443 + [47234] = &_000859_hash,
92444 + [39193] = &_000860_hash,
92445 + [10950] = &_000861_hash,
92446 + [15613] = &_000862_hash,
92447 + [16046] = &_000863_hash,
92448 + [50172] = &_000864_hash,
92449 + [26107] = &_000865_hash,
92450 + [60543] = &_000866_hash,
92451 + [56337] = &_000867_hash,
92452 + [47626] = &_000868_hash,
92453 + [24409] = &_000869_hash,
92454 + [11732] = &_000870_hash,
92455 + [30010] = &_000871_hash,
92456 + [51480] = &_000872_hash,
92457 + [28518] = &_000873_hash,
92458 + [2061] = &_000874_hash,
92459 + [10885] = &_000875_hash,
92460 + [29517] = &_000876_hash,
92461 + [45913] = &_000877_hash,
92462 + [51774] = &_000878_hash,
92463 + [62298] = &_000879_hash,
92464 + [8892] = &_000880_hash,
92465 + [64891] = &_000881_hash,
92466 + [64537] = &_000882_hash,
92467 + [38103] = &_000883_hash,
92468 + [55518] = &_000884_hash,
92469 + [27419] = &_000885_hash,
92470 + [13869] = &_000886_hash,
92471 + [53150] = &_000887_hash,
92472 + [2884] = &_000888_hash,
92473 + [10362] = &_000889_hash,
92474 + [6961] = &_000890_hash,
92475 + [56975] = &_000891_hash,
92476 + [12508] = &_000892_hash,
92477 + [54597] = &_000893_hash,
92478 + [60499] = &_000894_hash,
92479 + [50109] = &_000895_hash,
92480 + [944] = &_000896_hash,
92481 + [29229] = &_000897_hash,
92482 + [37648] = &_000898_hash,
92483 + [1568] = &_000899_hash,
92484 + [61793] = &_000900_hash,
92485 + [53395] = &_000901_hash,
92486 + [5519] = &_000902_hash,
92487 + [28637] = &_000903_hash,
92488 + [53687] = &_000904_hash,
92489 + [6783] = &_000905_hash,
92490 + [43312] = &_000906_hash,
92491 + [2373] = &_000907_hash,
92492 + [33482] = &_000908_hash,
92493 + [24886] = &_000909_hash,
92494 + [48154] = &_000910_hash,
92495 + [12838] = &_000911_hash,
92496 + [47012] = &_000912_hash,
92497 + [23691] = &_000913_hash,
92498 + [37924] = &_000914_hash,
92499 + [47346] = &_000915_hash,
92500 + [5624] = &_000916_hash,
92501 + [16842] = &_000918_hash,
92502 + [60399] = &_000919_hash,
92503 + [2312] = &_000920_hash,
92504 + [59212] = &_000921_hash,
92505 + [11923] = &_000922_hash,
92506 + [10805] = &_000923_hash,
92507 + [36577] = &_000924_hash,
92508 + [60948] = &_000925_hash,
92509 + [21711] = &_000926_hash,
92510 + [54830] = &_000927_hash,
92511 + [1822] = &_000928_hash,
92512 + [44573] = &_000929_hash,
92513 + [23805] = &_000930_hash,
92514 + [46061] = &_000931_hash,
92515 + [33996] = &_000932_hash,
92516 + [40856] = &_000933_hash,
92517 + [16299] = &_000934_hash,
92518 + [63446] = &_000935_hash,
92519 + [31205] = &_000936_hash,
92520 + [33100] = &_000937_hash,
92521 + [40843] = &_000938_hash,
92522 + [23712] = &_000939_hash,
92523 + [36962] = &_000940_hash,
92524 + [9845] = &_000942_hash,
92525 + [13738] = &_000943_hash,
92526 + [58099] = &_000944_hash,
92527 + [31869] = &_000945_hash,
92528 + [63501] = &_000946_hash,
92529 + [58188] = &_000947_hash,
92530 + [51338] = &_000948_hash,
92531 + [54999] = &_000949_hash,
92532 + [2434] = &_000950_hash,
92533 + [34958] = &_000951_hash,
92534 + [41487] = &_000952_hash,
92535 + [11941] = &_000953_hash,
92536 + [56728] = &_000954_hash,
92537 + [48150] = &_000955_hash,
92538 + [13905] = &_000956_hash,
92539 + [9054] = &_000957_hash,
92540 + [10758] = &_000958_hash,
92541 + [48056] = &_000959_hash,
92542 + [24231] = &_000960_hash,
92543 + [43748] = &_000961_hash,
92544 + [24237] = &_000962_hash,
92545 + [14899] = &_000963_hash,
92546 + [38652] = &_000964_hash,
92547 + [65013] = &_000965_hash,
92548 + [16645] = &_000967_hash,
92549 + [55031] = &_000968_hash,
92550 + [23978] = &_000969_hash,
92551 + [24208] = &_000970_hash,
92552 + [18098] = &_000971_hash,
92553 + [2303] = &_000972_hash,
92554 + [3338] = &_000973_hash,
92555 + [39219] = &_000974_hash,
92556 + [18609] = &_000976_hash,
92557 + [64412] = &_000977_hash,
92558 + [16962] = &_000978_hash,
92559 + [26346] = &_000979_hash,
92560 + [39380] = &_000980_hash,
92561 + [33020] = &_000981_hash,
92562 + [22639] = &_000982_hash,
92563 + [6453] = &_000983_hash,
92564 + [58602] = &_000984_hash,
92565 + [50920] = &_000985_hash,
92566 + [56471] = &_000987_hash,
92567 + [15378] = &_000988_hash,
92568 + [3589] = &_000989_hash,
92569 + [12558] = &_000990_hash,
92570 + [3201] = &_000991_hash,
92571 + [28175] = &_000993_hash,
92572 + [43888] = &_000995_hash,
92573 + [56010] = &_000996_hash,
92574 + [32456] = &_000997_hash,
92575 + [29036] = &_000998_hash,
92576 + [32330] = &_000999_hash,
92577 + [25603] = &_001000_hash,
92578 + [17675] = &_001001_hash,
92579 + [36271] = &_001002_hash,
92580 + [49814] = &_001003_hash,
92581 + [5693] = &_001004_hash,
92582 + [51009] = &_001005_hash,
92583 + [62835] = &_001006_hash,
92584 + [27139] = &_001007_hash,
92585 + [45155] = &_001008_hash,
92586 + [17186] = &_001009_hash,
92587 + [46734] = &_001010_hash,
92588 + [61957] = &_001011_hash,
92589 + [51389] = &_001012_hash,
92590 + [23687] = &_001013_hash,
92591 + [46825] = &_001014_hash,
92592 + [52287] = &_001016_hash,
92593 + [31408] = &_001017_hash,
92594 + [5396] = &_001018_hash,
92595 + [62247] = &_001019_hash,
92596 + [7946] = &_001020_hash,
92597 + [58210] = &_001022_hash,
92598 + [15618] = &_001023_hash,
92599 + [61225] = &_001024_hash,
92600 + [13163] = &_001025_hash,
92601 + [36882] = &_001026_hash,
92602 + [8719] = &_001027_hash,
92603 + [8539] = &_001028_hash,
92604 + [27134] = &_001029_hash,
92605 + [53335] = &_001030_hash,
92606 + [30381] = &_001031_hash,
92607 + [32336] = &_001032_hash,
92608 + [32867] = &_001033_hash,
92609 + [1238] = &_001034_hash,
92610 + [8174] = &_001035_hash,
92611 + [6368] = &_001036_hash,
92612 + [29170] = &_001037_hash,
92613 + [9687] = &_001038_hash,
92614 + [61116] = &_001039_hash,
92615 + [31681] = &_001040_hash,
92616 + [22119] = &_001041_hash,
92617 + [59885] = &_001042_hash,
92618 + [47789] = &_001043_hash,
92619 + [5796] = &_001044_hash,
92620 + [43376] = &_001045_hash,
92621 + [36706] = &_001046_hash,
92622 + [47945] = &_001047_hash,
92623 + [33208] = &_001048_hash,
92624 + [55431] = &_001049_hash,
92625 + [25291] = &_001050_hash,
92626 + [58805] = &_001051_hash,
92627 + [23708] = &_001052_hash,
92628 + [29278] = &_001053_hash,
92629 + [1272] = &_001054_hash,
92630 + [10199] = &_001055_hash,
92631 + [34666] = &_001056_hash,
92632 + [49317] = &_001057_hash,
92633 + [18604] = &_001058_hash,
92634 + [42545] = &_001059_hash,
92635 + [33157] = &_001060_hash,
92636 + [53343] = &_001061_hash,
92637 + [64842] = &_001062_hash,
92638 + [61865] = &_001063_hash,
92639 + [54010] = &_001064_hash,
92640 + [64638] = &_001065_hash,
92641 + [20480] = &_001066_hash,
92642 + [23341] = &_001067_hash,
92643 + [10350] = &_001068_hash,
92644 + [30970] = &_001069_hash,
92645 + [62360] = &_001070_hash,
92646 + [52537] = &_001071_hash,
92647 + [51386] = &_001072_hash,
92648 + [48731] = &_001073_hash,
92649 + [58061] = &_001074_hash,
92650 + [40405] = &_001075_hash,
92651 + [57198] = &_001076_hash,
92652 + [19290] = &_001077_hash,
92653 + [60403] = &_001078_hash,
92654 + [2738] = &_001079_hash,
92655 + [59721] = &_001080_hash,
92656 + [24980] = &_001081_hash,
92657 + [55896] = &_001082_hash,
92658 + [57055] = &_001083_hash,
92659 + [46010] = &_001084_hash,
92660 + [712] = &_001085_hash,
92661 + [37747] = &_001086_hash,
92662 + [59996] = &_001087_hash,
92663 + [45219] = &_001088_hash,
92664 + [16917] = &_001089_hash,
92665 + [7415] = &_001090_hash,
92666 + [29576] = &_001091_hash,
92667 + [13584] = &_001092_hash,
92668 + [53364] = &_001093_hash,
92669 + [14813] = &_001094_hash,
92670 + [25543] = &_001095_hash,
92671 + [29240] = &_001096_hash,
92672 + [38748] = &_001097_hash,
92673 + [34848] = &_001099_hash,
92674 + [46226] = &_001100_hash,
92675 + [55526] = &_001101_hash,
92676 + [48271] = &_001102_hash,
92677 + [24658] = &_001104_hash,
92678 + [46964] = &_001105_hash,
92679 + [2637] = &_001106_hash,
92680 + [55601] = &_001107_hash,
92681 + [60275] = &_001108_hash,
92682 + [52645] = &_001109_hash,
92683 + [11712] = &_001110_hash,
92684 + [51364] = &_001111_hash,
92685 + [5106] = &_001112_hash,
92686 + [24710] = &_001113_hash,
92687 + [13101] = &_001114_hash,
92688 + [46963] = &_001115_hash,
92689 + [6779] = &_001116_hash,
92690 + [9237] = &_001117_hash,
92691 + [61524] = &_001118_hash,
92692 + [38247] = &_001119_hash,
92693 + [48715] = &_001120_hash,
92694 + [40797] = &_001121_hash,
92695 + [46780] = &_001122_hash,
92696 + [22071] = &_001123_hash,
92697 + [49735] = &_001125_hash,
92698 + [63925] = &_001126_hash,
92699 + [30902] = &_001127_hash,
92700 + [39828] = &_001128_hash,
92701 + [53089] = &_001129_hash,
92702 + [6394] = &_001130_hash,
92703 + [5116] = &_001131_hash,
92704 + [50702] = &_001132_hash,
92705 + [59565] = &_001133_hash,
92706 + [61042] = &_001134_hash,
92707 + [14533] = &_001135_hash,
92708 + [23807] = &_001136_hash,
92709 + [24296] = &_001137_hash,
92710 + [8808] = &_001138_hash,
92711 + [52383] = &_001139_hash,
92712 + [30487] = &_001140_hash,
92713 + [30125] = &_001141_hash,
92714 + [40665] = &_001142_hash,
92715 + [60809] = &_001143_hash,
92716 + [4842] = &_001144_hash,
92717 + [13955] = &_001145_hash,
92718 + [33237] = &_001146_hash,
92719 + [40673] = &_001147_hash,
92720 + [48026] = &_001148_hash,
92721 + [64033] = &_001149_hash,
92722 + [13879] = &_001150_hash,
92723 + [60114] = &_001151_hash,
92724 + [19472] = &_001152_hash,
92725 + [33552] = &_001153_hash,
92726 + [28575] = &_001154_hash,
92727 + [19696] = &_001155_hash,
92728 + [19742] = &_001156_hash,
92729 + [15286] = &_001157_hash,
92730 + [24629] = &_001158_hash,
92731 + [28382] = &_001159_hash,
92732 + [18962] = &_001160_hash,
92733 + [45796] = &_001161_hash,
92734 + [51632] = &_001162_hash,
92735 + [16907] = &_001163_hash,
92736 + [49336] = &_001164_hash,
92737 + [25316] = &_001165_hash,
92738 + [39978] = &_001166_hash,
92739 + [8091] = &_001167_hash,
92740 + [30680] = &_001168_hash,
92741 + [2066] = &_001169_hash,
92742 + [24271] = &_001170_hash,
92743 + [34934] = &_001171_hash,
92744 + [29208] = &_001172_hash,
92745 + [18591] = &_001173_hash,
92746 + [24373] = &_001174_hash,
92747 + [41485] = &_001175_hash,
92748 + [45487] = &_001176_hash,
92749 + [29299] = &_001177_hash,
92750 + [53920] = &_001178_hash,
92751 + [25407] = &_001179_hash,
92752 + [5525] = &_001180_hash,
92753 + [3531] = &_001181_hash,
92754 + [25143] = &_001182_hash,
92755 + [56046] = &_001183_hash,
92756 + [34693] = &_001184_hash,
92757 + [48644] = &_001185_hash,
92758 + [21226] = &_001186_hash,
92759 + [14051] = &_001187_hash,
92760 + [7715] = &_001188_hash,
92761 + [30413] = &_001189_hash,
92762 + [13681] = &_001190_hash,
92763 + [6554] = &_001191_hash,
92764 + [12228] = &_001192_hash,
92765 + [25497] = &_001193_hash,
92766 + [52228] = &_001194_hash,
92767 + [49069] = &_001195_hash,
92768 + [26961] = &_001196_hash,
92769 + [13768] = &_001197_hash,
92770 + [56185] = &_001198_hash,
92771 + [41838] = &_001199_hash,
92772 + [60119] = &_001200_hash,
92773 + [3112] = &_001201_hash,
92774 + [62001] = &_001202_hash,
92775 + [35888] = &_001203_hash,
92776 + [64177] = &_001207_hash,
92777 + [57222] = &_001208_hash,
92778 + [5260] = &_001209_hash,
92779 + [55517] = &_001210_hash,
92780 + [18186] = &_001211_hash,
92781 + [14257] = &_001212_hash,
92782 + [26846] = &_001213_hash,
92783 + [56097] = &_001214_hash,
92784 + [55151] = &_001215_hash,
92785 + [2999] = &_001216_hash,
92786 + [3602] = &_001217_hash,
92787 + [18460] = &_001218_hash,
92788 + [3507] = &_001219_hash,
92789 + [57847] = &_001220_hash,
92790 + [58077] = &_001221_hash,
92791 + [2659] = &_001222_hash,
92792 + [39846] = &_001223_hash,
92793 + [18629] = &_001224_hash,
92794 + [2723] = &_001225_hash,
92795 + [45230] = &_001226_hash,
92796 + [26941] = &_001227_hash,
92797 + [4344] = &_001228_hash,
92798 + [8487] = &_001229_hash,
92799 + [9901] = &_001230_hash,
92800 + [43061] = &_001231_hash,
92801 + [42551] = &_001232_hash,
92802 + [63272] = &_001233_hash,
92803 + [37771] = &_001234_hash,
92804 + [28261] = &_001235_hash,
92805 + [44694] = &_001236_hash,
92806 + [8573] = &_001237_hash,
92807 + [60174] = &_001238_hash,
92808 + [28040] = &_001239_hash,
92809 + [39423] = &_001240_hash,
92810 + [98] = &_001241_hash,
92811 + [62874] = &_001242_hash,
92812 + [38726] = &_001243_hash,
92813 + [55348] = &_001244_hash,
92814 + [10997] = &_001245_hash,
92815 + [88] = &_001246_hash,
92816 + [60639] = &_001247_hash,
92817 + [48159] = &_001248_hash,
92818 + [47899] = &_001249_hash,
92819 + [25367] = &_001250_hash,
92820 + [55681] = &_001251_hash,
92821 + [44716] = &_001252_hash,
92822 + [26161] = &_001253_hash,
92823 + [55347] = &_001254_hash,
92824 + [14518] = &_001255_hash,
92825 + [8887] = &_001256_hash,
92826 + [23009] = &_001257_hash,
92827 + [27962] = &_001258_hash,
92828 + [20004] = &_001259_hash,
92829 + [61750] = &_001260_hash,
92830 + [11661] = &_001261_hash,
92831 + [37118] = &_001262_hash,
92832 + [9370] = &_001263_hash,
92833 + [15099] = &_001264_hash,
92834 + [2404] = &_001265_hash,
92835 + [64074] = &_001266_hash,
92836 + [7538] = &_001267_hash,
92837 + [19736] = &_001268_hash,
92838 + [8199] = &_001269_hash,
92839 + [40711] = &_001270_hash,
92840 + [47859] = &_001271_hash,
92841 + [53925] = &_001272_hash,
92842 + [46888] = &_001273_hash,
92843 + [21783] = &_001274_hash,
92844 + [37305] = &_001275_hash,
92845 + [18414] = &_001276_hash,
92846 + [62423] = &_001277_hash,
92847 + [30371] = &_001278_hash,
92848 + [32617] = &_001279_hash,
92849 + [14530] = &_001281_hash,
92850 + [48623] = &_001282_hash,
92851 + [12845] = &_001283_hash,
92852 + [8895] = &_001284_hash,
92853 + [33661] = &_001285_hash,
92854 + [23178] = &_001286_hash,
92855 + [54706] = &_001287_hash,
92856 + [27133] = &_001288_hash,
92857 + [52745] = &_001289_hash,
92858 + [64420] = &_001290_hash,
92859 + [25617] = &_001291_hash,
92860 + [25414] = &_001292_hash,
92861 + [20445] = &_001293_hash,
92862 + [64006] = &_001294_hash,
92863 + [52646] = &_001295_hash,
92864 + [30281] = &_001296_hash,
92865 + [3761] = &_001297_hash,
92866 + [44345] = &_001298_hash,
92867 + [14713] = &_001299_hash,
92868 + [26043] = &_001300_hash,
92869 + [41679] = &_001301_hash,
92870 + [6267] = &_001302_hash,
92871 + [22247] = &_001304_hash,
92872 + [9440] = &_001305_hash,
92873 + [54676] = &_001306_hash,
92874 + [53982] = &_001308_hash,
92875 + [9467] = &_001309_hash,
92876 + [53419] = &_001310_hash,
92877 + [1424] = &_001311_hash,
92878 + [17561] = &_001312_hash,
92879 + [28161] = &_001313_hash,
92880 + [57262] = &_001314_hash,
92881 + [61071] = &_001315_hash,
92882 + [20067] = &_001316_hash,
92883 + [34321] = &_001317_hash,
92884 + [56199] = &_001318_hash,
92885 + [29070] = &_001319_hash,
92886 + [15698] = &_001320_hash,
92887 + [14173] = &_001321_hash,
92888 + [41224] = &_001322_hash,
92889 + [56438] = &_001323_hash,
92890 + [41894] = &_001324_hash,
92891 + [20885] = &_001325_hash,
92892 + [23275] = &_001326_hash,
92893 + [45043] = &_001327_hash,
92894 + [22143] = &_001328_hash,
92895 + [38029] = &_001329_hash,
92896 + [55343] = &_001330_hash,
92897 + [40624] = &_001331_hash,
92898 + [26476] = &_001332_hash,
92899 + [43128] = &_001333_hash,
92900 + [45115] = &_001334_hash,
92901 + [32958] = &_001335_hash,
92902 + [43091] = &_001336_hash,
92903 + [33299] = &_001337_hash,
92904 + [55021] = &_001338_hash,
92905 + [5509] = &_001339_hash,
92906 + [53012] = &_001340_hash,
92907 + [57849] = &_001341_hash,
92908 + [63282] = &_001342_hash,
92909 + [27883] = &_001343_hash,
92910 + [1670] = &_001344_hash,
92911 + [24095] = &_001345_hash,
92912 + [47810] = &_001346_hash,
92913 + [40759] = &_001347_hash,
92914 + [42139] = &_001348_hash,
92915 + [50484] = &_001349_hash,
92916 + [2305] = &_001350_hash,
92917 + [59832] = &_001351_hash,
92918 + [17662] = &_001352_hash,
92919 + [58943] = &_001353_hash,
92920 + [37417] = &_001356_hash,
92921 + [25127] = &_001357_hash,
92922 + [15006] = &_001358_hash,
92923 + [54292] = &_001359_hash,
92924 + [30642] = &_001360_hash,
92925 + [39939] = &_001361_hash,
92926 + [34818] = &_001362_hash,
92927 + [23378] = &_001363_hash,
92928 + [24090] = &_001364_hash,
92929 + [11111] = &_001365_hash,
92930 + [64141] = &_001366_hash,
92931 + [46457] = &_001367_hash,
92932 + [57927] = &_001368_hash,
92933 + [58877] = &_001371_hash,
92934 + [13880] = &_001372_hash,
92935 + [62888] = &_001373_hash,
92936 + [57962] = &_001374_hash,
92937 + [9117] = &_001375_hash,
92938 + [52012] = &_001376_hash,
92939 + [49246] = &_001377_hash,
92940 + [52701] = &_001378_hash,
92941 + [29857] = &_001379_hash,
92942 + [49420] = &_001380_hash,
92943 + [45897] = &_001381_hash,
92944 + [15141] = &_001382_hash,
92945 + [24177] = &_001383_hash,
92946 + [10325] = &_001384_hash,
92947 + [52861] = &_001385_hash,
92948 + [28922] = &_001386_hash,
92949 + [31089] = &_001387_hash,
92950 + [63084] = &_001388_hash,
92951 + [26245] = &_001389_hash,
92952 + [60000] = &_001390_hash,
92953 + [56935] = &_001391_hash,
92954 + [37569] = &_001392_hash,
92955 + [6446] = &_001394_hash,
92956 + [35883] = &_001395_hash,
92957 + [9123] = &_001396_hash,
92958 + [51457] = &_001397_hash,
92959 + [1787] = &_001398_hash,
92960 + [10135] = &_001399_hash,
92961 + [952] = &_001400_hash,
92962 + [53578] = &_001401_hash,
92963 + [9923] = &_001402_hash,
92964 + [45249] = &_001403_hash,
92965 + [52860] = &_001404_hash,
92966 + [29558] = &_001405_hash,
92967 + [40556] = &_001406_hash,
92968 + [53210] = &_001407_hash,
92969 + [2506] = &_001408_hash,
92970 + [48262] = &_001409_hash,
92971 + [46939] = &_001410_hash,
92972 + [17901] = &_001411_hash,
92973 + [27204] = &_001412_hash,
92974 + [52516] = &_001413_hash,
92975 + [55885] = &_001414_hash,
92976 + [6681] = &_001415_hash,
92977 + [42360] = &_001416_hash,
92978 + [20259] = &_001417_hash,
92979 + [8874] = &_001418_hash,
92980 + [53363] = &_001419_hash,
92981 + [17500] = &_001420_hash,
92982 + [63988] = &_001421_hash,
92983 + [26378] = &_001422_hash,
92984 + [7768] = &_001423_hash,
92985 + [12938] = &_001424_hash,
92986 + [6755] = &_001425_hash,
92987 + [43806] = &_001426_hash,
92988 + [15976] = &_001427_hash,
92989 + [2732] = &_001428_hash,
92990 + [2519] = &_001429_hash,
92991 + [14340] = &_001430_hash,
92992 + [34772] = &_001431_hash,
92993 + [36433] = &_001432_hash,
92994 + [16068] = &_001433_hash,
92995 + [22052] = &_001434_hash,
92996 + [8929] = &_001435_hash,
92997 + [63220] = &_001436_hash,
92998 + [18246] = &_001437_hash,
92999 + [37678] = &_001438_hash,
93000 + [4932] = &_001439_hash,
93001 + [46960] = &_001440_hash,
93002 + [16909] = &_001441_hash,
93003 + [44429] = &_001442_hash,
93004 + [59514] = &_001443_hash,
93005 + [62760] = &_001444_hash,
93006 + [41841] = &_001445_hash,
93007 + [25417] = &_001446_hash,
93008 + [63230] = &_001447_hash,
93009 + [39532] = &_001448_hash,
93010 + [24688] = &_001449_hash,
93011 + [18555] = &_001450_hash,
93012 + [54499] = &_001451_hash,
93013 + [10719] = &_001452_hash,
93014 + [1644] = &_001453_hash,
93015 + [15109] = &_001454_hash,
93016 + [15787] = &_001455_hash,
93017 + [57869] = &_001456_hash,
93018 + [54445] = &_001457_hash,
93019 + [19398] = &_001458_hash,
93020 + [9488] = &_001459_hash,
93021 + [12587] = &_001460_hash,
93022 + [17124] = &_001461_hash,
93023 + [53665] = &_001462_hash,
93024 + [40386] = &_001463_hash,
93025 + [39444] = &_001464_hash,
93026 + [28873] = &_001465_hash,
93027 + [11290] = &_001466_hash,
93028 + [51313] = &_001467_hash,
93029 + [23354] = &_001469_hash,
93030 + [49559] = &_001470_hash,
93031 + [49312] = &_001471_hash,
93032 + [36333] = &_001472_hash,
93033 + [59349] = &_001473_hash,
93034 + [60316] = &_001474_hash,
93035 + [2546] = &_001475_hash,
93036 + [57483] = &_001476_hash,
93037 + [14569] = &_001478_hash,
93038 + [61842] = &_001481_hash,
93039 + [32923] = &_001482_hash,
93040 + [57471] = &_001483_hash,
93041 + [83] = &_001484_hash,
93042 + [40242] = &_001485_hash,
93043 + [42578] = &_001486_hash,
93044 + [62037] = &_001487_hash,
93045 + [8131] = &_001488_hash,
93046 + [752] = &_001489_hash,
93047 + [56376] = &_001490_hash,
93048 + [22290] = &_001491_hash,
93049 + [46232] = &_001492_hash,
93050 + [35132] = &_001493_hash,
93051 + [23825] = &_001494_hash,
93052 + [43262] = &_001495_hash,
93053 + [8138] = &_001496_hash,
93054 + [31489] = &_001497_hash,
93055 + [57578] = &_001498_hash,
93056 + [28007] = &_001499_hash,
93057 + [28688] = &_001500_hash,
93058 + [19319] = &_001501_hash,
93059 + [12575] = &_001502_hash,
93060 + [62762] = &_001504_hash,
93061 + [47450] = &_001505_hash,
93062 + [1869] = &_001506_hash,
93063 + [51225] = &_001507_hash,
93064 + [19561] = &_001508_hash,
93065 + [64894] = &_001509_hash,
93066 + [6829] = &_001510_hash,
93067 + [30644] = &_001511_hash,
93068 + [63391] = &_001512_hash,
93069 + [11655] = &_001514_hash,
93070 + [28229] = &_001515_hash,
93071 + [22382] = &_001516_hash,
93072 + [22649] = &_001517_hash,
93073 + [42619] = &_001518_hash,
93074 + [19761] = &_001519_hash,
93075 + [56990] = &_001520_hash,
93076 + [19531] = &_001521_hash,
93077 + [26514] = &_001522_hash,
93078 + [56773] = &_001523_hash,
93079 + [15563] = &_001524_hash,
93080 + [26212] = &_001525_hash,
93081 + [29203] = &_001526_hash,
93082 + [32768] = &_001527_hash,
93083 + [15110] = &_001528_hash,
93084 + [3885] = &_001529_hash,
93085 + [13788] = &_001530_hash,
93086 + [27875] = &_001531_hash,
93087 + [54959] = &_001532_hash,
93088 + [20945] = &_001533_hash,
93089 + [59640] = &_001534_hash,
93090 + [4693] = &_001535_hash,
93091 + [13793] = &_001536_hash,
93092 + [25659] = &_001537_hash,
93093 + [18734] = &_001538_hash,
93094 + [17869] = &_001539_hash,
93095 + [26270] = &_001540_hash,
93096 + [18458] = &_001541_hash,
93097 + [58468] = &_001542_hash,
93098 + [61257] = &_001543_hash,
93099 + [39946] = &_001544_hash,
93100 + [52382] = &_001545_hash,
93101 + [18428] = &_001546_hash,
93102 + [31069] = &_001547_hash,
93103 + [61614] = &_001548_hash,
93104 + [60044] = &_001549_hash,
93105 + [36818] = &_001550_hash,
93106 + [54353] = &_001551_hash,
93107 + [55994] = &_001552_hash,
93108 + [65142] = &_001553_hash,
93109 + [1664] = &_001554_hash,
93110 + [32212] = &_001555_hash,
93111 + [63087] = &_001556_hash,
93112 + [29916] = &_001557_hash,
93113 + [54912] = &_001558_hash,
93114 + [10318] = &_001559_hash,
93115 + [44031] = &_001560_hash,
93116 + [50108] = &_001561_hash,
93117 + [57812] = &_001562_hash,
93118 + [63190] = &_001563_hash,
93119 + [48246] = &_001564_hash,
93120 + [3744] = &_001565_hash,
93121 + [56321] = &_001566_hash,
93122 + [42691] = &_001567_hash,
93123 + [62052] = &_001568_hash,
93124 + [21999] = &_001569_hash,
93125 + [13672] = &_001570_hash,
93126 + [20648] = &_001571_hash,
93127 + [42500] = &_001572_hash,
93128 + [22795] = &_001573_hash,
93129 + [19496] = &_001574_hash,
93130 + [35556] = &_001575_hash,
93131 + [57144] = &_001576_hash,
93132 + [1019] = &_001577_hash,
93133 + [28818] = &_001578_hash,
93134 + [52880] = &_001579_hash,
93135 + [6543] = &_001580_hash,
93136 + [18895] = &_001581_hash,
93137 + [857] = &_001582_hash,
93138 + [45966] = &_001583_hash,
93139 + [11785] = &_001584_hash,
93140 + [7736] = &_001585_hash,
93141 + [4308] = &_001586_hash,
93142 + [51095] = &_001587_hash,
93143 + [12101] = &_001588_hash,
93144 + [427] = &_001589_hash,
93145 + [4021] = &_001590_hash,
93146 + [54201] = &_001591_hash,
93147 + [5615] = &_001592_hash,
93148 + [16234] = &_001593_hash,
93149 + [51718] = &_001594_hash,
93150 + [42390] = &_001595_hash,
93151 + [55391] = &_001596_hash,
93152 + [28539] = &_001597_hash,
93153 + [943] = &_001598_hash,
93154 + [32683] = &_001599_hash,
93155 + [39182] = &_001600_hash,
93156 + [33198] = &_001601_hash,
93157 + [39446] = &_001602_hash,
93158 + [16394] = &_001603_hash,
93159 + [30791] = &_001604_hash,
93160 + [35530] = &_001605_hash,
93161 + [53193] = &_001607_hash,
93162 + [39401] = &_001608_hash,
93163 + [28624] = &_001609_hash,
93164 + [12066] = &_001610_hash,
93165 + [63492] = &_001611_hash,
93166 + [14897] = &_001612_hash,
93167 + [29641] = &_001613_hash,
93168 + [10165] = &_001614_hash,
93169 + [60046] = &_001615_hash,
93170 + [12429] = &_001616_hash,
93171 + [32788] = &_001617_hash,
93172 + [52698] = &_001618_hash,
93173 + [13130] = &_001620_hash,
93174 + [28643] = &_001621_hash,
93175 + [50666] = &_001622_hash,
93176 + [35126] = &_001623_hash,
93177 + [33593] = &_001624_hash,
93178 + [27547] = &_001625_hash,
93179 + [5484] = &_001626_hash,
93180 + [26642] = &_001627_hash,
93181 + [25586] = &_001628_hash,
93182 + [58757] = &_001629_hash,
93183 + [18701] = &_001630_hash,
93184 + [26271] = &_001631_hash,
93185 + [23829] = &_001632_hash,
93186 + [63659] = &_001634_hash,
93187 + [26603] = &_001635_hash,
93188 + [25704] = &_001636_hash,
93189 + [21149] = &_001637_hash,
93190 + [36900] = &_001638_hash,
93191 + [61577] = &_001640_hash,
93192 + [54095] = &_001641_hash,
93193 + [31650] = &_001642_hash,
93194 + [48970] = &_001643_hash,
93195 + [49357] = &_001644_hash,
93196 + [33835] = &_001645_hash,
93197 + [46861] = &_001646_hash,
93198 + [1428] = &_001647_hash,
93199 + [36247] = &_001648_hash,
93200 + [21600] = &_001649_hash,
93201 + [24747] = &_001650_hash,
93202 + [51012] = &_001651_hash,
93203 + [38974] = &_001653_hash,
93204 + [30549] = &_001655_hash,
93205 + [40146] = &_001656_hash,
93206 + [41756] = &_001657_hash,
93207 + [37010] = &_001658_hash,
93208 + [35072] = &_001660_hash,
93209 + [2114] = &_001661_hash,
93210 + [48424] = &_001662_hash,
93211 + [61522] = &_001663_hash,
93212 + [50633] = &_001664_hash,
93213 + [2283] = &_001665_hash,
93214 + [61763] = &_001666_hash,
93215 + [48195] = &_001667_hash,
93216 + [31000] = &_001668_hash,
93217 + [23856] = &_001669_hash,
93218 + [37421] = &_001670_hash,
93219 + [10019] = &_001672_hash,
93220 + [5148] = &_001673_hash,
93221 + [14363] = &_001674_hash,
93222 + [57354] = &_001675_hash,
93223 + [62460] = &_001676_hash,
93224 + [45174] = &_001677_hash,
93225 + [31054] = &_001678_hash,
93226 + [62195] = &_001679_hash,
93227 + [14976] = &_001680_hash,
93228 + [55676] = &_001681_hash,
93229 + [1025] = &_001682_hash,
93230 + [6921] = &_001683_hash,
93231 + [22158] = &_001684_hash,
93232 + [18050] = &_001685_hash,
93233 + [18612] = &_001686_hash,
93234 + [31107] = &_001687_hash,
93235 + [45212] = &_001688_hash,
93236 + [29599] = &_001689_hash,
93237 + [30827] = &_001690_hash,
93238 + [25086] = &_001691_hash,
93239 + [27146] = &_001692_hash,
93240 + [2467] = &_001693_hash,
93241 + [45786] = &_001694_hash,
93242 + [51909] = &_001695_hash,
93243 + [64604] = &_001696_hash,
93244 + [57819] = &_001697_hash,
93245 + [11001] = &_001698_hash,
93246 + [20326] = &_001699_hash,
93247 + [12682] = &_001700_hash,
93248 + [28932] = &_001701_hash,
93249 + [53491] = &_001702_hash,
93250 + [63894] = &_001703_hash,
93251 + [51191] = &_001704_hash,
93252 + [59759] = &_001705_hash,
93253 + [15691] = &_001706_hash,
93254 + [38786] = &_001707_hash,
93255 + [51546] = &_001708_hash,
93256 + [10121] = &_001709_hash,
93257 + [60786] = &_001710_hash,
93258 + [19952] = &_001712_hash,
93259 + [7271] = &_001715_hash,
93260 + [10729] = &_001716_hash,
93261 + [28883] = &_001717_hash,
93262 + [52042] = &_001718_hash,
93263 + [49606] = &_001719_hash,
93264 + [33243] = &_001720_hash,
93265 + [57341] = &_001721_hash,
93266 + [7978] = &_001722_hash,
93267 + [36330] = &_001723_hash,
93268 + [39035] = &_001724_hash,
93269 + [34498] = &_001725_hash,
93270 + [19789] = &_001726_hash,
93271 + [55685] = &_001727_hash,
93272 + [55419] = &_001728_hash,
93273 + [27798] = &_001729_hash,
93274 + [54599] = &_001730_hash,
93275 + [65522] = &_001731_hash,
93276 + [38111] = &_001732_hash,
93277 + [57077] = &_001733_hash,
93278 + [53053] = &_001734_hash,
93279 + [14190] = &_001735_hash,
93280 + [47037] = &_001736_hash,
93281 + [33296] = &_001737_hash,
93282 + [23803] = &_001738_hash,
93283 + [48773] = &_001739_hash,
93284 + [63014] = &_001740_hash,
93285 + [64392] = &_001741_hash,
93286 + [44203] = &_001742_hash,
93287 + [47717] = &_001743_hash,
93288 + [38399] = &_001744_hash,
93289 + [30385] = &_001745_hash,
93290 + [61693] = &_001746_hash,
93291 + [32049] = &_001747_hash,
93292 + [26133] = &_001748_hash,
93293 + [45038] = &_001749_hash,
93294 + [8582] = &_001751_hash,
93295 + [38182] = &_001753_hash,
93296 + [62457] = &_001754_hash,
93297 + [27937] = &_001755_hash,
93298 + [3795] = &_001756_hash,
93299 + [23228] = &_001757_hash,
93300 + [56511] = &_001758_hash,
93301 + [47807] = &_001759_hash,
93302 + [60528] = &_001760_hash,
93303 + [51858] = &_001761_hash,
93304 + [49183] = &_001762_hash,
93305 + [33807] = &_001763_hash,
93306 + [34791] = &_001764_hash,
93307 + [8150] = &_001765_hash,
93308 + [19691] = &_001767_hash,
93309 + [20519] = &_001770_hash,
93310 + [17144] = &_001771_hash,
93311 + [19394] = &_001772_hash,
93312 + [53730] = &_001773_hash,
93313 + [8447] = &_001774_hash,
93314 + [30004] = &_001775_hash,
93315 + [40939] = &_001776_hash,
93316 + [53674] = &_001777_hash,
93317 + [11820] = &_001778_hash,
93318 + [23401] = &_001779_hash,
93319 + [9641] = &_001780_hash,
93320 + [2721] = &_001781_hash,
93321 + [19700] = &_001782_hash,
93322 + [1619] = &_001783_hash,
93323 + [23272] = &_001784_hash,
93324 + [56424] = &_001785_hash,
93325 + [14483] = &_001786_hash,
93326 + [1599] = &_001787_hash,
93327 + [27604] = &_001788_hash,
93328 + [37219] = &_001789_hash,
93329 + [31958] = &_001790_hash,
93330 + [5273] = &_001791_hash,
93331 + [46712] = &_001792_hash,
93332 + [27259] = &_001794_hash,
93333 + [23674] = &_001797_hash,
93334 + [40509] = &_001798_hash,
93335 + [17549] = &_001799_hash,
93336 + [53992] = &_001800_hash,
93337 + [24062] = &_001801_hash,
93338 + [23371] = &_001802_hash,
93339 + [19115] = &_001803_hash,
93340 + [51532] = &_001804_hash,
93341 + [45193] = &_001805_hash,
93342 + [29340] = &_001806_hash,
93343 + [5048] = &_001807_hash,
93344 + [65040] = &_001808_hash,
93345 + [39155] = &_001809_hash,
93346 + [31406] = &_001810_hash,
93347 + [49182] = &_001811_hash,
93348 + [37695] = &_001812_hash,
93349 + [28432] = &_001813_hash,
93350 + [23482] = &_001814_hash,
93351 + [56550] = &_001815_hash,
93352 + [7374] = &_001816_hash,
93353 + [57050] = &_001817_hash,
93354 + [57011] = &_001818_hash,
93355 + [27529] = &_001819_hash,
93356 + [33662] = &_001820_hash,
93357 + [4314] = &_001821_hash,
93358 + [22812] = &_001822_hash,
93359 + [47555] = &_001823_hash,
93360 + [38737] = &_001824_hash,
93361 + [36101] = &_001826_hash,
93362 + [877] = &_001828_hash,
93363 + [2639] = &_001830_hash,
93364 + [64343] = &_001831_hash,
93365 + [11150] = &_001832_hash,
93366 + [46486] = &_001833_hash,
93367 + [18719] = &_001834_hash,
93368 + [49574] = &_001835_hash,
93369 + [37617] = &_001836_hash,
93370 + [3045] = &_001837_hash,
93371 + [39395] = &_001838_hash,
93372 + [15297] = &_001839_hash,
93373 + [50862] = &_001840_hash,
93374 + [28877] = &_001841_hash,
93375 + [57117] = &_001842_hash,
93376 + [62064] = &_001843_hash,
93377 + [64610] = &_001844_hash,
93378 + [24065] = &_001845_hash,
93379 + [24846] = &_001846_hash,
93380 + [8624] = &_001847_hash,
93381 + [14000] = &_001848_hash,
93382 + [31148] = &_001849_hash,
93383 + [62594] = &_001850_hash,
93384 + [39210] = &_001851_hash,
93385 + [2077] = &_001852_hash,
93386 + [23497] = &_001853_hash,
93387 + [34512] = &_001854_hash,
93388 + [16268] = &_001856_hash,
93389 + [14562] = &_001857_hash,
93390 + [17606] = &_001859_hash,
93391 + [25654] = &_001860_hash,
93392 + [56078] = &_001861_hash,
93393 + [61088] = &_001862_hash,
93394 + [53442] = &_001863_hash,
93395 + [54456] = &_001864_hash,
93396 + [22038] = &_001865_hash,
93397 + [58394] = &_001866_hash,
93398 + [38953] = &_001867_hash,
93399 + [16109] = &_001868_hash,
93400 + [3812] = &_001869_hash,
93401 + [5084] = &_001870_hash,
93402 + [41893] = &_001871_hash,
93403 + [45486] = &_001872_hash,
93404 + [50226] = &_001873_hash,
93405 + [63694] = &_001874_hash,
93406 + [56763] = &_001875_hash,
93407 + [20905] = &_001876_hash,
93408 + [13080] = &_001877_hash,
93409 + [54700] = &_001878_hash,
93410 + [40947] = &_001879_hash,
93411 + [32645] = &_001880_hash,
93412 + [57462] = &_001881_hash,
93413 + [33853] = &_001882_hash,
93414 + [57940] = &_001883_hash,
93415 + [45583] = &_001884_hash,
93416 + [49704] = &_001885_hash,
93417 + [39232] = &_001886_hash,
93418 + [5140] = &_001887_hash,
93419 + [45726] = &_001888_hash,
93420 + [35392] = &_001889_hash,
93421 + [44895] = &_001890_hash,
93422 + [17219] = &_001891_hash,
93423 + [50185] = &_001892_hash,
93424 + [3062] = &_001893_hash,
93425 + [9784] = &_001894_hash,
93426 + [52513] = &_001895_hash,
93427 + [52678] = &_001896_hash,
93428 + [36258] = &_001897_hash,
93429 + [2885] = &_001898_hash,
93430 + [11588] = &_001899_hash,
93431 + [65337] = &_001900_hash,
93432 + [19329] = &_001901_hash,
93433 + [23791] = &_001902_hash,
93434 + [38078] = &_001903_hash,
93435 + [42270] = &_001904_hash,
93436 + [30475] = &_001905_hash,
93437 + [25564] = &_001906_hash,
93438 + [33581] = &_001907_hash,
93439 + [59644] = &_001908_hash,
93440 + [5800] = &_001909_hash,
93441 + [42227] = &_001910_hash,
93442 + [54718] = &_001911_hash,
93443 + [41255] = &_001912_hash,
93444 + [31502] = &_001913_hash,
93445 + [44929] = &_001914_hash,
93446 + [47332] = &_001915_hash,
93447 + [10107] = &_001916_hash,
93448 + [47137] = &_001917_hash,
93449 + [26017] = &_001918_hash,
93450 + [41477] = &_001919_hash,
93451 + [6656] = &_001920_hash,
93452 + [50198] = &_001921_hash,
93453 + [48909] = &_001922_hash,
93454 + [9474] = &_001923_hash,
93455 + [58554] = &_001924_hash,
93456 + [45747] = &_001925_hash,
93457 + [43151] = &_001926_hash,
93458 + [15626] = &_001927_hash,
93459 + [17364] = &_001928_hash,
93460 + [15077] = &_001929_hash,
93461 + [31912] = &_001930_hash,
93462 + [2803] = &_001931_hash,
93463 + [42715] = &_001932_hash,
93464 + [12552] = &_001933_hash,
93465 + [13099] = &_001934_hash,
93466 + [40973] = &_001935_hash,
93467 + [20988] = &_001936_hash,
93468 + [16939] = &_001937_hash,
93469 + [48587] = &_001938_hash,
93470 + [52889] = &_001939_hash,
93471 + [38776] = &_001940_hash,
93472 + [58608] = &_001941_hash,
93473 + [4360] = &_001942_hash,
93474 + [53447] = &_001943_hash,
93475 + [25355] = &_001944_hash,
93476 + [14955] = &_001946_hash,
93477 + [5428] = &_001947_hash,
93478 + [11063] = &_001948_hash,
93479 + [59852] = &_001949_hash,
93480 + [45648] = &_001950_hash,
93481 + [21855] = &_001951_hash,
93482 + [54573] = &_001952_hash,
93483 + [56316] = &_001953_hash,
93484 +};
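
The array that ends here is the tail of the generated size_overflow_hash.h table: each non-empty slot is indexed by a 16-bit hash of a function (and file) name and points to a chain of struct size_overflow_hash entries, which the plugin below walks in get_function_hash(). A minimal, self-contained sketch of how such a table is consumed follows; the entry names, the slot index, the function/file strings and the cut-down field list are all hypothetical, and the real struct (with param1..param9) is declared at the top of size_overflow_plugin.c below.

/* Illustrative only: a cut-down model of the generated table.  The entry
 * names, slot index 12345, and the function/file strings are made up. */
#include <stdio.h>
#include <string.h>

struct size_overflow_hash {
	struct size_overflow_hash *next;	/* collision chain */
	const char *name;			/* function name */
	const char *file;			/* file that defines it */
	unsigned short param1:1, param2:1, param3:1;
};

static struct size_overflow_hash _000002_hash = { NULL, "write_buf", "fs/example.c", 0, 0, 1 };
static struct size_overflow_hash _000001_hash = { &_000002_hash, "read_buf", "fs/example.c", 0, 0, 1 };

static struct size_overflow_hash *table[65536] = {
	[12345] = &_000001_hash,	/* two entries sharing one 16-bit slot */
};

static struct size_overflow_hash *lookup(unsigned int hash, const char *name)
{
	struct size_overflow_hash *entry;

	for (entry = table[hash & 0xffff]; entry; entry = entry->next)
		if (!strcmp(entry->name, name))
			return entry;
	return NULL;
}

int main(void)
{
	struct size_overflow_hash *e = lookup(12345, "write_buf");

	printf("%s: argument 3 marked = %d\n", e ? e->name : "(none)", e ? e->param3 : 0);
	return 0;
}

The plugin never iterates the table; it only performs the indexed lookup shown here, after computing the slot with the CrapWow() routine defined below.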
93485 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
93486 new file mode 100644
93487 index 0000000..92b8ee6
93488 --- /dev/null
93489 +++ b/tools/gcc/size_overflow_plugin.c
93490 @@ -0,0 +1,1188 @@
93491 +/*
93492 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
93493 + * Licensed under the GPL v2, or (at your option) v3
93494 + *
93495 + * Homepage:
93496 + * http://www.grsecurity.net/~ephox/overflow_plugin/
93497 + *
93498 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
93499 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
93500 + * The recomputed argument is checked against INT_MAX; on overflow an event is logged and the triggering process is killed.
93501 + *
93502 + * Usage:
93503 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o size_overflow_plugin.so size_overflow_plugin.c
93504 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
93505 + */
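
As a usage sketch to go with the header above: hypothetical kernel-style declarations carrying the attribute this plugin registers. Only the "size_overflow" attribute name and its 1-based argument positions come from the plugin; the function names are made up, and check_arg_type() below restricts the marked arguments to integer, enum, or void*/integer-pointer types.

/* Hypothetical declarations; only the attribute form is taken from the plugin. */
void *alloc_buffer(unsigned long count)
		__attribute__((size_overflow(1)));
void *alloc_array(unsigned long nmemb, unsigned long elem_size)
		__attribute__((size_overflow(1, 2)));

Calls to annotated functions are rewritten by handle_function_by_attribute(); unannotated functions can still be covered through the generated hash table above, via handle_function_by_hash().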
93506 +
93507 +#include "gcc-plugin.h"
93508 +#include "config.h"
93509 +#include "system.h"
93510 +#include "coretypes.h"
93511 +#include "tree.h"
93512 +#include "tree-pass.h"
93513 +#include "intl.h"
93514 +#include "plugin-version.h"
93515 +#include "tm.h"
93516 +#include "toplev.h"
93517 +#include "function.h"
93518 +#include "tree-flow.h"
93519 +#include "plugin.h"
93520 +#include "gimple.h"
93521 +#include "c-common.h"
93522 +#include "diagnostic.h"
93523 +#include "cfgloop.h"
93524 +
93525 +struct size_overflow_hash {
93526 + struct size_overflow_hash *next;
93527 + const char *name;
93528 + const char *file;
93529 + unsigned short param1:1;
93530 + unsigned short param2:1;
93531 + unsigned short param3:1;
93532 + unsigned short param4:1;
93533 + unsigned short param5:1;
93534 + unsigned short param6:1;
93535 + unsigned short param7:1;
93536 + unsigned short param8:1;
93537 + unsigned short param9:1;
93538 +};
93539 +
93540 +#include "size_overflow_hash.h"
93541 +
93542 +#define __unused __attribute__((__unused__))
93543 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
93544 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
93545 +#define BEFORE_STMT true
93546 +#define AFTER_STMT false
93547 +#define CREATE_NEW_VAR NULL_TREE
93548 +
93549 +int plugin_is_GPL_compatible;
93550 +void debug_gimple_stmt(gimple gs);
93551 +
93552 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
93553 +static tree signed_size_overflow_type;
93554 +static tree unsigned_size_overflow_type;
93555 +static tree report_size_overflow_decl;
93556 +static tree const_char_ptr_type_node;
93557 +static unsigned int handle_function(void);
93558 +static bool file_match = true;
93559 +
93560 +static struct plugin_info size_overflow_plugin_info = {
93561 + .version = "20120521beta",
93562 + .help = "no-size_overflow\tturn off size overflow checking\n",
93563 +};
93564 +
93565 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
93566 +{
93567 + unsigned int arg_count = type_num_arguments(*node);
93568 +
93569 + for (; args; args = TREE_CHAIN(args)) {
93570 + tree position = TREE_VALUE(args);
93571 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
93572 + error("handle_size_overflow_attribute: overflow parameter outside range.");
93573 + *no_add_attrs = true;
93574 + }
93575 + }
93576 + return NULL_TREE;
93577 +}
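
For illustration, two hypothetical declarations that this handler would reject with the "overflow parameter outside range" error, because the given position does not fall within 1..arg_count:

void *bad_zero(unsigned long n) __attribute__((size_overflow(0)));	/* position < 1 */
void *bad_high(unsigned long n) __attribute__((size_overflow(2)));	/* only one argument */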
93578 +
93579 +static struct attribute_spec no_size_overflow_attr = {
93580 + .name = "size_overflow",
93581 + .min_length = 1,
93582 + .max_length = -1,
93583 + .decl_required = false,
93584 + .type_required = true,
93585 + .function_type_required = true,
93586 + .handler = handle_size_overflow_attribute
93587 +};
93588 +
93589 +static void register_attributes(void __unused *event_data, void __unused *data)
93590 +{
93591 + register_attribute(&no_size_overflow_attr);
93592 +}
93593 +
93594 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
93595 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
93596 +{
93597 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
93598 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
93599 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
93600 +
93601 + const unsigned int m = 0x57559429;
93602 + const unsigned int n = 0x5052acdb;
93603 + const unsigned int *key4 = (const unsigned int *)key;
93604 + unsigned int h = len;
93605 + unsigned int k = len + seed + n;
93606 + unsigned long long p;
93607 +
93608 + while (len >= 8) {
93609 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
93610 + len -= 8;
93611 + }
93612 + if (len >= 4) {
93613 + cwmixb(key4[0]) key4 += 1;
93614 + len -= 4;
93615 + }
93616 + if (len)
93617 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
93618 + cwmixb(h ^ (k + n));
93619 + return k ^ h;
93620 +
93621 +#undef cwfold
93622 +#undef cwmixa
93623 +#undef cwmixb
93624 +}
93625 +
93626 +static inline unsigned int get_hash_num(const char *fndecl, const char *loc_file, unsigned int seed)
93627 +{
93628 + unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
93629 + unsigned int file = CrapWow(loc_file, strlen(loc_file), seed) & 0xffff;
93630 +
93631 + if (file_match)
93632 + return fn ^ file;
93633 + else
93634 + return fn;
93635 +}
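
A self-contained demo of how the 16-bit slot indices in the generated table are derived. It copies the CrapWow() routine above verbatim so it compiles on its own; the function and file names fed to it are made up, so the printed index is only illustrative.

#include <stdio.h>
#include <string.h>

/* Same routine as in the plugin above, copied for a stand-alone build. */
static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
{
#define cwfold(a, b, lo, hi) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
#define cwmixa(in) { cwfold(in, m, k, h); }
#define cwmixb(in) { cwfold(in, n, h, k); }
	const unsigned int m = 0x57559429;
	const unsigned int n = 0x5052acdb;
	const unsigned int *key4 = (const unsigned int *)key;
	unsigned int h = len;
	unsigned int k = len + seed + n;
	unsigned long long p;

	while (len >= 8) {
		cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
		len -= 8;
	}
	if (len >= 4) {
		cwmixb(key4[0]) key4 += 1;
		len -= 4;
	}
	if (len)
		cwmixa(key4[0] & ((1 << (len * 8)) - 1));
	cwmixb(h ^ (k + n));
	return k ^ h;
#undef cwfold
#undef cwmixa
#undef cwmixb
}

int main(void)
{
	const char *fn = "read_buf";		/* hypothetical function name */
	const char *file = "fs/example.c";	/* hypothetical source file   */
	unsigned int fn_hash = CrapWow(fn, strlen(fn), 0) & 0xffff;
	unsigned int file_hash = CrapWow(file, strlen(file), 0) & 0xffff;

	/* with file matching enabled, the index is the XOR of both hashes;
	 * with file_match == false it is just the function-name hash */
	printf("size_overflow_hash[] index: %u\n", fn_hash ^ file_hash);
	return 0;
}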
93636 +
93637 +static inline tree get_original_function_decl(tree fndecl)
93638 +{
93639 + if (DECL_ABSTRACT_ORIGIN(fndecl))
93640 + return DECL_ABSTRACT_ORIGIN(fndecl);
93641 + return fndecl;
93642 +}
93643 +
93644 +static inline gimple get_def_stmt(tree node)
93645 +{
93646 + gcc_assert(TREE_CODE(node) == SSA_NAME);
93647 + return SSA_NAME_DEF_STMT(node);
93648 +}
93649 +
93650 +static struct size_overflow_hash *get_function_hash(tree fndecl, const char *loc_file)
93651 +{
93652 + unsigned int hash;
93653 + struct size_overflow_hash *entry;
93654 + const char *func_name = NAME(fndecl);
93655 +
93656 + hash = get_hash_num(NAME(fndecl), loc_file, 0);
93657 +
93658 + entry = size_overflow_hash[hash];
93659 + while (entry) {
93660 + if (!strcmp(entry->name, func_name) && (!file_match || !strcmp(entry->file, loc_file)))
93661 + return entry;
93662 + entry = entry->next;
93663 + }
93664 +
93665 + return NULL;
93666 +}
93667 +
93668 +static void check_arg_type(tree var)
93669 +{
93670 + tree type = TREE_TYPE(var);
93671 + enum tree_code code = TREE_CODE(type);
93672 +
93673 + gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
93674 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
93675 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
93676 +}
93677 +
93678 +static int find_arg_number(tree arg, tree func)
93679 +{
93680 + tree var;
93681 + bool match = false;
93682 + unsigned int argnum = 1;
93683 +
93684 + if (TREE_CODE(arg) == SSA_NAME)
93685 + arg = SSA_NAME_VAR(arg);
93686 +
93687 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
93688 + if (strcmp(NAME(arg), NAME(var))) {
93689 + argnum++;
93690 + continue;
93691 + }
93692 + check_arg_type(var);
93693 +
93694 + match = true;
93695 + if (!TYPE_UNSIGNED(TREE_TYPE(var)))
93696 + return 0;
93697 + break;
93698 + }
93699 + if (!match) {
93700 + warning(0, "find_arg_number: cannot find the %s argument in %s", NAME(arg), NAME(func));
93701 + return 0;
93702 + }
93703 + return argnum;
93704 +}
93705 +
93706 +static void print_missing_msg(tree func, const char *filename, unsigned int argnum)
93707 +{
93708 + unsigned int new_hash;
93709 + location_t loc = DECL_SOURCE_LOCATION(func);
93710 + const char *curfunc = NAME(func);
93711 +
93712 + new_hash = get_hash_num(curfunc, filename, 0);
93713 +// inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+%s+", curfunc, curfunc, argnum, new_hash, filename);
93714 +}
93715 +
93716 +static void check_missing_attribute(tree arg)
93717 +{
93718 + tree type, func = get_original_function_decl(current_function_decl);
93719 + unsigned int argnum;
93720 + struct size_overflow_hash *hash;
93721 + const char *filename;
93722 +
93723 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
93724 +
93725 + type = TREE_TYPE(arg);
93726 + // skip function pointers
93727 + if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
93728 + return;
93729 +
93730 + if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
93731 + return;
93732 +
93733 + argnum = find_arg_number(arg, func);
93734 + if (argnum == 0)
93735 + return;
93736 +
93737 + filename = DECL_SOURCE_FILE(func);
93738 +
93739 + hash = get_function_hash(func, filename);
93740 + if (!hash) {
93741 + print_missing_msg(func, filename, argnum);
93742 + return;
93743 + }
93744 +
93745 +#define check_param(num) \
93746 + if (num == argnum && hash->param##num) \
93747 + return;
93748 + check_param(1);
93749 + check_param(2);
93750 + check_param(3);
93751 + check_param(4);
93752 + check_param(5);
93753 + check_param(6);
93754 + check_param(7);
93755 + check_param(8);
93756 + check_param(9);
93757 +#undef check_param
93758 +
93759 + print_missing_msg(func, filename, argnum);
93760 +}
93761 +
93762 +static tree create_new_var(tree type)
93763 +{
93764 + tree new_var = create_tmp_var(type, "cicus");
93765 +
93766 + add_referenced_var(new_var);
93767 + mark_sym_for_renaming(new_var);
93768 + return new_var;
93769 +}
93770 +
93771 +static bool is_bool(tree node)
93772 +{
93773 + tree type;
93774 +
93775 + if (node == NULL_TREE)
93776 + return false;
93777 +
93778 + type = TREE_TYPE(node);
93779 + if (!INTEGRAL_TYPE_P(type))
93780 + return false;
93781 + if (TREE_CODE(type) == BOOLEAN_TYPE)
93782 + return true;
93783 + if (TYPE_PRECISION(type) == 1)
93784 + return true;
93785 + return false;
93786 +}
93787 +
93788 +static tree cast_a_tree(tree type, tree var)
93789 +{
93790 + gcc_assert(fold_convertible_p(type, var));
93791 +
93792 + return fold_convert(type, var);
93793 +}
93794 +
93795 +static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
93796 +{
93797 + gimple assign;
93798 +
93799 + if (new_var == CREATE_NEW_VAR)
93800 + new_var = create_new_var(type);
93801 +
93802 + assign = gimple_build_assign(new_var, cast_a_tree(type, var));
93803 + gimple_set_location(assign, loc);
93804 + gimple_set_lhs(assign, make_ssa_name(new_var, assign));
93805 +
93806 + return assign;
93807 +}
93808 +
93809 +static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
93810 +{
93811 + tree oldstmt_rhs1;
93812 + enum tree_code code;
93813 + gimple stmt;
93814 + gimple_stmt_iterator gsi;
93815 +
93816 + if (!*potentionally_overflowed)
93817 + return NULL_TREE;
93818 +
93819 + if (rhs1 == NULL_TREE) {
93820 + debug_gimple_stmt(oldstmt);
93821 + error("create_assign: rhs1 is NULL_TREE");
93822 + gcc_unreachable();
93823 + }
93824 +
93825 + oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
93826 + code = TREE_CODE(oldstmt_rhs1);
93827 + if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
93828 + check_missing_attribute(oldstmt_rhs1);
93829 +
93830 + stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
93831 + gsi = gsi_for_stmt(oldstmt);
93832 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
93833 + basic_block next_bb, cur_bb;
93834 + edge e;
93835 +
93836 + gcc_assert(before == false);
93837 + gcc_assert(stmt_can_throw_internal(oldstmt));
93838 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
93839 + gcc_assert(!gsi_end_p(gsi));
93840 +
93841 + cur_bb = gimple_bb(oldstmt);
93842 + next_bb = cur_bb->next_bb;
93843 + e = find_edge(cur_bb, next_bb);
93844 + gcc_assert(e != NULL);
93845 + gcc_assert(e->flags & EDGE_FALLTHRU);
93846 +
93847 + gsi = gsi_after_labels(next_bb);
93848 + gcc_assert(!gsi_end_p(gsi));
93849 + before = true;
93850 + }
93851 + if (before)
93852 + gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
93853 + else
93854 + gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
93855 + update_stmt(stmt);
93856 + pointer_set_insert(visited, oldstmt);
93857 + return gimple_get_lhs(stmt);
93858 +}
93859 +
93860 +static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
93861 +{
93862 + tree new_var, lhs = gimple_get_lhs(oldstmt);
93863 + gimple stmt;
93864 + gimple_stmt_iterator gsi;
93865 +
93866 + if (!*potentionally_overflowed)
93867 + return NULL_TREE;
93868 +
93869 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
93870 + rhs1 = gimple_assign_rhs1(oldstmt);
93871 + rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
93872 + }
93873 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
93874 + rhs2 = gimple_assign_rhs2(oldstmt);
93875 + rhs2 = create_assign(visited, potentionally_overflowed, oldstmt, rhs2, BEFORE_STMT);
93876 + }
93877 +
93878 + stmt = gimple_copy(oldstmt);
93879 + gimple_set_location(stmt, gimple_location(oldstmt));
93880 +
93881 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
93882 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
93883 +
93884 + if (is_bool(lhs))
93885 + new_var = SSA_NAME_VAR(lhs);
93886 + else
93887 + new_var = create_new_var(signed_size_overflow_type);
93888 + new_var = make_ssa_name(new_var, stmt);
93889 + gimple_set_lhs(stmt, new_var);
93890 +
93891 + if (rhs1 != NULL_TREE) {
93892 + if (!gimple_assign_cast_p(oldstmt))
93893 + rhs1 = cast_a_tree(signed_size_overflow_type, rhs1);
93894 + gimple_assign_set_rhs1(stmt, rhs1);
93895 + }
93896 +
93897 + if (rhs2 != NULL_TREE)
93898 + gimple_assign_set_rhs2(stmt, rhs2);
93899 +#if BUILDING_GCC_VERSION >= 4007
93900 + if (rhs3 != NULL_TREE)
93901 + gimple_assign_set_rhs3(stmt, rhs3);
93902 +#endif
93903 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
93904 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
93905 +
93906 + gsi = gsi_for_stmt(oldstmt);
93907 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
93908 + update_stmt(stmt);
93909 + pointer_set_insert(visited, oldstmt);
93910 + return gimple_get_lhs(stmt);
93911 +}
93912 +
93913 +static gimple overflow_create_phi_node(gimple oldstmt, tree var)
93914 +{
93915 + basic_block bb;
93916 + gimple phi;
93917 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
93918 +
93919 + bb = gsi_bb(gsi);
93920 +
93921 + phi = create_phi_node(var, bb);
93922 + gsi = gsi_last(phi_nodes(bb));
93923 + gsi_remove(&gsi, false);
93924 +
93925 + gsi = gsi_for_stmt(oldstmt);
93926 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
93927 + gimple_set_bb(phi, bb);
93928 + return phi;
93929 +}
93930 +
93931 +static tree signed_cast_constant(tree node)
93932 +{
93933 + gcc_assert(is_gimple_constant(node));
93934 +
93935 + return cast_a_tree(signed_size_overflow_type, node);
93936 +}
93937 +
93938 +static basic_block create_a_first_bb(void)
93939 +{
93940 + basic_block first_bb;
93941 +
93942 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
93943 + if (dom_info_available_p(CDI_DOMINATORS))
93944 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
93945 + return first_bb;
93946 +}
93947 +
93948 +static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
93949 +{
93950 + basic_block bb;
93951 + gimple newstmt, def_stmt;
93952 + gimple_stmt_iterator gsi;
93953 +
93954 + newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
93955 + if (TREE_CODE(arg) == SSA_NAME) {
93956 + def_stmt = get_def_stmt(arg);
93957 + if (gimple_code(def_stmt) != GIMPLE_NOP) {
93958 + gsi = gsi_for_stmt(def_stmt);
93959 + gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
93960 + return newstmt;
93961 + }
93962 + }
93963 +
93964 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
93965 + if (bb->index == 0)
93966 + bb = create_a_first_bb();
93967 + gsi = gsi_after_labels(bb);
93968 + gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
93969 + return newstmt;
93970 +}
93971 +
93972 +static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
93973 +{
93974 + gimple newstmt;
93975 + gimple_stmt_iterator gsi;
93976 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
93977 + gimple def_newstmt = get_def_stmt(new_rhs);
93978 +
93979 + gsi_insert = gsi_insert_after;
93980 + gsi = gsi_for_stmt(def_newstmt);
93981 +
93982 + switch (gimple_code(get_def_stmt(arg))) {
93983 + case GIMPLE_PHI:
93984 + newstmt = gimple_build_assign(new_var, new_rhs);
93985 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
93986 + gsi_insert = gsi_insert_before;
93987 + break;
93988 + case GIMPLE_ASM:
93989 + case GIMPLE_CALL:
93990 + newstmt = gimple_build_assign(new_var, new_rhs);
93991 + break;
93992 + case GIMPLE_ASSIGN:
93993 + newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
93994 + break;
93995 + default:
93996 + /* unknown gimple_code (handle_new_phi_arg) */
93997 + gcc_unreachable();
93998 + }
93999 +
94000 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
94001 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
94002 + update_stmt(newstmt);
94003 + return newstmt;
94004 +}
94005 +
94006 +static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
94007 +{
94008 + gimple newstmt;
94009 + tree new_rhs;
94010 +
94011 + new_rhs = expand(visited, potentionally_overflowed, arg);
94012 +
94013 + if (new_rhs == NULL_TREE)
94014 + return NULL_TREE;
94015 +
94016 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
94017 + return gimple_get_lhs(newstmt);
94018 +}
94019 +
94020 +static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
94021 +{
94022 + gimple phi;
94023 + tree new_var = create_new_var(signed_size_overflow_type);
94024 + unsigned int i, n = gimple_phi_num_args(oldstmt);
94025 +
94026 + pointer_set_insert(visited, oldstmt);
94027 + phi = overflow_create_phi_node(oldstmt, new_var);
94028 + for (i = 0; i < n; i++) {
94029 + tree arg, lhs;
94030 +
94031 + arg = gimple_phi_arg_def(oldstmt, i);
94032 + if (is_gimple_constant(arg))
94033 + arg = signed_cast_constant(arg);
94034 + lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
94035 + if (lhs == NULL_TREE)
94036 + lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
94037 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
94038 + }
94039 +
94040 + update_stmt(phi);
94041 + return gimple_phi_result(phi);
94042 +}
94043 +
94044 +static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
94045 +{
94046 + gimple def_stmt = get_def_stmt(var);
94047 + tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
94048 +
94049 + *potentionally_overflowed = true;
94050 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
94051 + if (new_rhs1 == NULL_TREE) {
94052 + if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
94053 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
94054 + else
94055 + return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
94056 + }
94057 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
94058 +}
94059 +
94060 +static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
94061 +{
94062 + gimple def_stmt = get_def_stmt(var);
94063 + tree rhs1 = gimple_assign_rhs1(def_stmt);
94064 +
94065 + if (is_gimple_constant(rhs1))
94066 + return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast_constant(rhs1), NULL_TREE, NULL_TREE);
94067 +
94068 + gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
94069 + switch (TREE_CODE(rhs1)) {
94070 + case SSA_NAME:
94071 + return handle_unary_rhs(visited, potentionally_overflowed, var);
94072 +
94073 + case ARRAY_REF:
94074 + case BIT_FIELD_REF:
94075 + case ADDR_EXPR:
94076 + case COMPONENT_REF:
94077 + case INDIRECT_REF:
94078 +#if BUILDING_GCC_VERSION >= 4006
94079 + case MEM_REF:
94080 +#endif
94081 + case PARM_DECL:
94082 + case TARGET_MEM_REF:
94083 + case VAR_DECL:
94084 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
94085 +
94086 + default:
94087 + debug_gimple_stmt(def_stmt);
94088 + debug_tree(rhs1);
94089 + gcc_unreachable();
94090 + }
94091 +}
94092 +
94093 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
94094 +{
94095 + gimple cond_stmt;
94096 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
94097 +
94098 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
94099 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
94100 + update_stmt(cond_stmt);
94101 +}
94102 +
94103 +static tree create_string_param(tree string)
94104 +{
94105 + tree i_type, a_type;
94106 + int length = TREE_STRING_LENGTH(string);
94107 +
94108 + gcc_assert(length > 0);
94109 +
94110 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
94111 + a_type = build_array_type(char_type_node, i_type);
94112 +
94113 + TREE_TYPE(string) = a_type;
94114 + TREE_CONSTANT(string) = 1;
94115 + TREE_READONLY(string) = 1;
94116 +
94117 + return build1(ADDR_EXPR, ptr_type_node, string);
94118 +}
94119 +
94120 +static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
94121 +{
94122 + gimple func_stmt, def_stmt;
94123 + tree current_func, loc_file, loc_line;
94124 + expanded_location xloc;
94125 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
94126 +
94127 + def_stmt = get_def_stmt(arg);
94128 + xloc = expand_location(gimple_location(def_stmt));
94129 +
94130 + if (!gimple_has_location(def_stmt)) {
94131 + xloc = expand_location(gimple_location(stmt));
94132 + if (!gimple_has_location(stmt))
94133 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
94134 + }
94135 +
94136 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
94137 +
94138 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
94139 + loc_file = create_string_param(loc_file);
94140 +
94141 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
94142 + current_func = create_string_param(current_func);
94143 +
94144 + // void report_size_overflow(const char *file, unsigned int line, const char *func)
94145 + func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
94146 +
94147 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
94148 +}
94149 +
94150 +static void __unused print_the_code_insertions(gimple stmt)
94151 +{
94152 + location_t loc = gimple_location(stmt);
94153 +
94154 + inform(loc, "Integer size_overflow check applied here.");
94155 +}
94156 +
94157 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
94158 +{
94159 + basic_block cond_bb, join_bb, bb_true;
94160 + edge e;
94161 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
94162 +
94163 + cond_bb = gimple_bb(stmt);
94164 + gsi_prev(&gsi);
94165 + if (gsi_end_p(gsi))
94166 + e = split_block_after_labels(cond_bb);
94167 + else
94168 + e = split_block(cond_bb, gsi_stmt(gsi));
94169 + cond_bb = e->src;
94170 + join_bb = e->dest;
94171 + e->flags = EDGE_FALSE_VALUE;
94172 + e->probability = REG_BR_PROB_BASE;
94173 +
94174 + bb_true = create_empty_bb(cond_bb);
94175 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
94176 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
94177 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
94178 +
94179 + if (dom_info_available_p(CDI_DOMINATORS)) {
94180 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
94181 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
94182 + }
94183 +
94184 + if (current_loops != NULL) {
94185 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
94186 + add_bb_to_loop(bb_true, cond_bb->loop_father);
94187 + }
94188 +
94189 + insert_cond(cond_bb, arg, cond_code, type_value);
94190 + insert_cond_result(bb_true, stmt, arg);
94191 +
94192 +// print_the_code_insertions(stmt);
94193 +}
94194 +
94195 +static tree get_type_for_check(tree rhs)
94196 +{
94197 + tree def_rhs;
94198 + gimple def_stmt = get_def_stmt(rhs);
94199 +
94200 + if (!gimple_assign_cast_p(def_stmt))
94201 + return TREE_TYPE(rhs);
94202 + def_rhs = gimple_assign_rhs1(def_stmt);
94203 + if (TREE_CODE(TREE_TYPE(def_rhs)) == INTEGER_TYPE)
94204 + return TREE_TYPE(def_rhs);
94205 + return TREE_TYPE(rhs);
94206 +}
94207 +
94208 +static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
94209 +{
94210 + gimple ucast_stmt;
94211 + gimple_stmt_iterator gsi;
94212 + location_t loc = gimple_location(stmt);
94213 +
94214 + ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
94215 + gsi = gsi_for_stmt(stmt);
94216 + gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
94217 + return ucast_stmt;
94218 +}
94219 +
94220 +static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
94221 +{
94222 + tree type_max, type_min, rhs_type;
94223 + gimple ucast_stmt;
94224 +
94225 + if (!*potentionally_overflowed)
94226 + return;
94227 +
94228 + rhs_type = get_type_for_check(rhs);
94229 +
94230 + if (TYPE_UNSIGNED(rhs_type)) {
94231 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
94232 + type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
94233 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
94234 + } else {
94235 + type_max = cast_a_tree(signed_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
94236 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
94237 +
94238 + type_min = cast_a_tree(signed_size_overflow_type, TYPE_MIN_VALUE(rhs_type));
94239 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
94240 + }
94241 +}
94242 +
94243 +static tree change_assign_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt, tree orig_rhs)
94244 +{
94245 + gimple assign;
94246 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
94247 + tree new_rhs, origtype = TREE_TYPE(orig_rhs);
94248 +
94249 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
94250 +
94251 + new_rhs = expand(visited, potentionally_overflowed, orig_rhs);
94252 + if (new_rhs == NULL_TREE)
94253 + return NULL_TREE;
94254 +
94255 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
94256 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
94257 + update_stmt(assign);
94258 + return gimple_get_lhs(assign);
94259 +}
94260 +
94261 +static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
94262 +{
94263 + tree new_rhs, cast_rhs;
94264 +
94265 + if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
94266 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
94267 +
94268 + new_rhs = change_assign_rhs(visited, potentionally_overflowed, def_stmt, rhs);
94269 + if (new_rhs != NULL_TREE) {
94270 + gimple_assign_set_rhs(def_stmt, new_rhs);
94271 + update_stmt(def_stmt);
94272 +
94273 + cast_rhs = gimple_assign_rhs1(get_def_stmt(new_rhs));
94274 +
94275 + check_size_overflow(def_stmt, cast_rhs, rhs, potentionally_overflowed);
94276 + }
94277 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
94278 +}
94279 +
94280 +static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
94281 +{
94282 + tree rhs1, rhs2;
94283 + gimple def_stmt = get_def_stmt(var);
94284 + tree new_rhs1 = NULL_TREE;
94285 + tree new_rhs2 = NULL_TREE;
94286 +
94287 + rhs1 = gimple_assign_rhs1(def_stmt);
94288 + rhs2 = gimple_assign_rhs2(def_stmt);
94289 +
94290 + /* no DImode/TImode division in the 32/64 bit kernel */
94291 + switch (gimple_assign_rhs_code(def_stmt)) {
94292 + case RDIV_EXPR:
94293 + case TRUNC_DIV_EXPR:
94294 + case CEIL_DIV_EXPR:
94295 + case FLOOR_DIV_EXPR:
94296 + case ROUND_DIV_EXPR:
94297 + case TRUNC_MOD_EXPR:
94298 + case CEIL_MOD_EXPR:
94299 + case FLOOR_MOD_EXPR:
94300 + case ROUND_MOD_EXPR:
94301 + case EXACT_DIV_EXPR:
94302 + case POINTER_PLUS_EXPR:
94303 + case BIT_AND_EXPR:
94304 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
94305 + default:
94306 + break;
94307 + }
94308 +
94309 + *potentionally_overflowed = true;
94310 +
94311 + if (TREE_CODE(rhs1) == SSA_NAME)
94312 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
94313 + if (TREE_CODE(rhs2) == SSA_NAME)
94314 + new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
94315 +
94316 + if (is_gimple_constant(rhs2))
94317 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, signed_cast_constant(rhs2), &gimple_assign_set_rhs1);
94318 +
94319 + if (is_gimple_constant(rhs1))
94320 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, signed_cast_constant(rhs1), new_rhs2, &gimple_assign_set_rhs2);
94321 +
94322 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
94323 +}
94324 +
94325 +#if BUILDING_GCC_VERSION >= 4007
94326 +static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
94327 +{
94328 + if (is_gimple_constant(rhs))
94329 + return signed_cast_constant(rhs);
94330 + if (TREE_CODE(rhs) != SSA_NAME)
94331 + return NULL_TREE;
94332 + return expand(visited, potentionally_overflowed, rhs);
94333 +}
94334 +
94335 +static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
94336 +{
94337 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
94338 + gimple def_stmt = get_def_stmt(var);
94339 +
94340 + *potentionally_overflowed = true;
94341 +
94342 + rhs1 = gimple_assign_rhs1(def_stmt);
94343 + rhs2 = gimple_assign_rhs2(def_stmt);
94344 + rhs3 = gimple_assign_rhs3(def_stmt);
94345 + new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
94346 + new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
94347 + new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
94348 +
94349 + if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
94350 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
94351 + error("handle_ternary_ops: unknown rhs");
94352 + gcc_unreachable();
94353 +}
94354 +#endif
94355 +
94356 +static void set_size_overflow_type(tree node)
94357 +{
94358 + switch (TYPE_MODE(TREE_TYPE(node))) {
94359 + case SImode:
94360 + signed_size_overflow_type = intDI_type_node;
94361 + unsigned_size_overflow_type = unsigned_intDI_type_node;
94362 + break;
94363 + case DImode:
94364 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
94365 + signed_size_overflow_type = intDI_type_node;
94366 + unsigned_size_overflow_type = unsigned_intDI_type_node;
94367 + } else {
94368 + signed_size_overflow_type = intTI_type_node;
94369 + unsigned_size_overflow_type = unsigned_intTI_type_node;
94370 + }
94371 + break;
94372 + default:
94373 + error("set_size_overflow_type: unsupported gcc configuration.");
94374 + gcc_unreachable();
94375 + }
94376 +}
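
A quick user-space check (sketch) of the widened types chosen above: SImode arguments are recomputed in a DImode temporary (long long), and on targets where long is 64 bits DImode arguments are widened to TImode, for which gcc's __int128 stands in here; 32-bit targets keep DImode arguments in DImode.

#include <stdio.h>

int main(void)
{
	printf("SImode argument recomputed in %zu-bit type\n",
	       8 * sizeof(long long));			/* DImode */
#ifdef __SIZEOF_INT128__
	printf("DImode argument recomputed in %zu-bit type\n",
	       8 * sizeof(__int128));			/* TImode, 64-bit gcc only */
#endif
	return 0;
}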
94377 +
94378 +static tree expand_visited(gimple def_stmt)
94379 +{
94380 + gimple tmp;
94381 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
94382 +
94383 + gsi_next(&gsi);
94384 + tmp = gsi_stmt(gsi);
94385 + switch (gimple_code(tmp)) {
94386 + case GIMPLE_ASSIGN:
94387 + return gimple_get_lhs(tmp);
94388 + case GIMPLE_PHI:
94389 + return gimple_phi_result(tmp);
94390 + case GIMPLE_CALL:
94391 + return gimple_call_lhs(tmp);
94392 + default:
94393 + return NULL_TREE;
94394 + }
94395 +}
94396 +
94397 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
94398 +{
94399 + gimple def_stmt;
94400 + enum tree_code code = TREE_CODE(TREE_TYPE(var));
94401 +
94402 + if (is_gimple_constant(var))
94403 + return NULL_TREE;
94404 +
94405 + if (TREE_CODE(var) == ADDR_EXPR)
94406 + return NULL_TREE;
94407 +
94408 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
94409 + if (code != INTEGER_TYPE)
94410 + return NULL_TREE;
94411 +
94412 + if (SSA_NAME_IS_DEFAULT_DEF(var)) {
94413 + check_missing_attribute(var);
94414 + return NULL_TREE;
94415 + }
94416 +
94417 + def_stmt = get_def_stmt(var);
94418 +
94419 + if (!def_stmt)
94420 + return NULL_TREE;
94421 +
94422 + if (pointer_set_contains(visited, def_stmt))
94423 + return expand_visited(def_stmt);
94424 +
94425 + switch (gimple_code(def_stmt)) {
94426 + case GIMPLE_NOP:
94427 + check_missing_attribute(var);
94428 + return NULL_TREE;
94429 + case GIMPLE_PHI:
94430 + return build_new_phi(visited, potentionally_overflowed, def_stmt);
94431 + case GIMPLE_CALL:
94432 + case GIMPLE_ASM:
94433 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
94434 + case GIMPLE_ASSIGN:
94435 + switch (gimple_num_ops(def_stmt)) {
94436 + case 2:
94437 + return handle_unary_ops(visited, potentionally_overflowed, var);
94438 + case 3:
94439 + return handle_binary_ops(visited, potentionally_overflowed, var);
94440 +#if BUILDING_GCC_VERSION >= 4007
94441 + case 4:
94442 + return handle_ternary_ops(visited, potentionally_overflowed, var);
94443 +#endif
94444 + }
94445 + default:
94446 + debug_gimple_stmt(def_stmt);
94447 + error("expand: unknown gimple code");
94448 + gcc_unreachable();
94449 + }
94450 +}
94451 +
94452 +static void change_function_arg(gimple stmt, tree origarg, unsigned int argnum, tree newarg)
94453 +{
94454 + gimple assign;
94455 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
94456 + tree origtype = TREE_TYPE(origarg);
94457 +
94458 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
94459 +
94460 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
94461 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
94462 + update_stmt(assign);
94463 +
94464 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
94465 + update_stmt(stmt);
94466 +}
94467 +
94468 +static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl)
94469 +{
94470 + const char *origid;
94471 + tree arg, origarg;
94472 +
94473 + if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
94474 + gcc_assert(gimple_call_num_args(stmt) > argnum);
94475 + return gimple_call_arg(stmt, argnum);
94476 + }
94477 +
94478 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
94479 + while (origarg && argnum) {
94480 + argnum--;
94481 + origarg = TREE_CHAIN(origarg);
94482 + }
94483 +
94484 + gcc_assert(argnum == 0);
94485 +
94486 + gcc_assert(origarg != NULL_TREE);
94487 + origid = NAME(origarg);
94488 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
94489 + if (!strcmp(origid, NAME(arg)))
94490 + return arg;
94491 + }
94492 + return NULL_TREE;
94493 +}
94494 +
94495 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
94496 +{
94497 + struct pointer_set_t *visited;
94498 + tree arg, newarg, type_max;
94499 + gimple ucast_stmt;
94500 + bool potentionally_overflowed;
94501 +
94502 + arg = get_function_arg(argnum, stmt, fndecl);
94503 + if (arg == NULL_TREE)
94504 + return;
94505 +
94506 + if (is_gimple_constant(arg))
94507 + return;
94508 + if (TREE_CODE(arg) != SSA_NAME)
94509 + return;
94510 +
94511 + check_arg_type(arg);
94512 +
94513 + set_size_overflow_type(arg);
94514 +
94515 + visited = pointer_set_create();
94516 + potentionally_overflowed = false;
94517 + newarg = expand(visited, &potentionally_overflowed, arg);
94518 + pointer_set_destroy(visited);
94519 +
94520 + if (newarg == NULL_TREE || !potentionally_overflowed)
94521 + return;
94522 +
94523 + change_function_arg(stmt, arg, argnum, newarg);
94524 +
94525 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, newarg);
94526 +
94527 + type_max = build_int_cstu(unsigned_size_overflow_type, 0x7fffffff);
94528 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
94529 +}
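
Putting the steps of handle_function_arg() together, a source-level sketch of what the transformation of one marked call argument amounts to. This is illustrative user-space C, not generated GIMPLE: the callee, variable names and sizes are invented, "widened" plays the role of the "cicus" temporary, and the branch body stands in for the emitted call to report_size_overflow().

#include <stdio.h>
#include <stdlib.h>

static void consume(unsigned int size)	/* imagine it marked with size_overflow(1) */
{
	printf("using %u bytes\n", size);
}

int main(void)
{
	unsigned int nmemb = 1u << 20, elem_size = 1u << 13;

	/* before instrumentation:  consume(nmemb * elem_size);              */
	/* after: the argument is recomputed in the wider signed type,       */
	/*        compared against 0x7fffffff, and cast back to its original */
	/*        type for the call                                          */
	long long widened = (long long)nmemb * elem_size;

	if ((unsigned long long)widened > 0x7fffffffULL) {
		/* here the pass emits a call to report_size_overflow();
		 * its declaration is built in start_unit_callback() below */
		fprintf(stderr, "size overflow at %s:%u in %s()\n",
			__FILE__, __LINE__, __func__);
		exit(1);
	}
	consume((unsigned int)widened);
	return 0;
}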
94530 +
94531 +static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
94532 +{
94533 + tree p = TREE_VALUE(attr);
94534 + do {
94535 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
94536 + p = TREE_CHAIN(p);
94537 + } while (p);
94538 +}
94539 +
94540 +static void handle_function_by_hash(gimple stmt, tree fndecl)
94541 +{
94542 + tree orig_fndecl;
94543 + struct size_overflow_hash *hash;
94544 + const char *filename = DECL_SOURCE_FILE(fndecl);
94545 +
94546 + orig_fndecl = get_original_function_decl(fndecl);
94547 + hash = get_function_hash(orig_fndecl, filename);
94548 + if (!hash)
94549 + return;
94550 +
94551 +#define search_param(argnum) \
94552 + if (hash->param##argnum) \
94553 + handle_function_arg(stmt, fndecl, argnum - 1);
94554 +
94555 + search_param(1);
94556 + search_param(2);
94557 + search_param(3);
94558 + search_param(4);
94559 + search_param(5);
94560 + search_param(6);
94561 + search_param(7);
94562 + search_param(8);
94563 + search_param(9);
94564 +#undef search_param
94565 +}
94566 +
94567 +static unsigned int handle_function(void)
94568 +{
94569 + basic_block bb = ENTRY_BLOCK_PTR->next_bb;
94570 + int saved_last_basic_block = last_basic_block;
94571 +
94572 + do {
94573 + gimple_stmt_iterator gsi;
94574 + basic_block next = bb->next_bb;
94575 +
94576 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
94577 + tree fndecl, attr;
94578 + gimple stmt = gsi_stmt(gsi);
94579 +
94580 + if (!(is_gimple_call(stmt)))
94581 + continue;
94582 + fndecl = gimple_call_fndecl(stmt);
94583 + if (fndecl == NULL_TREE)
94584 + continue;
94585 + if (gimple_call_num_args(stmt) == 0)
94586 + continue;
94587 + attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
94588 + if (!attr || !TREE_VALUE(attr))
94589 + handle_function_by_hash(stmt, fndecl);
94590 + else
94591 + handle_function_by_attribute(stmt, attr, fndecl);
94592 + gsi = gsi_for_stmt(stmt);
94593 + }
94594 + bb = next;
94595 + } while (bb && bb->index <= saved_last_basic_block);
94596 + return 0;
94597 +}
94598 +
94599 +static struct gimple_opt_pass size_overflow_pass = {
94600 + .pass = {
94601 + .type = GIMPLE_PASS,
94602 + .name = "size_overflow",
94603 + .gate = NULL,
94604 + .execute = handle_function,
94605 + .sub = NULL,
94606 + .next = NULL,
94607 + .static_pass_number = 0,
94608 + .tv_id = TV_NONE,
94609 + .properties_required = PROP_cfg | PROP_referenced_vars,
94610 + .properties_provided = 0,
94611 + .properties_destroyed = 0,
94612 + .todo_flags_start = 0,
94613 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
94614 + }
94615 +};
94616 +
94617 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
94618 +{
94619 + tree fntype;
94620 +
94621 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
94622 +
94623 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
94624 + fntype = build_function_type_list(void_type_node,
94625 + const_char_ptr_type_node,
94626 + unsigned_type_node,
94627 + const_char_ptr_type_node,
94628 + NULL_TREE);
94629 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
94630 +
94631 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
94632 + TREE_PUBLIC(report_size_overflow_decl) = 1;
94633 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
94634 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
94635 +}
94636 +
94637 +extern struct gimple_opt_pass pass_dce;
94638 +
94639 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
94640 +{
94641 + int i;
94642 + const char * const plugin_name = plugin_info->base_name;
94643 + const int argc = plugin_info->argc;
94644 + const struct plugin_argument * const argv = plugin_info->argv;
94645 + bool enable = true;
94646 +
94647 + struct register_pass_info size_overflow_pass_info = {
94648 + .pass = &size_overflow_pass.pass,
94649 + .reference_pass_name = "ssa",
94650 + .ref_pass_instance_number = 1,
94651 + .pos_op = PASS_POS_INSERT_AFTER
94652 + };
94653 +
94654 + if (!plugin_default_version_check(version, &gcc_version)) {
94655 + error(G_("incompatible gcc/plugin versions"));
94656 + return 1;
94657 + }
94658 +
94659 + for (i = 0; i < argc; ++i) {
94660 + if (!strcmp(argv[i].key, "no-size-overflow")) {
94661 + enable = false;
94662 + continue;
94663 + } else if (!(strcmp(argv[i].key, "no-file-match"))) {
94664 + file_match = false;
94665 + continue;
94666 + }
94667 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
94668 + }
94669 +
94670 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
94671 + if (enable) {
94672 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
94673 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
94674 + }
94675 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
94676 +
94677 + return 0;
94678 +}
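
Illustrative aside: a minimal userspace sketch of roughly what the size_overflow instrumentation above amounts to at the source level. The report_size_overflow() body, the copy_buf() function, its size_overflow marking, and the operand values are made up for the example; the plugin only declares report_size_overflow (in start_unit_callback) and the real handler lives elsewhere in the kernel.

/* sketch: recompute a marked argument in a wider type and bail out on overflow */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void report_size_overflow(const char *loc_file, unsigned int loc_line,
				 const char *current_func)
{
	fprintf(stderr, "size overflow detected in %s (%s:%u)\n",
		current_func, loc_file, loc_line);
	abort();	/* stand-in for the kernel-side handler */
}

/* pretend this was marked __attribute__((size_overflow(1))) */
static void *copy_buf(unsigned int len, const void *src)
{
	void *dst = malloc(len);

	if (dst)
		memcpy(dst, src, len);
	return dst;
}

int main(void)
{
	unsigned int a = 0xffff0000, b = 0x20000;
	const char src[16] = "payload";

	/*
	 * Uninstrumented code would hand the wrapped sum straight to
	 * copy_buf(); the plugin recomputes it in a wider type and compares
	 * it against the original type's maximum before the call.
	 */
	unsigned long long wide = (unsigned long long)a + b;

	if (wide > 0x7fffffffULL)	/* cf. type_max in handle_function_arg() */
		report_size_overflow(__FILE__, __LINE__, __func__);

	copy_buf((unsigned int)wide, src);
	return 0;
}
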
94679 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
94680 new file mode 100644
94681 index 0000000..b87ec9d
94682 --- /dev/null
94683 +++ b/tools/gcc/stackleak_plugin.c
94684 @@ -0,0 +1,313 @@
94685 +/*
94686 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
94687 + * Licensed under the GPL v2
94688 + *
94689 + * Note: the choice of the license means that the compilation process is
94690 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
94691 + * but for the kernel it doesn't matter since it doesn't link against
94692 + * any of the gcc libraries
94693 + *
94694 + * gcc plugin to help implement various PaX features
94695 + *
94696 + * - track lowest stack pointer
94697 + *
94698 + * TODO:
94699 + * - initialize all local variables
94700 + *
94701 + * BUGS:
94702 + * - none known
94703 + */
94704 +#include "gcc-plugin.h"
94705 +#include "config.h"
94706 +#include "system.h"
94707 +#include "coretypes.h"
94708 +#include "tree.h"
94709 +#include "tree-pass.h"
94710 +#include "flags.h"
94711 +#include "intl.h"
94712 +#include "toplev.h"
94713 +#include "plugin.h"
94714 +//#include "expr.h" where are you...
94715 +#include "diagnostic.h"
94716 +#include "plugin-version.h"
94717 +#include "tm.h"
94718 +#include "function.h"
94719 +#include "basic-block.h"
94720 +#include "gimple.h"
94721 +#include "rtl.h"
94722 +#include "emit-rtl.h"
94723 +
94724 +extern void print_gimple_stmt(FILE *, gimple, int, int);
94725 +
94726 +int plugin_is_GPL_compatible;
94727 +
94728 +static int track_frame_size = -1;
94729 +static const char track_function[] = "pax_track_stack";
94730 +static const char check_function[] = "pax_check_alloca";
94731 +static bool init_locals;
94732 +
94733 +static struct plugin_info stackleak_plugin_info = {
94734 + .version = "201203140940",
94735 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
94736 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
94737 +};
94738 +
94739 +static bool gate_stackleak_track_stack(void);
94740 +static unsigned int execute_stackleak_tree_instrument(void);
94741 +static unsigned int execute_stackleak_final(void);
94742 +
94743 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
94744 + .pass = {
94745 + .type = GIMPLE_PASS,
94746 + .name = "stackleak_tree_instrument",
94747 + .gate = gate_stackleak_track_stack,
94748 + .execute = execute_stackleak_tree_instrument,
94749 + .sub = NULL,
94750 + .next = NULL,
94751 + .static_pass_number = 0,
94752 + .tv_id = TV_NONE,
94753 + .properties_required = PROP_gimple_leh | PROP_cfg,
94754 + .properties_provided = 0,
94755 + .properties_destroyed = 0,
94756 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
94757 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
94758 + }
94759 +};
94760 +
94761 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
94762 + .pass = {
94763 + .type = RTL_PASS,
94764 + .name = "stackleak_final",
94765 + .gate = gate_stackleak_track_stack,
94766 + .execute = execute_stackleak_final,
94767 + .sub = NULL,
94768 + .next = NULL,
94769 + .static_pass_number = 0,
94770 + .tv_id = TV_NONE,
94771 + .properties_required = 0,
94772 + .properties_provided = 0,
94773 + .properties_destroyed = 0,
94774 + .todo_flags_start = 0,
94775 + .todo_flags_finish = TODO_dump_func
94776 + }
94777 +};
94778 +
94779 +static bool gate_stackleak_track_stack(void)
94780 +{
94781 + return track_frame_size >= 0;
94782 +}
94783 +
94784 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
94785 +{
94786 + gimple check_alloca;
94787 + tree fntype, fndecl, alloca_size;
94788 +
94789 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
94790 + fndecl = build_fn_decl(check_function, fntype);
94791 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
94792 +
94793 + // insert call to void pax_check_alloca(unsigned long size)
94794 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
94795 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
94796 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
94797 +}
94798 +
94799 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
94800 +{
94801 + gimple track_stack;
94802 + tree fntype, fndecl;
94803 +
94804 + fntype = build_function_type_list(void_type_node, NULL_TREE);
94805 + fndecl = build_fn_decl(track_function, fntype);
94806 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
94807 +
94808 + // insert call to void pax_track_stack(void)
94809 + track_stack = gimple_build_call(fndecl, 0);
94810 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
94811 +}
94812 +
94813 +#if BUILDING_GCC_VERSION == 4005
94814 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
94815 +{
94816 + tree fndecl;
94817 +
94818 + if (!is_gimple_call(stmt))
94819 + return false;
94820 + fndecl = gimple_call_fndecl(stmt);
94821 + if (!fndecl)
94822 + return false;
94823 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
94824 + return false;
94825 +// print_node(stderr, "pax", fndecl, 4);
94826 + return DECL_FUNCTION_CODE(fndecl) == code;
94827 +}
94828 +#endif
94829 +
94830 +static bool is_alloca(gimple stmt)
94831 +{
94832 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
94833 + return true;
94834 +
94835 +#if BUILDING_GCC_VERSION >= 4007
94836 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
94837 + return true;
94838 +#endif
94839 +
94840 + return false;
94841 +}
94842 +
94843 +static unsigned int execute_stackleak_tree_instrument(void)
94844 +{
94845 + basic_block bb, entry_bb;
94846 + bool prologue_instrumented = false, is_leaf = true;
94847 +
94848 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
94849 +
94850 + // 1. loop through BBs and GIMPLE statements
94851 + FOR_EACH_BB(bb) {
94852 + gimple_stmt_iterator gsi;
94853 +
94854 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
94855 + gimple stmt;
94856 +
94857 + stmt = gsi_stmt(gsi);
94858 +
94859 + if (is_gimple_call(stmt))
94860 + is_leaf = false;
94861 +
94862 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
94863 + if (!is_alloca(stmt))
94864 + continue;
94865 +
94866 + // 2. insert stack overflow check before each __builtin_alloca call
94867 + stackleak_check_alloca(&gsi);
94868 +
94869 + // 3. insert track call after each __builtin_alloca call
94870 + stackleak_add_instrumentation(&gsi);
94871 + if (bb == entry_bb)
94872 + prologue_instrumented = true;
94873 + }
94874 + }
94875 +
94876 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
94877 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
94878 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
94879 + // case in point: native_save_fl on amd64, when optimized for size, would clobber rdx if it were instrumented here.
94880 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
94881 + return 0;
94882 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
94883 + return 0;
94884 +
94885 + // 4. insert track call at the beginning
94886 + if (!prologue_instrumented) {
94887 + gimple_stmt_iterator gsi;
94888 +
94889 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
94890 + if (dom_info_available_p(CDI_DOMINATORS))
94891 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
94892 + gsi = gsi_start_bb(bb);
94893 + stackleak_add_instrumentation(&gsi);
94894 + }
94895 +
94896 + return 0;
94897 +}
94898 +
94899 +static unsigned int execute_stackleak_final(void)
94900 +{
94901 + rtx insn;
94902 +
94903 + if (cfun->calls_alloca)
94904 + return 0;
94905 +
94906 + // keep calls only if function frame is big enough
94907 + if (get_frame_size() >= track_frame_size)
94908 + return 0;
94909 +
94910 + // 1. find pax_track_stack calls
94911 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
94912 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
94913 + rtx body;
94914 +
94915 + if (!CALL_P(insn))
94916 + continue;
94917 + body = PATTERN(insn);
94918 + if (GET_CODE(body) != CALL)
94919 + continue;
94920 + body = XEXP(body, 0);
94921 + if (GET_CODE(body) != MEM)
94922 + continue;
94923 + body = XEXP(body, 0);
94924 + if (GET_CODE(body) != SYMBOL_REF)
94925 + continue;
94926 + if (strcmp(XSTR(body, 0), track_function))
94927 + continue;
94928 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
94929 + // 2. delete call
94930 + insn = delete_insn_and_edges(insn);
94931 +#if BUILDING_GCC_VERSION >= 4007
94932 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
94933 + insn = delete_insn_and_edges(insn);
94934 +#endif
94935 + }
94936 +
94937 +// print_simple_rtl(stderr, get_insns());
94938 +// print_rtl(stderr, get_insns());
94939 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
94940 +
94941 + return 0;
94942 +}
94943 +
94944 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
94945 +{
94946 + const char * const plugin_name = plugin_info->base_name;
94947 + const int argc = plugin_info->argc;
94948 + const struct plugin_argument * const argv = plugin_info->argv;
94949 + int i;
94950 + struct register_pass_info stackleak_tree_instrument_pass_info = {
94951 + .pass = &stackleak_tree_instrument_pass.pass,
94952 +// .reference_pass_name = "tree_profile",
94953 + .reference_pass_name = "optimized",
94954 + .ref_pass_instance_number = 0,
94955 + .pos_op = PASS_POS_INSERT_BEFORE
94956 + };
94957 + struct register_pass_info stackleak_final_pass_info = {
94958 + .pass = &stackleak_final_rtl_opt_pass.pass,
94959 + .reference_pass_name = "final",
94960 + .ref_pass_instance_number = 0,
94961 + .pos_op = PASS_POS_INSERT_BEFORE
94962 + };
94963 +
94964 + if (!plugin_default_version_check(version, &gcc_version)) {
94965 + error(G_("incompatible gcc/plugin versions"));
94966 + return 1;
94967 + }
94968 +
94969 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
94970 +
94971 + for (i = 0; i < argc; ++i) {
94972 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
94973 + if (!argv[i].value) {
94974 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
94975 + continue;
94976 + }
94977 + track_frame_size = atoi(argv[i].value);
94978 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
94979 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
94980 + continue;
94981 + }
94982 + if (!strcmp(argv[i].key, "initialize-locals")) {
94983 + if (argv[i].value) {
94984 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
94985 + continue;
94986 + }
94987 + init_locals = true;
94988 + continue;
94989 + }
94990 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
94991 + }
94992 +
94993 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
94994 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
94995 +
94996 + return 0;
94997 +}
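
Illustrative aside: a minimal sketch of the code shape the two stackleak passes aim for. pax_check_alloca() and pax_track_stack() are implemented by the kernel elsewhere; the stubs below are stand-ins so the sketch builds on its own, and the demo() function is made up.

#include <alloca.h>
#include <stdio.h>
#include <string.h>

static void pax_track_stack(void)			{ /* kernel: record lowest stack pointer */ }
static void pax_check_alloca(unsigned long size)	{ (void)size; /* kernel: verify stack bounds */ }

static void demo(unsigned long n)
{
	char *buf;

	/* inserted by stackleak_check_alloca() before the alloca call */
	pax_check_alloca(n);
	buf = alloca(n);
	/* inserted by stackleak_add_instrumentation() after the alloca call */
	pax_track_stack();

	memset(buf, 0, n);
	printf("%p\n", (void *)buf);
}

int main(void)
{
	/*
	 * Functions without an alloca call get a single pax_track_stack()
	 * call at the start instead; execute_stackleak_final() later removes
	 * it again if the final frame size stays below the track-lowest-sp
	 * threshold.
	 */
	demo(64);
	return 0;
}
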
94998 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
94999 index 6789d78..4afd019 100644
95000 --- a/tools/perf/util/include/asm/alternative-asm.h
95001 +++ b/tools/perf/util/include/asm/alternative-asm.h
95002 @@ -5,4 +5,7 @@
95003
95004 #define altinstruction_entry #
95005
95006 + .macro pax_force_retaddr rip=0, reload=0
95007 + .endm
95008 +
95009 #endif
95010 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
95011 index af0f22f..9a7d479 100644
95012 --- a/usr/gen_init_cpio.c
95013 +++ b/usr/gen_init_cpio.c
95014 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
95015 int retval;
95016 int rc = -1;
95017 int namesize;
95018 - int i;
95019 + unsigned int i;
95020
95021 mode |= S_IFREG;
95022
95023 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
95024 *env_var = *expanded = '\0';
95025 strncat(env_var, start + 2, end - start - 2);
95026 strncat(expanded, new_location, start - new_location);
95027 - strncat(expanded, getenv(env_var), PATH_MAX);
95028 - strncat(expanded, end + 1, PATH_MAX);
95029 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
95030 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
95031 strncpy(new_location, expanded, PATH_MAX);
95032 + new_location[PATH_MAX] = 0;
95033 } else
95034 break;
95035 }
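
Illustrative aside: the strncat() bound change above in isolation. strncat()'s third argument caps how many characters are appended, not the size of the destination, so the remaining room has to be recomputed per call. The BUF_MAX buffer size and strings below are made up; the patch's new_location[PATH_MAX] = 0 presumes a PATH_MAX + 1 byte buffer, which the sketch mirrors.

#include <stdio.h>
#include <string.h>

#define BUF_MAX 32	/* stand-in for PATH_MAX */

int main(void)
{
	char dst[BUF_MAX + 1] = "prefix/";
	char copy[BUF_MAX + 1];
	const char *extra = "some/longer/path/component/to/append";

	/* append at most the room actually left in dst, as in the hunk above */
	strncat(dst, extra, BUF_MAX - strlen(dst));

	/*
	 * strncpy() may not NUL-terminate, hence the explicit terminator
	 * (the counterpart of new_location[PATH_MAX] = 0 in the patch).
	 */
	strncpy(copy, dst, BUF_MAX);
	copy[BUF_MAX] = '\0';

	puts(copy);
	return 0;
}
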
95036 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
95037 index 9739b53..6d457e3 100644
95038 --- a/virt/kvm/kvm_main.c
95039 +++ b/virt/kvm/kvm_main.c
95040 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
95041
95042 static cpumask_var_t cpus_hardware_enabled;
95043 static int kvm_usage_count = 0;
95044 -static atomic_t hardware_enable_failed;
95045 +static atomic_unchecked_t hardware_enable_failed;
95046
95047 struct kmem_cache *kvm_vcpu_cache;
95048 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
95049 @@ -2247,7 +2247,7 @@ static void hardware_enable_nolock(void *junk)
95050
95051 if (r) {
95052 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
95053 - atomic_inc(&hardware_enable_failed);
95054 + atomic_inc_unchecked(&hardware_enable_failed);
95055 printk(KERN_INFO "kvm: enabling virtualization on "
95056 "CPU%d failed\n", cpu);
95057 }
95058 @@ -2301,10 +2301,10 @@ static int hardware_enable_all(void)
95059
95060 kvm_usage_count++;
95061 if (kvm_usage_count == 1) {
95062 - atomic_set(&hardware_enable_failed, 0);
95063 + atomic_set_unchecked(&hardware_enable_failed, 0);
95064 on_each_cpu(hardware_enable_nolock, NULL, 1);
95065
95066 - if (atomic_read(&hardware_enable_failed)) {
95067 + if (atomic_read_unchecked(&hardware_enable_failed)) {
95068 hardware_disable_all_nolock();
95069 r = -EBUSY;
95070 }
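
Illustrative aside: what the atomic_unchecked_t conversions above mean. Under the PaX REFCOUNT hardening (defined elsewhere in this patch, not in these hunks), plain atomic_t arithmetic traps on overflow; counters such as hardware_enable_failed that never guard an object's lifetime are switched to the unchecked variants so they keep plain wrapping semantics. The userspace stand-ins below only mimic that split and are not the kernel definitions.

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } atomic_unchecked_t;

static void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
	atomic_store(&v->counter, i);
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	atomic_fetch_add(&v->counter, 1);	/* wraps silently, no overflow trap */
}

static int atomic_read_unchecked(atomic_unchecked_t *v)
{
	return atomic_load(&v->counter);
}

int main(void)
{
	atomic_unchecked_t failed;

	atomic_set_unchecked(&failed, 0);
	atomic_inc_unchecked(&failed);
	printf("failed CPUs: %d\n", atomic_read_unchecked(&failed));
	return 0;
}
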
95071 @@ -2667,7 +2667,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
95072 kvm_arch_vcpu_put(vcpu);
95073 }
95074
95075 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
95076 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
95077 struct module *module)
95078 {
95079 int r;
95080 @@ -2730,7 +2730,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
95081 if (!vcpu_align)
95082 vcpu_align = __alignof__(struct kvm_vcpu);
95083 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
95084 - 0, NULL);
95085 + SLAB_USERCOPY, NULL);
95086 if (!kvm_vcpu_cache) {
95087 r = -ENOMEM;
95088 goto out_free_3;
95089 @@ -2740,9 +2740,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
95090 if (r)
95091 goto out_free;
95092
95093 - kvm_chardev_ops.owner = module;
95094 - kvm_vm_fops.owner = module;
95095 - kvm_vcpu_fops.owner = module;
95096 + pax_open_kernel();
95097 + *(void **)&kvm_chardev_ops.owner = module;
95098 + *(void **)&kvm_vm_fops.owner = module;
95099 + *(void **)&kvm_vcpu_fops.owner = module;
95100 + pax_close_kernel();
95101
95102 r = misc_register(&kvm_dev);
95103 if (r) {