1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index 0c083c5..bf13011 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,9 +51,11 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 @@ -69,6 +74,7 @@ Image
38 Module.markers
39 Module.symvers
40 PENDING
41 +PERF*
42 SCCS
43 System.map*
44 TAGS
45 @@ -92,19 +98,24 @@ bounds.h
46 bsetup
47 btfixupprep
48 build
49 +builtin-policy.h
50 bvmlinux
51 bzImage*
52 capability_names.h
53 capflags.c
54 classlist.h*
55 +clut_vga16.c
56 +common-cmds.h
57 comp*.log
58 compile.h*
59 conf
60 config
61 config-*
62 config_data.h*
63 +config.c
64 config.mak
65 config.mak.autogen
66 +config.tmp
67 conmakehash
68 consolemap_deftbl.c*
69 cpustr.h
70 @@ -115,9 +126,11 @@ devlist.h*
71 dnotify_test
72 docproc
73 dslm
74 +dtc-lexer.lex.c
75 elf2ecoff
76 elfconfig.h*
77 evergreen_reg_safe.h
78 +exception_policy.conf
79 fixdep
80 flask.h
81 fore200e_mkfirm
82 @@ -125,12 +138,15 @@ fore200e_pca_fw.c*
83 gconf
84 gconf.glade.h
85 gen-devlist
86 +gen-kdb_cmds.c
87 gen_crc32table
88 gen_init_cpio
89 generated
90 genheaders
91 genksyms
92 *_gray256.c
93 +hash
94 +hid-example
95 hpet_example
96 hugepage-mmap
97 hugepage-shm
98 @@ -145,7 +161,7 @@ int32.c
99 int4.c
100 int8.c
101 kallsyms
102 -kconfig
103 +kern_constants.h
104 keywords.c
105 ksym.c*
106 ksym.h*
107 @@ -153,7 +169,7 @@ kxgettext
108 lkc_defs.h
109 lex.c
110 lex.*.c
111 -linux
112 +lib1funcs.S
113 logo_*.c
114 logo_*_clut224.c
115 logo_*_mono.c
116 @@ -165,14 +181,15 @@ machtypes.h
117 map
118 map_hugetlb
119 maui_boot.h
120 -media
121 mconf
122 +mdp
123 miboot*
124 mk_elfconfig
125 mkboot
126 mkbugboot
127 mkcpustr
128 mkdep
129 +mkpiggy
130 mkprep
131 mkregtable
132 mktables
133 @@ -208,6 +225,7 @@ r300_reg_safe.h
134 r420_reg_safe.h
135 r600_reg_safe.h
136 recordmcount
137 +regdb.c
138 relocs
139 rlim_names.h
140 rn50_reg_safe.h
141 @@ -218,6 +236,7 @@ setup
142 setup.bin
143 setup.elf
144 sImage
145 +slabinfo
146 sm_tbl*
147 split-include
148 syscalltab.h
149 @@ -228,6 +247,7 @@ tftpboot.img
150 timeconst.h
151 times.h*
152 trix_boot.h
153 +user_constants.h
154 utsrelease.h*
155 vdso-syms.lds
156 vdso.lds
157 @@ -245,7 +265,9 @@ vmlinux
158 vmlinux-*
159 vmlinux.aout
160 vmlinux.bin.all
161 +vmlinux.bin.bz2
162 vmlinux.lds
163 +vmlinux.relocs
164 vmlinuz
165 voffset.h
166 vsyscall.lds
167 @@ -253,9 +275,11 @@ vsyscall_32.lds
168 wanxlfw.inc
169 uImage
170 unifdef
171 +utsrelease.h
172 wakeup.bin
173 wakeup.elf
174 wakeup.lds
175 zImage*
176 zconf.hash.c
177 +zconf.lex.c
178 zoffset.h
179 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
180 index d99fd9c..8689fef 100644
181 --- a/Documentation/kernel-parameters.txt
182 +++ b/Documentation/kernel-parameters.txt
183 @@ -1977,6 +1977,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
184 the specified number of seconds. This is to be used if
185 your oopses keep scrolling off the screen.
186
187 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
188 + virtualization environments that don't cope well with the
189 + expand down segment used by UDEREF on X86-32 or the frequent
190 + page table updates on X86-64.
191 +
192 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
193 +
194 pcbit= [HW,ISDN]
195
196 pcd. [PARIDE]
197 diff --git a/Makefile b/Makefile
198 index 073f74f..b379941 100644
199 --- a/Makefile
200 +++ b/Makefile
201 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
202
203 HOSTCC = gcc
204 HOSTCXX = g++
205 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
206 -HOSTCXXFLAGS = -O2
207 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
208 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
209 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
210
211 # Decide whether to build built-in, modular, or both.
212 # Normally, just do built-in.
213 @@ -357,8 +358,8 @@ CFLAGS_GCOV = -fprofile-arcs -ftest-coverage
214
215 # Use LINUXINCLUDE when you must reference the include/ directory.
216 # Needed to be compatible with the O= option
217 -LINUXINCLUDE := -I$(srctree)/arch/$(hdr-arch)/include \
218 - -Iarch/$(hdr-arch)/include/generated -Iinclude \
219 +LINUXINCLUDE := -isystem arch/$(hdr-arch)/include \
220 + -isystem arch/$(hdr-arch)/include/generated -isystem include \
221 $(if $(KBUILD_SRC), -I$(srctree)/include) \
222 -include $(srctree)/include/linux/kconfig.h
223
224 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
225 # Rules shared between *config targets and build targets
226
227 # Basic helpers built in scripts/
228 -PHONY += scripts_basic
229 -scripts_basic:
230 +PHONY += scripts_basic gcc-plugins
231 +scripts_basic: gcc-plugins
232 $(Q)$(MAKE) $(build)=scripts/basic
233 $(Q)rm -f .tmp_quiet_recordmcount
234
235 @@ -564,6 +565,55 @@ else
236 KBUILD_CFLAGS += -O2
237 endif
238
239 +ifndef DISABLE_PAX_PLUGINS
240 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
241 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
242 +ifndef CONFIG_UML
243 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
244 +endif
245 +endif
246 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
247 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
248 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
249 +endif
250 +ifdef CONFIG_KALLOCSTAT_PLUGIN
251 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
252 +endif
253 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
254 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
255 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
256 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
257 +endif
258 +ifdef CONFIG_CHECKER_PLUGIN
259 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
260 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
261 +endif
262 +endif
263 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
264 +ifdef CONFIG_PAX_SIZE_OVERFLOW
265 +SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
266 +endif
267 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
268 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
269 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
270 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
271 +ifeq ($(KBUILD_EXTMOD),)
272 +gcc-plugins:
273 + $(Q)$(MAKE) $(build)=tools/gcc
274 +else
275 +gcc-plugins: ;
276 +endif
277 +else
278 +gcc-plugins:
279 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
280 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
281 +else
282 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
283 +endif
284 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
285 +endif
286 +endif
287 +
288 include $(srctree)/arch/$(SRCARCH)/Makefile
289
290 ifneq ($(CONFIG_FRAME_WARN),0)
291 @@ -708,7 +758,7 @@ export mod_strip_cmd
292
293
294 ifeq ($(KBUILD_EXTMOD),)
295 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
296 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
297
298 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
299 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
300 @@ -932,6 +982,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
301
302 # The actual objects are generated when descending,
303 # make sure no implicit rule kicks in
304 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
305 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
306 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
307
308 # Handle descending into subdirectories listed in $(vmlinux-dirs)
309 @@ -941,7 +993,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
310 # Error messages still appears in the original language
311
312 PHONY += $(vmlinux-dirs)
313 -$(vmlinux-dirs): prepare scripts
314 +$(vmlinux-dirs): gcc-plugins prepare scripts
315 $(Q)$(MAKE) $(build)=$@
316
317 # Store (new) KERNELRELASE string in include/config/kernel.release
318 @@ -985,6 +1037,7 @@ prepare0: archprepare FORCE
319 $(Q)$(MAKE) $(build)=.
320
321 # All the preparing..
322 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
323 prepare: prepare0
324
325 # Generate some files
326 @@ -1089,6 +1142,8 @@ all: modules
327 # using awk while concatenating to the final file.
328
329 PHONY += modules
330 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
331 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
332 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
333 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
334 @$(kecho) ' Building modules, stage 2.';
335 @@ -1104,7 +1159,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
336
337 # Target to prepare building external modules
338 PHONY += modules_prepare
339 -modules_prepare: prepare scripts
340 +modules_prepare: gcc-plugins prepare scripts
341
342 # Target to install modules
343 PHONY += modules_install
344 @@ -1201,6 +1256,7 @@ distclean: mrproper
345 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
346 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
347 -o -name '.*.rej' \
348 + -o -name '.*.rej' -o -name '*.so' \
349 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
350 -type f -print | xargs rm -f
351
352 @@ -1361,6 +1417,8 @@ PHONY += $(module-dirs) modules
353 $(module-dirs): crmodverdir $(objtree)/Module.symvers
354 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
355
356 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
357 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
358 modules: $(module-dirs)
359 @$(kecho) ' Building modules, stage 2.';
360 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
361 @@ -1487,17 +1545,21 @@ else
362 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
363 endif
364
365 -%.s: %.c prepare scripts FORCE
366 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
367 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
368 +%.s: %.c gcc-plugins prepare scripts FORCE
369 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
370 %.i: %.c prepare scripts FORCE
371 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
372 -%.o: %.c prepare scripts FORCE
373 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
374 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
375 +%.o: %.c gcc-plugins prepare scripts FORCE
376 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
377 %.lst: %.c prepare scripts FORCE
378 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
379 -%.s: %.S prepare scripts FORCE
380 +%.s: %.S gcc-plugins prepare scripts FORCE
381 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
382 -%.o: %.S prepare scripts FORCE
383 +%.o: %.S gcc-plugins prepare scripts FORCE
384 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
385 %.symtypes: %.c prepare scripts FORCE
386 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
387 @@ -1507,11 +1569,15 @@ endif
388 $(cmd_crmodverdir)
389 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
390 $(build)=$(build-dir)
391 -%/: prepare scripts FORCE
392 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
393 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
394 +%/: gcc-plugins prepare scripts FORCE
395 $(cmd_crmodverdir)
396 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
397 $(build)=$(build-dir)
398 -%.ko: prepare scripts FORCE
399 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
400 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
401 +%.ko: gcc-plugins prepare scripts FORCE
402 $(cmd_crmodverdir)
403 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
404 $(build)=$(build-dir) $(@:.ko=.o)
405 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
406 index 640f909..48b6597 100644
407 --- a/arch/alpha/include/asm/atomic.h
408 +++ b/arch/alpha/include/asm/atomic.h
409 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
410 #define atomic_dec(v) atomic_sub(1,(v))
411 #define atomic64_dec(v) atomic64_sub(1,(v))
412
413 +#define atomic64_read_unchecked(v) atomic64_read(v)
414 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
415 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
416 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
417 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
418 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
419 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
420 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
421 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
422 +
423 #define smp_mb__before_atomic_dec() smp_mb()
424 #define smp_mb__after_atomic_dec() smp_mb()
425 #define smp_mb__before_atomic_inc() smp_mb()
426 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
427 index ad368a9..fbe0f25 100644
428 --- a/arch/alpha/include/asm/cache.h
429 +++ b/arch/alpha/include/asm/cache.h
430 @@ -4,19 +4,19 @@
431 #ifndef __ARCH_ALPHA_CACHE_H
432 #define __ARCH_ALPHA_CACHE_H
433
434 +#include <linux/const.h>
435
436 /* Bytes per L1 (data) cache line. */
437 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
438 -# define L1_CACHE_BYTES 64
439 # define L1_CACHE_SHIFT 6
440 #else
441 /* Both EV4 and EV5 are write-through, read-allocate,
442 direct-mapped, physical.
443 */
444 -# define L1_CACHE_BYTES 32
445 # define L1_CACHE_SHIFT 5
446 #endif
447
448 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
449 #define SMP_CACHE_BYTES L1_CACHE_BYTES
450
451 #endif
452 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
453 index da5449e..7418343 100644
454 --- a/arch/alpha/include/asm/elf.h
455 +++ b/arch/alpha/include/asm/elf.h
456 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
457
458 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
459
460 +#ifdef CONFIG_PAX_ASLR
461 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
462 +
463 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
464 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
465 +#endif
466 +
467 /* $0 is set by ld.so to a pointer to a function which might be
468 registered using atexit. This provides a mean for the dynamic
469 linker to call DT_FINI functions for shared libraries that have
470 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
471 index bc2a0da..8ad11ee 100644
472 --- a/arch/alpha/include/asm/pgalloc.h
473 +++ b/arch/alpha/include/asm/pgalloc.h
474 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
475 pgd_set(pgd, pmd);
476 }
477
478 +static inline void
479 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
480 +{
481 + pgd_populate(mm, pgd, pmd);
482 +}
483 +
484 extern pgd_t *pgd_alloc(struct mm_struct *mm);
485
486 static inline void
487 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
488 index de98a73..bd4f1f8 100644
489 --- a/arch/alpha/include/asm/pgtable.h
490 +++ b/arch/alpha/include/asm/pgtable.h
491 @@ -101,6 +101,17 @@ struct vm_area_struct;
492 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
493 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
494 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
495 +
496 +#ifdef CONFIG_PAX_PAGEEXEC
497 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
498 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
499 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
500 +#else
501 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
502 +# define PAGE_COPY_NOEXEC PAGE_COPY
503 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
504 +#endif
505 +
506 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
507
508 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
509 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
510 index 2fd00b7..cfd5069 100644
511 --- a/arch/alpha/kernel/module.c
512 +++ b/arch/alpha/kernel/module.c
513 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
514
515 /* The small sections were sorted to the end of the segment.
516 The following should definitely cover them. */
517 - gp = (u64)me->module_core + me->core_size - 0x8000;
518 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
519 got = sechdrs[me->arch.gotsecindex].sh_addr;
520
521 for (i = 0; i < n; i++) {
522 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
523 index 01e8715..be0e80f 100644
524 --- a/arch/alpha/kernel/osf_sys.c
525 +++ b/arch/alpha/kernel/osf_sys.c
526 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
527 /* At this point: (!vma || addr < vma->vm_end). */
528 if (limit - len < addr)
529 return -ENOMEM;
530 - if (!vma || addr + len <= vma->vm_start)
531 + if (check_heap_stack_gap(vma, addr, len))
532 return addr;
533 addr = vma->vm_end;
534 vma = vma->vm_next;
535 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
536 merely specific addresses, but regions of memory -- perhaps
537 this feature should be incorporated into all ports? */
538
539 +#ifdef CONFIG_PAX_RANDMMAP
540 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
541 +#endif
542 +
543 if (addr) {
544 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
545 if (addr != (unsigned long) -ENOMEM)
546 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
547 }
548
549 /* Next, try allocating at TASK_UNMAPPED_BASE. */
550 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
551 - len, limit);
552 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
553 +
554 if (addr != (unsigned long) -ENOMEM)
555 return addr;
556
557 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
558 index fadd5f8..904e73a 100644
559 --- a/arch/alpha/mm/fault.c
560 +++ b/arch/alpha/mm/fault.c
561 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
562 __reload_thread(pcb);
563 }
564
565 +#ifdef CONFIG_PAX_PAGEEXEC
566 +/*
567 + * PaX: decide what to do with offenders (regs->pc = fault address)
568 + *
569 + * returns 1 when task should be killed
570 + * 2 when patched PLT trampoline was detected
571 + * 3 when unpatched PLT trampoline was detected
572 + */
573 +static int pax_handle_fetch_fault(struct pt_regs *regs)
574 +{
575 +
576 +#ifdef CONFIG_PAX_EMUPLT
577 + int err;
578 +
579 + do { /* PaX: patched PLT emulation #1 */
580 + unsigned int ldah, ldq, jmp;
581 +
582 + err = get_user(ldah, (unsigned int *)regs->pc);
583 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
584 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
585 +
586 + if (err)
587 + break;
588 +
589 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
590 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
591 + jmp == 0x6BFB0000U)
592 + {
593 + unsigned long r27, addr;
594 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
595 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
596 +
597 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
598 + err = get_user(r27, (unsigned long *)addr);
599 + if (err)
600 + break;
601 +
602 + regs->r27 = r27;
603 + regs->pc = r27;
604 + return 2;
605 + }
606 + } while (0);
607 +
608 + do { /* PaX: patched PLT emulation #2 */
609 + unsigned int ldah, lda, br;
610 +
611 + err = get_user(ldah, (unsigned int *)regs->pc);
612 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
613 + err |= get_user(br, (unsigned int *)(regs->pc+8));
614 +
615 + if (err)
616 + break;
617 +
618 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
619 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
620 + (br & 0xFFE00000U) == 0xC3E00000U)
621 + {
622 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
623 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
624 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
625 +
626 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
627 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
628 + return 2;
629 + }
630 + } while (0);
631 +
632 + do { /* PaX: unpatched PLT emulation */
633 + unsigned int br;
634 +
635 + err = get_user(br, (unsigned int *)regs->pc);
636 +
637 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
638 + unsigned int br2, ldq, nop, jmp;
639 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
640 +
641 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
642 + err = get_user(br2, (unsigned int *)addr);
643 + err |= get_user(ldq, (unsigned int *)(addr+4));
644 + err |= get_user(nop, (unsigned int *)(addr+8));
645 + err |= get_user(jmp, (unsigned int *)(addr+12));
646 + err |= get_user(resolver, (unsigned long *)(addr+16));
647 +
648 + if (err)
649 + break;
650 +
651 + if (br2 == 0xC3600000U &&
652 + ldq == 0xA77B000CU &&
653 + nop == 0x47FF041FU &&
654 + jmp == 0x6B7B0000U)
655 + {
656 + regs->r28 = regs->pc+4;
657 + regs->r27 = addr+16;
658 + regs->pc = resolver;
659 + return 3;
660 + }
661 + }
662 + } while (0);
663 +#endif
664 +
665 + return 1;
666 +}
667 +
668 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
669 +{
670 + unsigned long i;
671 +
672 + printk(KERN_ERR "PAX: bytes at PC: ");
673 + for (i = 0; i < 5; i++) {
674 + unsigned int c;
675 + if (get_user(c, (unsigned int *)pc+i))
676 + printk(KERN_CONT "???????? ");
677 + else
678 + printk(KERN_CONT "%08x ", c);
679 + }
680 + printk("\n");
681 +}
682 +#endif
683
684 /*
685 * This routine handles page faults. It determines the address,
686 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
687 good_area:
688 si_code = SEGV_ACCERR;
689 if (cause < 0) {
690 - if (!(vma->vm_flags & VM_EXEC))
691 + if (!(vma->vm_flags & VM_EXEC)) {
692 +
693 +#ifdef CONFIG_PAX_PAGEEXEC
694 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
695 + goto bad_area;
696 +
697 + up_read(&mm->mmap_sem);
698 + switch (pax_handle_fetch_fault(regs)) {
699 +
700 +#ifdef CONFIG_PAX_EMUPLT
701 + case 2:
702 + case 3:
703 + return;
704 +#endif
705 +
706 + }
707 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
708 + do_group_exit(SIGKILL);
709 +#else
710 goto bad_area;
711 +#endif
712 +
713 + }
714 } else if (!cause) {
715 /* Allow reads even for write-only mappings */
716 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
717 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
718 index 86976d0..c63ea6b 100644
719 --- a/arch/arm/include/asm/atomic.h
720 +++ b/arch/arm/include/asm/atomic.h
721 @@ -15,6 +15,10 @@
722 #include <linux/types.h>
723 #include <asm/system.h>
724
725 +#ifdef CONFIG_GENERIC_ATOMIC64
726 +#include <asm-generic/atomic64.h>
727 +#endif
728 +
729 #define ATOMIC_INIT(i) { (i) }
730
731 #ifdef __KERNEL__
732 @@ -25,7 +29,15 @@
733 * atomic_set() is the clrex or dummy strex done on every exception return.
734 */
735 #define atomic_read(v) (*(volatile int *)&(v)->counter)
736 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
737 +{
738 + return v->counter;
739 +}
740 #define atomic_set(v,i) (((v)->counter) = (i))
741 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
742 +{
743 + v->counter = i;
744 +}
745
746 #if __LINUX_ARM_ARCH__ >= 6
747
748 @@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
749 int result;
750
751 __asm__ __volatile__("@ atomic_add\n"
752 +"1: ldrex %1, [%3]\n"
753 +" adds %0, %1, %4\n"
754 +
755 +#ifdef CONFIG_PAX_REFCOUNT
756 +" bvc 3f\n"
757 +"2: bkpt 0xf103\n"
758 +"3:\n"
759 +#endif
760 +
761 +" strex %1, %0, [%3]\n"
762 +" teq %1, #0\n"
763 +" bne 1b"
764 +
765 +#ifdef CONFIG_PAX_REFCOUNT
766 +"\n4:\n"
767 + _ASM_EXTABLE(2b, 4b)
768 +#endif
769 +
770 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
771 + : "r" (&v->counter), "Ir" (i)
772 + : "cc");
773 +}
774 +
775 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
776 +{
777 + unsigned long tmp;
778 + int result;
779 +
780 + __asm__ __volatile__("@ atomic_add_unchecked\n"
781 "1: ldrex %0, [%3]\n"
782 " add %0, %0, %4\n"
783 " strex %1, %0, [%3]\n"
784 @@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
785 smp_mb();
786
787 __asm__ __volatile__("@ atomic_add_return\n"
788 +"1: ldrex %1, [%3]\n"
789 +" adds %0, %1, %4\n"
790 +
791 +#ifdef CONFIG_PAX_REFCOUNT
792 +" bvc 3f\n"
793 +" mov %0, %1\n"
794 +"2: bkpt 0xf103\n"
795 +"3:\n"
796 +#endif
797 +
798 +" strex %1, %0, [%3]\n"
799 +" teq %1, #0\n"
800 +" bne 1b"
801 +
802 +#ifdef CONFIG_PAX_REFCOUNT
803 +"\n4:\n"
804 + _ASM_EXTABLE(2b, 4b)
805 +#endif
806 +
807 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
808 + : "r" (&v->counter), "Ir" (i)
809 + : "cc");
810 +
811 + smp_mb();
812 +
813 + return result;
814 +}
815 +
816 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
817 +{
818 + unsigned long tmp;
819 + int result;
820 +
821 + smp_mb();
822 +
823 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
824 "1: ldrex %0, [%3]\n"
825 " add %0, %0, %4\n"
826 " strex %1, %0, [%3]\n"
827 @@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
828 int result;
829
830 __asm__ __volatile__("@ atomic_sub\n"
831 +"1: ldrex %1, [%3]\n"
832 +" subs %0, %1, %4\n"
833 +
834 +#ifdef CONFIG_PAX_REFCOUNT
835 +" bvc 3f\n"
836 +"2: bkpt 0xf103\n"
837 +"3:\n"
838 +#endif
839 +
840 +" strex %1, %0, [%3]\n"
841 +" teq %1, #0\n"
842 +" bne 1b"
843 +
844 +#ifdef CONFIG_PAX_REFCOUNT
845 +"\n4:\n"
846 + _ASM_EXTABLE(2b, 4b)
847 +#endif
848 +
849 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
850 + : "r" (&v->counter), "Ir" (i)
851 + : "cc");
852 +}
853 +
854 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
855 +{
856 + unsigned long tmp;
857 + int result;
858 +
859 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
860 "1: ldrex %0, [%3]\n"
861 " sub %0, %0, %4\n"
862 " strex %1, %0, [%3]\n"
863 @@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
864 smp_mb();
865
866 __asm__ __volatile__("@ atomic_sub_return\n"
867 -"1: ldrex %0, [%3]\n"
868 -" sub %0, %0, %4\n"
869 +"1: ldrex %1, [%3]\n"
870 +" sub %0, %1, %4\n"
871 +
872 +#ifdef CONFIG_PAX_REFCOUNT
873 +" bvc 3f\n"
874 +" mov %0, %1\n"
875 +"2: bkpt 0xf103\n"
876 +"3:\n"
877 +#endif
878 +
879 " strex %1, %0, [%3]\n"
880 " teq %1, #0\n"
881 " bne 1b"
882 +
883 +#ifdef CONFIG_PAX_REFCOUNT
884 +"\n4:\n"
885 + _ASM_EXTABLE(2b, 4b)
886 +#endif
887 +
888 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
889 : "r" (&v->counter), "Ir" (i)
890 : "cc");
891 @@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
892 return oldval;
893 }
894
895 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
896 +{
897 + unsigned long oldval, res;
898 +
899 + smp_mb();
900 +
901 + do {
902 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
903 + "ldrex %1, [%3]\n"
904 + "mov %0, #0\n"
905 + "teq %1, %4\n"
906 + "strexeq %0, %5, [%3]\n"
907 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
908 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
909 + : "cc");
910 + } while (res);
911 +
912 + smp_mb();
913 +
914 + return oldval;
915 +}
916 +
917 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
918 {
919 unsigned long tmp, tmp2;
920 @@ -165,7 +307,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
921
922 return val;
923 }
924 +#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
925 #define atomic_add(i, v) (void) atomic_add_return(i, v)
926 +#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
927
928 static inline int atomic_sub_return(int i, atomic_t *v)
929 {
930 @@ -179,7 +323,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
931
932 return val;
933 }
934 +#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
935 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
936 +#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
937
938 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
939 {
940 @@ -194,6 +340,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
941
942 return ret;
943 }
944 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
945
946 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
947 {
948 @@ -207,6 +354,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
949 #endif /* __LINUX_ARM_ARCH__ */
950
951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
952 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
953 +{
954 + return xchg(&v->counter, new);
955 +}
956
957 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
958 {
959 @@ -219,11 +370,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
960 }
961
962 #define atomic_inc(v) atomic_add(1, v)
963 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
964 +{
965 + atomic_add_unchecked(1, v);
966 +}
967 #define atomic_dec(v) atomic_sub(1, v)
968 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
969 +{
970 + atomic_sub_unchecked(1, v);
971 +}
972
973 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
974 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
975 +{
976 + return atomic_add_return_unchecked(1, v) == 0;
977 +}
978 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
979 #define atomic_inc_return(v) (atomic_add_return(1, v))
980 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
981 +{
982 + return atomic_add_return_unchecked(1, v);
983 +}
984 #define atomic_dec_return(v) (atomic_sub_return(1, v))
985 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
986
987 @@ -239,6 +406,14 @@ typedef struct {
988 u64 __aligned(8) counter;
989 } atomic64_t;
990
991 +#ifdef CONFIG_PAX_REFCOUNT
992 +typedef struct {
993 + u64 __aligned(8) counter;
994 +} atomic64_unchecked_t;
995 +#else
996 +typedef atomic64_t atomic64_unchecked_t;
997 +#endif
998 +
999 #define ATOMIC64_INIT(i) { (i) }
1000
1001 static inline u64 atomic64_read(atomic64_t *v)
1002 @@ -254,6 +429,19 @@ static inline u64 atomic64_read(atomic64_t *v)
1003 return result;
1004 }
1005
1006 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1007 +{
1008 + u64 result;
1009 +
1010 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1011 +" ldrexd %0, %H0, [%1]"
1012 + : "=&r" (result)
1013 + : "r" (&v->counter), "Qo" (v->counter)
1014 + );
1015 +
1016 + return result;
1017 +}
1018 +
1019 static inline void atomic64_set(atomic64_t *v, u64 i)
1020 {
1021 u64 tmp;
1022 @@ -268,6 +456,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1023 : "cc");
1024 }
1025
1026 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1027 +{
1028 + u64 tmp;
1029 +
1030 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1031 +"1: ldrexd %0, %H0, [%2]\n"
1032 +" strexd %0, %3, %H3, [%2]\n"
1033 +" teq %0, #0\n"
1034 +" bne 1b"
1035 + : "=&r" (tmp), "=Qo" (v->counter)
1036 + : "r" (&v->counter), "r" (i)
1037 + : "cc");
1038 +}
1039 +
1040 static inline void atomic64_add(u64 i, atomic64_t *v)
1041 {
1042 u64 result;
1043 @@ -276,6 +478,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1044 __asm__ __volatile__("@ atomic64_add\n"
1045 "1: ldrexd %0, %H0, [%3]\n"
1046 " adds %0, %0, %4\n"
1047 +" adcs %H0, %H0, %H4\n"
1048 +
1049 +#ifdef CONFIG_PAX_REFCOUNT
1050 +" bvc 3f\n"
1051 +"2: bkpt 0xf103\n"
1052 +"3:\n"
1053 +#endif
1054 +
1055 +" strexd %1, %0, %H0, [%3]\n"
1056 +" teq %1, #0\n"
1057 +" bne 1b"
1058 +
1059 +#ifdef CONFIG_PAX_REFCOUNT
1060 +"\n4:\n"
1061 + _ASM_EXTABLE(2b, 4b)
1062 +#endif
1063 +
1064 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1065 + : "r" (&v->counter), "r" (i)
1066 + : "cc");
1067 +}
1068 +
1069 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1070 +{
1071 + u64 result;
1072 + unsigned long tmp;
1073 +
1074 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1075 +"1: ldrexd %0, %H0, [%3]\n"
1076 +" adds %0, %0, %4\n"
1077 " adc %H0, %H0, %H4\n"
1078 " strexd %1, %0, %H0, [%3]\n"
1079 " teq %1, #0\n"
1080 @@ -287,12 +519,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1081
1082 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1083 {
1084 - u64 result;
1085 - unsigned long tmp;
1086 + u64 result, tmp;
1087
1088 smp_mb();
1089
1090 __asm__ __volatile__("@ atomic64_add_return\n"
1091 +"1: ldrexd %1, %H1, [%3]\n"
1092 +" adds %0, %1, %4\n"
1093 +" adcs %H0, %H1, %H4\n"
1094 +
1095 +#ifdef CONFIG_PAX_REFCOUNT
1096 +" bvc 3f\n"
1097 +" mov %0, %1\n"
1098 +" mov %H0, %H1\n"
1099 +"2: bkpt 0xf103\n"
1100 +"3:\n"
1101 +#endif
1102 +
1103 +" strexd %1, %0, %H0, [%3]\n"
1104 +" teq %1, #0\n"
1105 +" bne 1b"
1106 +
1107 +#ifdef CONFIG_PAX_REFCOUNT
1108 +"\n4:\n"
1109 + _ASM_EXTABLE(2b, 4b)
1110 +#endif
1111 +
1112 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1113 + : "r" (&v->counter), "r" (i)
1114 + : "cc");
1115 +
1116 + smp_mb();
1117 +
1118 + return result;
1119 +}
1120 +
1121 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1122 +{
1123 + u64 result;
1124 + unsigned long tmp;
1125 +
1126 + smp_mb();
1127 +
1128 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1129 "1: ldrexd %0, %H0, [%3]\n"
1130 " adds %0, %0, %4\n"
1131 " adc %H0, %H0, %H4\n"
1132 @@ -316,6 +585,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1133 __asm__ __volatile__("@ atomic64_sub\n"
1134 "1: ldrexd %0, %H0, [%3]\n"
1135 " subs %0, %0, %4\n"
1136 +" sbcs %H0, %H0, %H4\n"
1137 +
1138 +#ifdef CONFIG_PAX_REFCOUNT
1139 +" bvc 3f\n"
1140 +"2: bkpt 0xf103\n"
1141 +"3:\n"
1142 +#endif
1143 +
1144 +" strexd %1, %0, %H0, [%3]\n"
1145 +" teq %1, #0\n"
1146 +" bne 1b"
1147 +
1148 +#ifdef CONFIG_PAX_REFCOUNT
1149 +"\n4:\n"
1150 + _ASM_EXTABLE(2b, 4b)
1151 +#endif
1152 +
1153 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1154 + : "r" (&v->counter), "r" (i)
1155 + : "cc");
1156 +}
1157 +
1158 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1159 +{
1160 + u64 result;
1161 + unsigned long tmp;
1162 +
1163 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1164 +"1: ldrexd %0, %H0, [%3]\n"
1165 +" subs %0, %0, %4\n"
1166 " sbc %H0, %H0, %H4\n"
1167 " strexd %1, %0, %H0, [%3]\n"
1168 " teq %1, #0\n"
1169 @@ -327,18 +626,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1170
1171 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1172 {
1173 - u64 result;
1174 - unsigned long tmp;
1175 + u64 result, tmp;
1176
1177 smp_mb();
1178
1179 __asm__ __volatile__("@ atomic64_sub_return\n"
1180 -"1: ldrexd %0, %H0, [%3]\n"
1181 -" subs %0, %0, %4\n"
1182 -" sbc %H0, %H0, %H4\n"
1183 +"1: ldrexd %1, %H1, [%3]\n"
1184 +" subs %0, %1, %4\n"
1185 +" sbc %H0, %H1, %H4\n"
1186 +
1187 +#ifdef CONFIG_PAX_REFCOUNT
1188 +" bvc 3f\n"
1189 +" mov %0, %1\n"
1190 +" mov %H0, %H1\n"
1191 +"2: bkpt 0xf103\n"
1192 +"3:\n"
1193 +#endif
1194 +
1195 " strexd %1, %0, %H0, [%3]\n"
1196 " teq %1, #0\n"
1197 " bne 1b"
1198 +
1199 +#ifdef CONFIG_PAX_REFCOUNT
1200 +"\n4:\n"
1201 + _ASM_EXTABLE(2b, 4b)
1202 +#endif
1203 +
1204 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1205 : "r" (&v->counter), "r" (i)
1206 : "cc");
1207 @@ -372,6 +685,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1208 return oldval;
1209 }
1210
1211 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1212 +{
1213 + u64 oldval;
1214 + unsigned long res;
1215 +
1216 + smp_mb();
1217 +
1218 + do {
1219 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1220 + "ldrexd %1, %H1, [%3]\n"
1221 + "mov %0, #0\n"
1222 + "teq %1, %4\n"
1223 + "teqeq %H1, %H4\n"
1224 + "strexdeq %0, %5, %H5, [%3]"
1225 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1226 + : "r" (&ptr->counter), "r" (old), "r" (new)
1227 + : "cc");
1228 + } while (res);
1229 +
1230 + smp_mb();
1231 +
1232 + return oldval;
1233 +}
1234 +
1235 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1236 {
1237 u64 result;
1238 @@ -395,21 +732,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1239
1240 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1241 {
1242 - u64 result;
1243 - unsigned long tmp;
1244 + u64 result, tmp;
1245
1246 smp_mb();
1247
1248 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1249 -"1: ldrexd %0, %H0, [%3]\n"
1250 -" subs %0, %0, #1\n"
1251 -" sbc %H0, %H0, #0\n"
1252 +"1: ldrexd %1, %H1, [%3]\n"
1253 +" subs %0, %1, #1\n"
1254 +" sbc %H0, %H1, #0\n"
1255 +
1256 +#ifdef CONFIG_PAX_REFCOUNT
1257 +" bvc 3f\n"
1258 +" mov %0, %1\n"
1259 +" mov %H0, %H1\n"
1260 +"2: bkpt 0xf103\n"
1261 +"3:\n"
1262 +#endif
1263 +
1264 " teq %H0, #0\n"
1265 -" bmi 2f\n"
1266 +" bmi 4f\n"
1267 " strexd %1, %0, %H0, [%3]\n"
1268 " teq %1, #0\n"
1269 " bne 1b\n"
1270 -"2:"
1271 +"4:\n"
1272 +
1273 +#ifdef CONFIG_PAX_REFCOUNT
1274 + _ASM_EXTABLE(2b, 4b)
1275 +#endif
1276 +
1277 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1278 : "r" (&v->counter)
1279 : "cc");
1280 @@ -432,13 +782,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1281 " teq %0, %5\n"
1282 " teqeq %H0, %H5\n"
1283 " moveq %1, #0\n"
1284 -" beq 2f\n"
1285 +" beq 4f\n"
1286 " adds %0, %0, %6\n"
1287 " adc %H0, %H0, %H6\n"
1288 +
1289 +#ifdef CONFIG_PAX_REFCOUNT
1290 +" bvc 3f\n"
1291 +"2: bkpt 0xf103\n"
1292 +"3:\n"
1293 +#endif
1294 +
1295 " strexd %2, %0, %H0, [%4]\n"
1296 " teq %2, #0\n"
1297 " bne 1b\n"
1298 -"2:"
1299 +"4:\n"
1300 +
1301 +#ifdef CONFIG_PAX_REFCOUNT
1302 + _ASM_EXTABLE(2b, 4b)
1303 +#endif
1304 +
1305 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1306 : "r" (&v->counter), "r" (u), "r" (a)
1307 : "cc");
1308 @@ -451,10 +813,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1309
1310 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1311 #define atomic64_inc(v) atomic64_add(1LL, (v))
1312 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1313 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1314 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1315 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1316 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1317 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1318 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1319 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1320 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1321 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1322 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1323 index 75fe66b..2255c86 100644
1324 --- a/arch/arm/include/asm/cache.h
1325 +++ b/arch/arm/include/asm/cache.h
1326 @@ -4,8 +4,10 @@
1327 #ifndef __ASMARM_CACHE_H
1328 #define __ASMARM_CACHE_H
1329
1330 +#include <linux/const.h>
1331 +
1332 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1333 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1334 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1335
1336 /*
1337 * Memory returned by kmalloc() may be used for DMA, so we must make
1338 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1339 index d5d8d5c..ad92c96 100644
1340 --- a/arch/arm/include/asm/cacheflush.h
1341 +++ b/arch/arm/include/asm/cacheflush.h
1342 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1343 void (*dma_unmap_area)(const void *, size_t, int);
1344
1345 void (*dma_flush_range)(const void *, const void *);
1346 -};
1347 +} __no_const;
1348
1349 /*
1350 * Select the calling method
1351 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1352 index 0e9ce8d..6ef1e03 100644
1353 --- a/arch/arm/include/asm/elf.h
1354 +++ b/arch/arm/include/asm/elf.h
1355 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1356 the loader. We need to make sure that it is out of the way of the program
1357 that it will "exec", and that there is sufficient room for the brk. */
1358
1359 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1360 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1361 +
1362 +#ifdef CONFIG_PAX_ASLR
1363 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1364 +
1365 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1366 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1367 +#endif
1368
1369 /* When the program starts, a1 contains a pointer to a function to be
1370 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1371 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1372 extern void elf_set_personality(const struct elf32_hdr *);
1373 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1374
1375 -struct mm_struct;
1376 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1377 -#define arch_randomize_brk arch_randomize_brk
1378 -
1379 extern int vectors_user_mapping(void);
1380 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
1381 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
1382 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1383 index e51b1e8..32a3113 100644
1384 --- a/arch/arm/include/asm/kmap_types.h
1385 +++ b/arch/arm/include/asm/kmap_types.h
1386 @@ -21,6 +21,7 @@ enum km_type {
1387 KM_L1_CACHE,
1388 KM_L2_CACHE,
1389 KM_KDB,
1390 + KM_CLEARPAGE,
1391 KM_TYPE_NR
1392 };
1393
1394 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1395 index 53426c6..c7baff3 100644
1396 --- a/arch/arm/include/asm/outercache.h
1397 +++ b/arch/arm/include/asm/outercache.h
1398 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1399 #endif
1400 void (*set_debug)(unsigned long);
1401 void (*resume)(void);
1402 -};
1403 +} __no_const;
1404
1405 #ifdef CONFIG_OUTER_CACHE
1406
1407 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1408 index 97b440c..b7ff179 100644
1409 --- a/arch/arm/include/asm/page.h
1410 +++ b/arch/arm/include/asm/page.h
1411 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1412 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1413 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1414 unsigned long vaddr, struct vm_area_struct *vma);
1415 -};
1416 +} __no_const;
1417
1418 #ifdef MULTI_USER
1419 extern struct cpu_user_fns cpu_user;
1420 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1421 index 943504f..bf8d667 100644
1422 --- a/arch/arm/include/asm/pgalloc.h
1423 +++ b/arch/arm/include/asm/pgalloc.h
1424 @@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1425 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1426 }
1427
1428 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1429 +{
1430 + pud_populate(mm, pud, pmd);
1431 +}
1432 +
1433 #else /* !CONFIG_ARM_LPAE */
1434
1435 /*
1436 @@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1437 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1438 #define pmd_free(mm, pmd) do { } while (0)
1439 #define pud_populate(mm,pmd,pte) BUG()
1440 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1441
1442 #endif /* CONFIG_ARM_LPAE */
1443
1444 diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1445 index e4c96cc..1145653 100644
1446 --- a/arch/arm/include/asm/system.h
1447 +++ b/arch/arm/include/asm/system.h
1448 @@ -98,6 +98,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
1449
1450 #define xchg(ptr,x) \
1451 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1452 +#define xchg_unchecked(ptr,x) \
1453 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1454
1455 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1456
1457 @@ -534,6 +536,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1458
1459 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1460
1461 +#define _ASM_EXTABLE(from, to) \
1462 +" .pushsection __ex_table,\"a\"\n"\
1463 +" .align 3\n" \
1464 +" .long " #from ", " #to"\n" \
1465 +" .popsection"
1466 +
1467 +
1468 #endif /* __ASSEMBLY__ */
1469
1470 #define arch_align_stack(x) (x)
1471 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1472 index d4c24d4..4ac53e8 100644
1473 --- a/arch/arm/include/asm/thread_info.h
1474 +++ b/arch/arm/include/asm/thread_info.h
1475 @@ -141,6 +141,12 @@ extern void vfp_flush_hwstate(struct thread_info *);
1476 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1477 #define TIF_SYSCALL_TRACE 8
1478 #define TIF_SYSCALL_AUDIT 9
1479 +
1480 +/* within 8 bits of TIF_SYSCALL_TRACE
1481 + to meet flexible second operand requirements
1482 +*/
1483 +#define TIF_GRSEC_SETXID 10
1484 +
1485 #define TIF_POLLING_NRFLAG 16
1486 #define TIF_USING_IWMMXT 17
1487 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1488 @@ -156,9 +162,11 @@ extern void vfp_flush_hwstate(struct thread_info *);
1489 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1490 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1491 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1492 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1493
1494 /* Checks for any syscall work in entry-common.S */
1495 -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1496 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1497 + _TIF_GRSEC_SETXID)
1498
1499 /*
1500 * Change these and you break ASM code in entry-common.S
1501 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1502 index 2958976..12ccac4 100644
1503 --- a/arch/arm/include/asm/uaccess.h
1504 +++ b/arch/arm/include/asm/uaccess.h
1505 @@ -22,6 +22,8 @@
1506 #define VERIFY_READ 0
1507 #define VERIFY_WRITE 1
1508
1509 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1510 +
1511 /*
1512 * The exception table consists of pairs of addresses: the first is the
1513 * address of an instruction that is allowed to fault, and the second is
1514 @@ -387,8 +389,23 @@ do { \
1515
1516
1517 #ifdef CONFIG_MMU
1518 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1519 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1520 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1521 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1522 +
1523 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1524 +{
1525 + if (!__builtin_constant_p(n))
1526 + check_object_size(to, n, false);
1527 + return ___copy_from_user(to, from, n);
1528 +}
1529 +
1530 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1531 +{
1532 + if (!__builtin_constant_p(n))
1533 + check_object_size(from, n, true);
1534 + return ___copy_to_user(to, from, n);
1535 +}
1536 +
1537 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1538 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1539 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1540 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1541
1542 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1543 {
1544 + if ((long)n < 0)
1545 + return n;
1546 +
1547 if (access_ok(VERIFY_READ, from, n))
1548 n = __copy_from_user(to, from, n);
1549 else /* security hole - plug it */
1550 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1551
1552 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1553 {
1554 + if ((long)n < 0)
1555 + return n;
1556 +
1557 if (access_ok(VERIFY_WRITE, to, n))
1558 n = __copy_to_user(to, from, n);
1559 return n;
1560 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1561 index 5b0bce6..becd81c 100644
1562 --- a/arch/arm/kernel/armksyms.c
1563 +++ b/arch/arm/kernel/armksyms.c
1564 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1565 #ifdef CONFIG_MMU
1566 EXPORT_SYMBOL(copy_page);
1567
1568 -EXPORT_SYMBOL(__copy_from_user);
1569 -EXPORT_SYMBOL(__copy_to_user);
1570 +EXPORT_SYMBOL(___copy_from_user);
1571 +EXPORT_SYMBOL(___copy_to_user);
1572 EXPORT_SYMBOL(__clear_user);
1573
1574 EXPORT_SYMBOL(__get_user_1);
1575 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1576 index 971d65c..cc936fb 100644
1577 --- a/arch/arm/kernel/process.c
1578 +++ b/arch/arm/kernel/process.c
1579 @@ -28,7 +28,6 @@
1580 #include <linux/tick.h>
1581 #include <linux/utsname.h>
1582 #include <linux/uaccess.h>
1583 -#include <linux/random.h>
1584 #include <linux/hw_breakpoint.h>
1585 #include <linux/cpuidle.h>
1586
1587 @@ -273,9 +272,10 @@ void machine_power_off(void)
1588 machine_shutdown();
1589 if (pm_power_off)
1590 pm_power_off();
1591 + BUG();
1592 }
1593
1594 -void machine_restart(char *cmd)
1595 +__noreturn void machine_restart(char *cmd)
1596 {
1597 machine_shutdown();
1598
1599 @@ -517,12 +517,6 @@ unsigned long get_wchan(struct task_struct *p)
1600 return 0;
1601 }
1602
1603 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1604 -{
1605 - unsigned long range_end = mm->brk + 0x02000000;
1606 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1607 -}
1608 -
1609 #ifdef CONFIG_MMU
1610 /*
1611 * The vectors page is always readable from user space for the
1612 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1613 index f5ce8ab..4b73893 100644
1614 --- a/arch/arm/kernel/ptrace.c
1615 +++ b/arch/arm/kernel/ptrace.c
1616 @@ -905,10 +905,19 @@ long arch_ptrace(struct task_struct *child, long request,
1617 return ret;
1618 }
1619
1620 +#ifdef CONFIG_GRKERNSEC_SETXID
1621 +extern void gr_delayed_cred_worker(void);
1622 +#endif
1623 +
1624 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1625 {
1626 unsigned long ip;
1627
1628 +#ifdef CONFIG_GRKERNSEC_SETXID
1629 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1630 + gr_delayed_cred_worker();
1631 +#endif
1632 +
1633 if (why)
1634 audit_syscall_exit(regs);
1635 else
1636 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1637 index a255c39..4a19b25 100644
1638 --- a/arch/arm/kernel/setup.c
1639 +++ b/arch/arm/kernel/setup.c
1640 @@ -109,13 +109,13 @@ struct processor processor __read_mostly;
1641 struct cpu_tlb_fns cpu_tlb __read_mostly;
1642 #endif
1643 #ifdef MULTI_USER
1644 -struct cpu_user_fns cpu_user __read_mostly;
1645 +struct cpu_user_fns cpu_user __read_only;
1646 #endif
1647 #ifdef MULTI_CACHE
1648 -struct cpu_cache_fns cpu_cache __read_mostly;
1649 +struct cpu_cache_fns cpu_cache __read_only;
1650 #endif
1651 #ifdef CONFIG_OUTER_CACHE
1652 -struct outer_cache_fns outer_cache __read_mostly;
1653 +struct outer_cache_fns outer_cache __read_only;
1654 EXPORT_SYMBOL(outer_cache);
1655 #endif
1656
1657 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1658 index f84dfe6..13e94f7 100644
1659 --- a/arch/arm/kernel/traps.c
1660 +++ b/arch/arm/kernel/traps.c
1661 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1662
1663 static DEFINE_RAW_SPINLOCK(die_lock);
1664
1665 +extern void gr_handle_kernel_exploit(void);
1666 +
1667 /*
1668 * This function is protected against re-entrancy.
1669 */
1670 @@ -291,6 +293,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1671 panic("Fatal exception in interrupt");
1672 if (panic_on_oops)
1673 panic("Fatal exception");
1674 +
1675 + gr_handle_kernel_exploit();
1676 +
1677 if (ret != NOTIFY_STOP)
1678 do_exit(SIGSEGV);
1679 }
1680 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1681 index 66a477a..bee61d3 100644
1682 --- a/arch/arm/lib/copy_from_user.S
1683 +++ b/arch/arm/lib/copy_from_user.S
1684 @@ -16,7 +16,7 @@
1685 /*
1686 * Prototype:
1687 *
1688 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1689 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1690 *
1691 * Purpose:
1692 *
1693 @@ -84,11 +84,11 @@
1694
1695 .text
1696
1697 -ENTRY(__copy_from_user)
1698 +ENTRY(___copy_from_user)
1699
1700 #include "copy_template.S"
1701
1702 -ENDPROC(__copy_from_user)
1703 +ENDPROC(___copy_from_user)
1704
1705 .pushsection .fixup,"ax"
1706 .align 0
1707 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1708 index 6ee2f67..d1cce76 100644
1709 --- a/arch/arm/lib/copy_page.S
1710 +++ b/arch/arm/lib/copy_page.S
1711 @@ -10,6 +10,7 @@
1712 * ASM optimised string functions
1713 */
1714 #include <linux/linkage.h>
1715 +#include <linux/const.h>
1716 #include <asm/assembler.h>
1717 #include <asm/asm-offsets.h>
1718 #include <asm/cache.h>
1719 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1720 index d066df6..df28194 100644
1721 --- a/arch/arm/lib/copy_to_user.S
1722 +++ b/arch/arm/lib/copy_to_user.S
1723 @@ -16,7 +16,7 @@
1724 /*
1725 * Prototype:
1726 *
1727 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1728 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1729 *
1730 * Purpose:
1731 *
1732 @@ -88,11 +88,11 @@
1733 .text
1734
1735 ENTRY(__copy_to_user_std)
1736 -WEAK(__copy_to_user)
1737 +WEAK(___copy_to_user)
1738
1739 #include "copy_template.S"
1740
1741 -ENDPROC(__copy_to_user)
1742 +ENDPROC(___copy_to_user)
1743 ENDPROC(__copy_to_user_std)
1744
1745 .pushsection .fixup,"ax"
1746 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1747 index 5c908b1..e712687 100644
1748 --- a/arch/arm/lib/uaccess.S
1749 +++ b/arch/arm/lib/uaccess.S
1750 @@ -20,7 +20,7 @@
1751
1752 #define PAGE_SHIFT 12
1753
1754 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1755 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1756 * Purpose : copy a block to user memory from kernel memory
1757 * Params : to - user memory
1758 * : from - kernel memory
1759 @@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1760 sub r2, r2, ip
1761 b .Lc2u_dest_aligned
1762
1763 -ENTRY(__copy_to_user)
1764 +ENTRY(___copy_to_user)
1765 stmfd sp!, {r2, r4 - r7, lr}
1766 cmp r2, #4
1767 blt .Lc2u_not_enough
1768 @@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1769 ldrgtb r3, [r1], #0
1770 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1771 b .Lc2u_finished
1772 -ENDPROC(__copy_to_user)
1773 +ENDPROC(___copy_to_user)
1774
1775 .pushsection .fixup,"ax"
1776 .align 0
1777 9001: ldmfd sp!, {r0, r4 - r7, pc}
1778 .popsection
1779
1780 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1781 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1782 * Purpose : copy a block from user memory to kernel memory
1783 * Params : to - kernel memory
1784 * : from - user memory
1785 @@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1786 sub r2, r2, ip
1787 b .Lcfu_dest_aligned
1788
1789 -ENTRY(__copy_from_user)
1790 +ENTRY(___copy_from_user)
1791 stmfd sp!, {r0, r2, r4 - r7, lr}
1792 cmp r2, #4
1793 blt .Lcfu_not_enough
1794 @@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1795 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1796 strgtb r3, [r0], #1
1797 b .Lcfu_finished
1798 -ENDPROC(__copy_from_user)
1799 +ENDPROC(___copy_from_user)
1800
1801 .pushsection .fixup,"ax"
1802 .align 0
1803 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1804 index 025f742..8432b08 100644
1805 --- a/arch/arm/lib/uaccess_with_memcpy.c
1806 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1807 @@ -104,7 +104,7 @@ out:
1808 }
1809
1810 unsigned long
1811 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1812 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1813 {
1814 /*
1815 * This test is stubbed out of the main function above to keep
1816 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1817 index 6722627..8f97548c 100644
1818 --- a/arch/arm/mach-omap2/board-n8x0.c
1819 +++ b/arch/arm/mach-omap2/board-n8x0.c
1820 @@ -597,7 +597,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1821 }
1822 #endif
1823
1824 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1825 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1826 .late_init = n8x0_menelaus_late_init,
1827 };
1828
1829 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
1830 index 2b2d51c..0127490 100644
1831 --- a/arch/arm/mach-ux500/mbox-db5500.c
1832 +++ b/arch/arm/mach-ux500/mbox-db5500.c
1833 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
1834 return sprintf(buf, "0x%X\n", mbox_value);
1835 }
1836
1837 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1838 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1839
1840 static int mbox_show(struct seq_file *s, void *data)
1841 {
1842 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1843 index 90e366a..1b92505 100644
1844 --- a/arch/arm/mm/fault.c
1845 +++ b/arch/arm/mm/fault.c
1846 @@ -172,6 +172,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1847 }
1848 #endif
1849
1850 +#ifdef CONFIG_PAX_PAGEEXEC
1851 + if (fsr & FSR_LNX_PF) {
1852 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1853 + do_group_exit(SIGKILL);
1854 + }
1855 +#endif
1856 +
1857 tsk->thread.address = addr;
1858 tsk->thread.error_code = fsr;
1859 tsk->thread.trap_no = 14;
1860 @@ -395,6 +402,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1861 }
1862 #endif /* CONFIG_MMU */
1863
1864 +#ifdef CONFIG_PAX_PAGEEXEC
1865 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1866 +{
1867 + long i;
1868 +
1869 + printk(KERN_ERR "PAX: bytes at PC: ");
1870 + for (i = 0; i < 20; i++) {
1871 + unsigned char c;
1872 + if (get_user(c, (__force unsigned char __user *)pc+i))
1873 + printk(KERN_CONT "?? ");
1874 + else
1875 + printk(KERN_CONT "%02x ", c);
1876 + }
1877 + printk("\n");
1878 +
1879 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1880 + for (i = -1; i < 20; i++) {
1881 + unsigned long c;
1882 + if (get_user(c, (__force unsigned long __user *)sp+i))
1883 + printk(KERN_CONT "???????? ");
1884 + else
1885 + printk(KERN_CONT "%08lx ", c);
1886 + }
1887 + printk("\n");
1888 +}
1889 +#endif
1890 +
1891 /*
1892 * First Level Translation Fault Handler
1893 *
1894 @@ -575,6 +609,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1895 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1896 struct siginfo info;
1897
1898 +#ifdef CONFIG_PAX_REFCOUNT
1899 + if (fsr_fs(ifsr) == 2) {
1900 + unsigned int bkpt;
1901 +
1902 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1903 + current->thread.error_code = ifsr;
1904 + current->thread.trap_no = 0;
1905 + pax_report_refcount_overflow(regs);
1906 + fixup_exception(regs);
1907 + return;
1908 + }
1909 + }
1910 +#endif
1911 +
1912 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1913 return;
1914
1915 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1916 index ce8cb19..3ec539d 100644
1917 --- a/arch/arm/mm/mmap.c
1918 +++ b/arch/arm/mm/mmap.c
1919 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1920 if (len > TASK_SIZE)
1921 return -ENOMEM;
1922
1923 +#ifdef CONFIG_PAX_RANDMMAP
1924 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1925 +#endif
1926 +
1927 if (addr) {
1928 if (do_align)
1929 addr = COLOUR_ALIGN(addr, pgoff);
1930 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1931 addr = PAGE_ALIGN(addr);
1932
1933 vma = find_vma(mm, addr);
1934 - if (TASK_SIZE - len >= addr &&
1935 - (!vma || addr + len <= vma->vm_start))
1936 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1937 return addr;
1938 }
1939 if (len > mm->cached_hole_size) {
1940 - start_addr = addr = mm->free_area_cache;
1941 + start_addr = addr = mm->free_area_cache;
1942 } else {
1943 - start_addr = addr = mm->mmap_base;
1944 - mm->cached_hole_size = 0;
1945 + start_addr = addr = mm->mmap_base;
1946 + mm->cached_hole_size = 0;
1947 }
1948
1949 full_search:
1950 @@ -124,14 +127,14 @@ full_search:
1951 * Start a new search - just in case we missed
1952 * some holes.
1953 */
1954 - if (start_addr != TASK_UNMAPPED_BASE) {
1955 - start_addr = addr = TASK_UNMAPPED_BASE;
1956 + if (start_addr != mm->mmap_base) {
1957 + start_addr = addr = mm->mmap_base;
1958 mm->cached_hole_size = 0;
1959 goto full_search;
1960 }
1961 return -ENOMEM;
1962 }
1963 - if (!vma || addr + len <= vma->vm_start) {
1964 + if (check_heap_stack_gap(vma, addr, len)) {
1965 /*
1966 * Remember the place where we stopped the search:
1967 */
1968 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1969
1970 if (mmap_is_legacy()) {
1971 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1972 +
1973 +#ifdef CONFIG_PAX_RANDMMAP
1974 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1975 + mm->mmap_base += mm->delta_mmap;
1976 +#endif
1977 +
1978 mm->get_unmapped_area = arch_get_unmapped_area;
1979 mm->unmap_area = arch_unmap_area;
1980 } else {
1981 mm->mmap_base = mmap_base(random_factor);
1982 +
1983 +#ifdef CONFIG_PAX_RANDMMAP
1984 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1985 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
1986 +#endif
1987 +
1988 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1989 mm->unmap_area = arch_unmap_area_topdown;
1990 }
1991 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1992 index 71a6827..e7fbc23 100644
1993 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1994 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1995 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
1996 int (*started)(unsigned ch);
1997 int (*flush)(unsigned ch);
1998 int (*stop)(unsigned ch);
1999 -};
2000 +} __no_const;
2001
2002 extern void *samsung_dmadev_get_ops(void);
2003 extern void *s3c_dma_get_ops(void);
2004 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
2005 index 5f28cae..3d23723 100644
2006 --- a/arch/arm/plat-samsung/include/plat/ehci.h
2007 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
2008 @@ -14,7 +14,7 @@
2009 struct s5p_ehci_platdata {
2010 int (*phy_init)(struct platform_device *pdev, int type);
2011 int (*phy_exit)(struct platform_device *pdev, int type);
2012 -};
2013 +} __no_const;
2014
2015 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2016
2017 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2018 index c3a58a1..78fbf54 100644
2019 --- a/arch/avr32/include/asm/cache.h
2020 +++ b/arch/avr32/include/asm/cache.h
2021 @@ -1,8 +1,10 @@
2022 #ifndef __ASM_AVR32_CACHE_H
2023 #define __ASM_AVR32_CACHE_H
2024
2025 +#include <linux/const.h>
2026 +
2027 #define L1_CACHE_SHIFT 5
2028 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2029 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2030
2031 /*
2032 * Memory returned by kmalloc() may be used for DMA, so we must make
2033 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2034 index 3b3159b..425ea94 100644
2035 --- a/arch/avr32/include/asm/elf.h
2036 +++ b/arch/avr32/include/asm/elf.h
2037 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2038 the loader. We need to make sure that it is out of the way of the program
2039 that it will "exec", and that there is sufficient room for the brk. */
2040
2041 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2042 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2043
2044 +#ifdef CONFIG_PAX_ASLR
2045 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2046 +
2047 +#define PAX_DELTA_MMAP_LEN 15
2048 +#define PAX_DELTA_STACK_LEN 15
2049 +#endif
2050
2051 /* This yields a mask that user programs can use to figure out what
2052 instruction set this CPU supports. This could be done in user space,
2053 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2054 index b7f5c68..556135c 100644
2055 --- a/arch/avr32/include/asm/kmap_types.h
2056 +++ b/arch/avr32/include/asm/kmap_types.h
2057 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2058 D(11) KM_IRQ1,
2059 D(12) KM_SOFTIRQ0,
2060 D(13) KM_SOFTIRQ1,
2061 -D(14) KM_TYPE_NR
2062 +D(14) KM_CLEARPAGE,
2063 +D(15) KM_TYPE_NR
2064 };
2065
2066 #undef D
2067 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2068 index f7040a1..db9f300 100644
2069 --- a/arch/avr32/mm/fault.c
2070 +++ b/arch/avr32/mm/fault.c
2071 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2072
2073 int exception_trace = 1;
2074
2075 +#ifdef CONFIG_PAX_PAGEEXEC
2076 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2077 +{
2078 + unsigned long i;
2079 +
2080 + printk(KERN_ERR "PAX: bytes at PC: ");
2081 + for (i = 0; i < 20; i++) {
2082 + unsigned char c;
2083 + if (get_user(c, (unsigned char *)pc+i))
2084 + printk(KERN_CONT "???????? ");
2085 + else
2086 + printk(KERN_CONT "%02x ", c);
2087 + }
2088 + printk("\n");
2089 +}
2090 +#endif
2091 +
2092 /*
2093 * This routine handles page faults. It determines the address and the
2094 * problem, and then passes it off to one of the appropriate routines.
2095 @@ -156,6 +173,16 @@ bad_area:
2096 up_read(&mm->mmap_sem);
2097
2098 if (user_mode(regs)) {
2099 +
2100 +#ifdef CONFIG_PAX_PAGEEXEC
2101 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2102 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2103 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2104 + do_group_exit(SIGKILL);
2105 + }
2106 + }
2107 +#endif
2108 +
2109 if (exception_trace && printk_ratelimit())
2110 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2111 "sp %08lx ecr %lu\n",
2112 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2113 index 568885a..f8008df 100644
2114 --- a/arch/blackfin/include/asm/cache.h
2115 +++ b/arch/blackfin/include/asm/cache.h
2116 @@ -7,6 +7,7 @@
2117 #ifndef __ARCH_BLACKFIN_CACHE_H
2118 #define __ARCH_BLACKFIN_CACHE_H
2119
2120 +#include <linux/const.h>
2121 #include <linux/linkage.h> /* for asmlinkage */
2122
2123 /*
2124 @@ -14,7 +15,7 @@
2125 * Blackfin loads 32 bytes for cache
2126 */
2127 #define L1_CACHE_SHIFT 5
2128 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2129 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2130 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2131
2132 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2133 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2134 index aea2718..3639a60 100644
2135 --- a/arch/cris/include/arch-v10/arch/cache.h
2136 +++ b/arch/cris/include/arch-v10/arch/cache.h
2137 @@ -1,8 +1,9 @@
2138 #ifndef _ASM_ARCH_CACHE_H
2139 #define _ASM_ARCH_CACHE_H
2140
2141 +#include <linux/const.h>
2142 /* Etrax 100LX have 32-byte cache-lines. */
2143 -#define L1_CACHE_BYTES 32
2144 #define L1_CACHE_SHIFT 5
2145 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2146
2147 #endif /* _ASM_ARCH_CACHE_H */
2148 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2149 index 1de779f..336fad3 100644
2150 --- a/arch/cris/include/arch-v32/arch/cache.h
2151 +++ b/arch/cris/include/arch-v32/arch/cache.h
2152 @@ -1,11 +1,12 @@
2153 #ifndef _ASM_CRIS_ARCH_CACHE_H
2154 #define _ASM_CRIS_ARCH_CACHE_H
2155
2156 +#include <linux/const.h>
2157 #include <arch/hwregs/dma.h>
2158
2159 /* A cache-line is 32 bytes. */
2160 -#define L1_CACHE_BYTES 32
2161 #define L1_CACHE_SHIFT 5
2162 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2163
2164 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2165
2166 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2167 index 0d8a7d6..d0c9ff5 100644
2168 --- a/arch/frv/include/asm/atomic.h
2169 +++ b/arch/frv/include/asm/atomic.h
2170 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
2171 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2172 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2173
2174 +#define atomic64_read_unchecked(v) atomic64_read(v)
2175 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2176 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2177 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2178 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2179 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2180 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2181 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2182 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2183 +
2184 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2185 {
2186 int c, old;
2187 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2188 index 2797163..c2a401d 100644
2189 --- a/arch/frv/include/asm/cache.h
2190 +++ b/arch/frv/include/asm/cache.h
2191 @@ -12,10 +12,11 @@
2192 #ifndef __ASM_CACHE_H
2193 #define __ASM_CACHE_H
2194
2195 +#include <linux/const.h>
2196
2197 /* bytes per L1 cache line */
2198 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2199 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2200 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2201
2202 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2203 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2204 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2205 index f8e16b2..c73ff79 100644
2206 --- a/arch/frv/include/asm/kmap_types.h
2207 +++ b/arch/frv/include/asm/kmap_types.h
2208 @@ -23,6 +23,7 @@ enum km_type {
2209 KM_IRQ1,
2210 KM_SOFTIRQ0,
2211 KM_SOFTIRQ1,
2212 + KM_CLEARPAGE,
2213 KM_TYPE_NR
2214 };
2215
2216 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2217 index 385fd30..6c3d97e 100644
2218 --- a/arch/frv/mm/elf-fdpic.c
2219 +++ b/arch/frv/mm/elf-fdpic.c
2220 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2221 if (addr) {
2222 addr = PAGE_ALIGN(addr);
2223 vma = find_vma(current->mm, addr);
2224 - if (TASK_SIZE - len >= addr &&
2225 - (!vma || addr + len <= vma->vm_start))
2226 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2227 goto success;
2228 }
2229
2230 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2231 for (; vma; vma = vma->vm_next) {
2232 if (addr > limit)
2233 break;
2234 - if (addr + len <= vma->vm_start)
2235 + if (check_heap_stack_gap(vma, addr, len))
2236 goto success;
2237 addr = vma->vm_end;
2238 }
2239 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2240 for (; vma; vma = vma->vm_next) {
2241 if (addr > limit)
2242 break;
2243 - if (addr + len <= vma->vm_start)
2244 + if (check_heap_stack_gap(vma, addr, len))
2245 goto success;
2246 addr = vma->vm_end;
2247 }
2248 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2249 index c635028..6d9445a 100644
2250 --- a/arch/h8300/include/asm/cache.h
2251 +++ b/arch/h8300/include/asm/cache.h
2252 @@ -1,8 +1,10 @@
2253 #ifndef __ARCH_H8300_CACHE_H
2254 #define __ARCH_H8300_CACHE_H
2255
2256 +#include <linux/const.h>
2257 +
2258 /* bytes per L1 cache line */
2259 -#define L1_CACHE_BYTES 4
2260 +#define L1_CACHE_BYTES _AC(4,UL)
2261
2262 /* m68k-elf-gcc 2.95.2 doesn't like these */
2263
2264 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2265 index 0f01de2..d37d309 100644
2266 --- a/arch/hexagon/include/asm/cache.h
2267 +++ b/arch/hexagon/include/asm/cache.h
2268 @@ -21,9 +21,11 @@
2269 #ifndef __ASM_CACHE_H
2270 #define __ASM_CACHE_H
2271
2272 +#include <linux/const.h>
2273 +
2274 /* Bytes per L1 cache line */
2275 -#define L1_CACHE_SHIFT (5)
2276 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2277 +#define L1_CACHE_SHIFT 5
2278 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2279
2280 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2281 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2282 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2283 index 3fad89e..3047da5 100644
2284 --- a/arch/ia64/include/asm/atomic.h
2285 +++ b/arch/ia64/include/asm/atomic.h
2286 @@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2287 #define atomic64_inc(v) atomic64_add(1, (v))
2288 #define atomic64_dec(v) atomic64_sub(1, (v))
2289
2290 +#define atomic64_read_unchecked(v) atomic64_read(v)
2291 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2292 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2293 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2294 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2295 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2296 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2297 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2298 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2299 +
2300 /* Atomic operations are already serializing */
2301 #define smp_mb__before_atomic_dec() barrier()
2302 #define smp_mb__after_atomic_dec() barrier()
2303 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2304 index 988254a..e1ee885 100644
2305 --- a/arch/ia64/include/asm/cache.h
2306 +++ b/arch/ia64/include/asm/cache.h
2307 @@ -1,6 +1,7 @@
2308 #ifndef _ASM_IA64_CACHE_H
2309 #define _ASM_IA64_CACHE_H
2310
2311 +#include <linux/const.h>
2312
2313 /*
2314 * Copyright (C) 1998-2000 Hewlett-Packard Co
2315 @@ -9,7 +10,7 @@
2316
2317 /* Bytes per L1 (data) cache line. */
2318 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2319 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2320 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2321
2322 #ifdef CONFIG_SMP
2323 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2324 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2325 index b5298eb..67c6e62 100644
2326 --- a/arch/ia64/include/asm/elf.h
2327 +++ b/arch/ia64/include/asm/elf.h
2328 @@ -42,6 +42,13 @@
2329 */
2330 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2331
2332 +#ifdef CONFIG_PAX_ASLR
2333 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2334 +
2335 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2336 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2337 +#endif
2338 +
2339 #define PT_IA_64_UNWIND 0x70000001
2340
2341 /* IA-64 relocations: */
2342 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2343 index 96a8d92..617a1cf 100644
2344 --- a/arch/ia64/include/asm/pgalloc.h
2345 +++ b/arch/ia64/include/asm/pgalloc.h
2346 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2347 pgd_val(*pgd_entry) = __pa(pud);
2348 }
2349
2350 +static inline void
2351 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2352 +{
2353 + pgd_populate(mm, pgd_entry, pud);
2354 +}
2355 +
2356 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2357 {
2358 return quicklist_alloc(0, GFP_KERNEL, NULL);
2359 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2360 pud_val(*pud_entry) = __pa(pmd);
2361 }
2362
2363 +static inline void
2364 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2365 +{
2366 + pud_populate(mm, pud_entry, pmd);
2367 +}
2368 +
2369 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2370 {
2371 return quicklist_alloc(0, GFP_KERNEL, NULL);
2372 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2373 index 1a97af3..7529d31 100644
2374 --- a/arch/ia64/include/asm/pgtable.h
2375 +++ b/arch/ia64/include/asm/pgtable.h
2376 @@ -12,7 +12,7 @@
2377 * David Mosberger-Tang <davidm@hpl.hp.com>
2378 */
2379
2380 -
2381 +#include <linux/const.h>
2382 #include <asm/mman.h>
2383 #include <asm/page.h>
2384 #include <asm/processor.h>
2385 @@ -143,6 +143,17 @@
2386 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2387 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2388 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2389 +
2390 +#ifdef CONFIG_PAX_PAGEEXEC
2391 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2392 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2393 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2394 +#else
2395 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2396 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2397 +# define PAGE_COPY_NOEXEC PAGE_COPY
2398 +#endif
2399 +
2400 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2401 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2402 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2403 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2404 index b77768d..e0795eb 100644
2405 --- a/arch/ia64/include/asm/spinlock.h
2406 +++ b/arch/ia64/include/asm/spinlock.h
2407 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2408 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2409
2410 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2411 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2412 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2413 }
2414
2415 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2416 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2417 index 449c8c0..432a3d2 100644
2418 --- a/arch/ia64/include/asm/uaccess.h
2419 +++ b/arch/ia64/include/asm/uaccess.h
2420 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2421 const void *__cu_from = (from); \
2422 long __cu_len = (n); \
2423 \
2424 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2425 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2426 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2427 __cu_len; \
2428 })
2429 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2430 long __cu_len = (n); \
2431 \
2432 __chk_user_ptr(__cu_from); \
2433 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2434 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2435 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2436 __cu_len; \
2437 })
2438 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2439 index 24603be..948052d 100644
2440 --- a/arch/ia64/kernel/module.c
2441 +++ b/arch/ia64/kernel/module.c
2442 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2443 void
2444 module_free (struct module *mod, void *module_region)
2445 {
2446 - if (mod && mod->arch.init_unw_table &&
2447 - module_region == mod->module_init) {
2448 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2449 unw_remove_unwind_table(mod->arch.init_unw_table);
2450 mod->arch.init_unw_table = NULL;
2451 }
2452 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2453 }
2454
2455 static inline int
2456 +in_init_rx (const struct module *mod, uint64_t addr)
2457 +{
2458 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2459 +}
2460 +
2461 +static inline int
2462 +in_init_rw (const struct module *mod, uint64_t addr)
2463 +{
2464 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2465 +}
2466 +
2467 +static inline int
2468 in_init (const struct module *mod, uint64_t addr)
2469 {
2470 - return addr - (uint64_t) mod->module_init < mod->init_size;
2471 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2472 +}
2473 +
2474 +static inline int
2475 +in_core_rx (const struct module *mod, uint64_t addr)
2476 +{
2477 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2478 +}
2479 +
2480 +static inline int
2481 +in_core_rw (const struct module *mod, uint64_t addr)
2482 +{
2483 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2484 }
2485
2486 static inline int
2487 in_core (const struct module *mod, uint64_t addr)
2488 {
2489 - return addr - (uint64_t) mod->module_core < mod->core_size;
2490 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2491 }
2492
2493 static inline int
2494 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2495 break;
2496
2497 case RV_BDREL:
2498 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2499 + if (in_init_rx(mod, val))
2500 + val -= (uint64_t) mod->module_init_rx;
2501 + else if (in_init_rw(mod, val))
2502 + val -= (uint64_t) mod->module_init_rw;
2503 + else if (in_core_rx(mod, val))
2504 + val -= (uint64_t) mod->module_core_rx;
2505 + else if (in_core_rw(mod, val))
2506 + val -= (uint64_t) mod->module_core_rw;
2507 break;
2508
2509 case RV_LTV:
2510 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2511 * addresses have been selected...
2512 */
2513 uint64_t gp;
2514 - if (mod->core_size > MAX_LTOFF)
2515 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2516 /*
2517 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2518 * at the end of the module.
2519 */
2520 - gp = mod->core_size - MAX_LTOFF / 2;
2521 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2522 else
2523 - gp = mod->core_size / 2;
2524 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2525 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2526 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2527 mod->arch.gp = gp;
2528 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2529 }
2530 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2531 index 609d500..7dde2a8 100644
2532 --- a/arch/ia64/kernel/sys_ia64.c
2533 +++ b/arch/ia64/kernel/sys_ia64.c
2534 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2535 if (REGION_NUMBER(addr) == RGN_HPAGE)
2536 addr = 0;
2537 #endif
2538 +
2539 +#ifdef CONFIG_PAX_RANDMMAP
2540 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2541 + addr = mm->free_area_cache;
2542 + else
2543 +#endif
2544 +
2545 if (!addr)
2546 addr = mm->free_area_cache;
2547
2548 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2549 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2550 /* At this point: (!vma || addr < vma->vm_end). */
2551 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2552 - if (start_addr != TASK_UNMAPPED_BASE) {
2553 + if (start_addr != mm->mmap_base) {
2554 /* Start a new search --- just in case we missed some holes. */
2555 - addr = TASK_UNMAPPED_BASE;
2556 + addr = mm->mmap_base;
2557 goto full_search;
2558 }
2559 return -ENOMEM;
2560 }
2561 - if (!vma || addr + len <= vma->vm_start) {
2562 + if (check_heap_stack_gap(vma, addr, len)) {
2563 /* Remember the address where we stopped this search: */
2564 mm->free_area_cache = addr + len;
2565 return addr;
2566 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2567 index 53c0ba0..2accdde 100644
2568 --- a/arch/ia64/kernel/vmlinux.lds.S
2569 +++ b/arch/ia64/kernel/vmlinux.lds.S
2570 @@ -199,7 +199,7 @@ SECTIONS {
2571 /* Per-cpu data: */
2572 . = ALIGN(PERCPU_PAGE_SIZE);
2573 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2574 - __phys_per_cpu_start = __per_cpu_load;
2575 + __phys_per_cpu_start = per_cpu_load;
2576 /*
2577 * ensure percpu data fits
2578 * into percpu page size
2579 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2580 index 20b3593..1ce77f0 100644
2581 --- a/arch/ia64/mm/fault.c
2582 +++ b/arch/ia64/mm/fault.c
2583 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
2584 return pte_present(pte);
2585 }
2586
2587 +#ifdef CONFIG_PAX_PAGEEXEC
2588 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2589 +{
2590 + unsigned long i;
2591 +
2592 + printk(KERN_ERR "PAX: bytes at PC: ");
2593 + for (i = 0; i < 8; i++) {
2594 + unsigned int c;
2595 + if (get_user(c, (unsigned int *)pc+i))
2596 + printk(KERN_CONT "???????? ");
2597 + else
2598 + printk(KERN_CONT "%08x ", c);
2599 + }
2600 + printk("\n");
2601 +}
2602 +#endif
2603 +
2604 void __kprobes
2605 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2606 {
2607 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2608 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2609 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2610
2611 - if ((vma->vm_flags & mask) != mask)
2612 + if ((vma->vm_flags & mask) != mask) {
2613 +
2614 +#ifdef CONFIG_PAX_PAGEEXEC
2615 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2616 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2617 + goto bad_area;
2618 +
2619 + up_read(&mm->mmap_sem);
2620 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2621 + do_group_exit(SIGKILL);
2622 + }
2623 +#endif
2624 +
2625 goto bad_area;
2626
2627 + }
2628 +
2629 /*
2630 * If for any reason at all we couldn't handle the fault, make
2631 * sure we exit gracefully rather than endlessly redo the
2632 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2633 index 5ca674b..e0e1b70 100644
2634 --- a/arch/ia64/mm/hugetlbpage.c
2635 +++ b/arch/ia64/mm/hugetlbpage.c
2636 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2637 /* At this point: (!vmm || addr < vmm->vm_end). */
2638 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2639 return -ENOMEM;
2640 - if (!vmm || (addr + len) <= vmm->vm_start)
2641 + if (check_heap_stack_gap(vmm, addr, len))
2642 return addr;
2643 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2644 }
2645 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2646 index 13df239d..cb52116 100644
2647 --- a/arch/ia64/mm/init.c
2648 +++ b/arch/ia64/mm/init.c
2649 @@ -121,6 +121,19 @@ ia64_init_addr_space (void)
2650 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2651 vma->vm_end = vma->vm_start + PAGE_SIZE;
2652 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2653 +
2654 +#ifdef CONFIG_PAX_PAGEEXEC
2655 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2656 + vma->vm_flags &= ~VM_EXEC;
2657 +
2658 +#ifdef CONFIG_PAX_MPROTECT
2659 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2660 + vma->vm_flags &= ~VM_MAYEXEC;
2661 +#endif
2662 +
2663 + }
2664 +#endif
2665 +
2666 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2667 down_write(&current->mm->mmap_sem);
2668 if (insert_vm_struct(current->mm, vma)) {
2669 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2670 index 40b3ee9..8c2c112 100644
2671 --- a/arch/m32r/include/asm/cache.h
2672 +++ b/arch/m32r/include/asm/cache.h
2673 @@ -1,8 +1,10 @@
2674 #ifndef _ASM_M32R_CACHE_H
2675 #define _ASM_M32R_CACHE_H
2676
2677 +#include <linux/const.h>
2678 +
2679 /* L1 cache line size */
2680 #define L1_CACHE_SHIFT 4
2681 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2682 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2683
2684 #endif /* _ASM_M32R_CACHE_H */
2685 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2686 index 82abd15..d95ae5d 100644
2687 --- a/arch/m32r/lib/usercopy.c
2688 +++ b/arch/m32r/lib/usercopy.c
2689 @@ -14,6 +14,9 @@
2690 unsigned long
2691 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2692 {
2693 + if ((long)n < 0)
2694 + return n;
2695 +
2696 prefetch(from);
2697 if (access_ok(VERIFY_WRITE, to, n))
2698 __copy_user(to,from,n);
2699 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2700 unsigned long
2701 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2702 {
2703 + if ((long)n < 0)
2704 + return n;
2705 +
2706 prefetchw(to);
2707 if (access_ok(VERIFY_READ, from, n))
2708 __copy_user_zeroing(to,from,n);
2709 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2710 index 0395c51..5f26031 100644
2711 --- a/arch/m68k/include/asm/cache.h
2712 +++ b/arch/m68k/include/asm/cache.h
2713 @@ -4,9 +4,11 @@
2714 #ifndef __ARCH_M68K_CACHE_H
2715 #define __ARCH_M68K_CACHE_H
2716
2717 +#include <linux/const.h>
2718 +
2719 /* bytes per L1 cache line */
2720 #define L1_CACHE_SHIFT 4
2721 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2722 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2723
2724 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2725
2726 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2727 index 4efe96a..60e8699 100644
2728 --- a/arch/microblaze/include/asm/cache.h
2729 +++ b/arch/microblaze/include/asm/cache.h
2730 @@ -13,11 +13,12 @@
2731 #ifndef _ASM_MICROBLAZE_CACHE_H
2732 #define _ASM_MICROBLAZE_CACHE_H
2733
2734 +#include <linux/const.h>
2735 #include <asm/registers.h>
2736
2737 #define L1_CACHE_SHIFT 5
2738 /* word-granular cache in microblaze */
2739 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2740 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2741
2742 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2743
2744 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2745 index 1d93f81..67794d0 100644
2746 --- a/arch/mips/include/asm/atomic.h
2747 +++ b/arch/mips/include/asm/atomic.h
2748 @@ -21,6 +21,10 @@
2749 #include <asm/war.h>
2750 #include <asm/system.h>
2751
2752 +#ifdef CONFIG_GENERIC_ATOMIC64
2753 +#include <asm-generic/atomic64.h>
2754 +#endif
2755 +
2756 #define ATOMIC_INIT(i) { (i) }
2757
2758 /*
2759 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2760 */
2761 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2762
2763 +#define atomic64_read_unchecked(v) atomic64_read(v)
2764 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2765 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2766 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2767 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2768 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2769 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2770 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2771 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2772 +
2773 #endif /* CONFIG_64BIT */
2774
2775 /*
2776 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2777 index b4db69f..8f3b093 100644
2778 --- a/arch/mips/include/asm/cache.h
2779 +++ b/arch/mips/include/asm/cache.h
2780 @@ -9,10 +9,11 @@
2781 #ifndef _ASM_CACHE_H
2782 #define _ASM_CACHE_H
2783
2784 +#include <linux/const.h>
2785 #include <kmalloc.h>
2786
2787 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2788 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2789 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2790
2791 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2792 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2793 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2794 index 455c0ac..ad65fbe 100644
2795 --- a/arch/mips/include/asm/elf.h
2796 +++ b/arch/mips/include/asm/elf.h
2797 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2798 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2799 #endif
2800
2801 +#ifdef CONFIG_PAX_ASLR
2802 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2803 +
2804 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2805 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2806 +#endif
2807 +
2808 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2809 struct linux_binprm;
2810 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2811 int uses_interp);
2812
2813 -struct mm_struct;
2814 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2815 -#define arch_randomize_brk arch_randomize_brk
2816 -
2817 #endif /* _ASM_ELF_H */
2818 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2819 index da9bd7d..91aa7ab 100644
2820 --- a/arch/mips/include/asm/page.h
2821 +++ b/arch/mips/include/asm/page.h
2822 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2823 #ifdef CONFIG_CPU_MIPS32
2824 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2825 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2826 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2827 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2828 #else
2829 typedef struct { unsigned long long pte; } pte_t;
2830 #define pte_val(x) ((x).pte)
2831 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2832 index 881d18b..cea38bc 100644
2833 --- a/arch/mips/include/asm/pgalloc.h
2834 +++ b/arch/mips/include/asm/pgalloc.h
2835 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2836 {
2837 set_pud(pud, __pud((unsigned long)pmd));
2838 }
2839 +
2840 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2841 +{
2842 + pud_populate(mm, pud, pmd);
2843 +}
2844 #endif
2845
2846 /*
2847 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2848 index 6018c80..7c37203 100644
2849 --- a/arch/mips/include/asm/system.h
2850 +++ b/arch/mips/include/asm/system.h
2851 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2852 */
2853 #define __ARCH_WANT_UNLOCKED_CTXSW
2854
2855 -extern unsigned long arch_align_stack(unsigned long sp);
2856 +#define arch_align_stack(x) ((x) & ~0xfUL)
2857
2858 #endif /* _ASM_SYSTEM_H */
2859 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2860 index 0d85d8e..ec71487 100644
2861 --- a/arch/mips/include/asm/thread_info.h
2862 +++ b/arch/mips/include/asm/thread_info.h
2863 @@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2864 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2865 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2866 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2867 +/* li takes a 32bit immediate */
2868 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2869 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2870
2871 #ifdef CONFIG_MIPS32_O32
2872 @@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2873 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2874 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2875 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2876 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2877 +
2878 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2879
2880 /* work to do in syscall_trace_leave() */
2881 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2882 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2883
2884 /* work to do on interrupt/exception return */
2885 #define _TIF_WORK_MASK (0x0000ffef & \
2886 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2887 /* work to do on any return to u-space */
2888 -#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2889 +#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2890
2891 #endif /* __KERNEL__ */
2892
2893 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2894 index 9fdd8bc..4bd7f1a 100644
2895 --- a/arch/mips/kernel/binfmt_elfn32.c
2896 +++ b/arch/mips/kernel/binfmt_elfn32.c
2897 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2898 #undef ELF_ET_DYN_BASE
2899 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2900
2901 +#ifdef CONFIG_PAX_ASLR
2902 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2903 +
2904 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2905 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2906 +#endif
2907 +
2908 #include <asm/processor.h>
2909 #include <linux/module.h>
2910 #include <linux/elfcore.h>
2911 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2912 index ff44823..97f8906 100644
2913 --- a/arch/mips/kernel/binfmt_elfo32.c
2914 +++ b/arch/mips/kernel/binfmt_elfo32.c
2915 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2916 #undef ELF_ET_DYN_BASE
2917 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2918
2919 +#ifdef CONFIG_PAX_ASLR
2920 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2921 +
2922 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2923 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2924 +#endif
2925 +
2926 #include <asm/processor.h>
2927
2928 /*
2929 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2930 index 7955409..ceaea7c 100644
2931 --- a/arch/mips/kernel/process.c
2932 +++ b/arch/mips/kernel/process.c
2933 @@ -483,15 +483,3 @@ unsigned long get_wchan(struct task_struct *task)
2934 out:
2935 return pc;
2936 }
2937 -
2938 -/*
2939 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2940 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2941 - */
2942 -unsigned long arch_align_stack(unsigned long sp)
2943 -{
2944 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2945 - sp -= get_random_int() & ~PAGE_MASK;
2946 -
2947 - return sp & ALMASK;
2948 -}
2949 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
2950 index 7786b60..3e38c72 100644
2951 --- a/arch/mips/kernel/ptrace.c
2952 +++ b/arch/mips/kernel/ptrace.c
2953 @@ -529,6 +529,10 @@ static inline int audit_arch(void)
2954 return arch;
2955 }
2956
2957 +#ifdef CONFIG_GRKERNSEC_SETXID
2958 +extern void gr_delayed_cred_worker(void);
2959 +#endif
2960 +
2961 /*
2962 * Notification of system call entry/exit
2963 * - triggered by current->work.syscall_trace
2964 @@ -538,6 +542,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
2965 /* do the secure computing check first */
2966 secure_computing(regs->regs[2]);
2967
2968 +#ifdef CONFIG_GRKERNSEC_SETXID
2969 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2970 + gr_delayed_cred_worker();
2971 +#endif
2972 +
2973 if (!(current->ptrace & PT_PTRACED))
2974 goto out;
2975
2976 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
2977 index a632bc1..0b77c7c 100644
2978 --- a/arch/mips/kernel/scall32-o32.S
2979 +++ b/arch/mips/kernel/scall32-o32.S
2980 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
2981
2982 stack_done:
2983 lw t0, TI_FLAGS($28) # syscall tracing enabled?
2984 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
2985 + li t1, _TIF_SYSCALL_WORK
2986 and t0, t1
2987 bnez t0, syscall_trace_entry # -> yes
2988
2989 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
2990 index 3b5a5e9..e1ee86d 100644
2991 --- a/arch/mips/kernel/scall64-64.S
2992 +++ b/arch/mips/kernel/scall64-64.S
2993 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
2994
2995 sd a3, PT_R26(sp) # save a3 for syscall restarting
2996
2997 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
2998 + li t1, _TIF_SYSCALL_WORK
2999 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3000 and t0, t1, t0
3001 bnez t0, syscall_trace_entry
3002 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3003 index 6be6f70..1859577 100644
3004 --- a/arch/mips/kernel/scall64-n32.S
3005 +++ b/arch/mips/kernel/scall64-n32.S
3006 @@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3007
3008 sd a3, PT_R26(sp) # save a3 for syscall restarting
3009
3010 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3011 + li t1, _TIF_SYSCALL_WORK
3012 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3013 and t0, t1, t0
3014 bnez t0, n32_syscall_trace_entry
3015 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3016 index 5422855..74e63a3 100644
3017 --- a/arch/mips/kernel/scall64-o32.S
3018 +++ b/arch/mips/kernel/scall64-o32.S
3019 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3020 PTR 4b, bad_stack
3021 .previous
3022
3023 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3024 + li t1, _TIF_SYSCALL_WORK
3025 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3026 and t0, t1, t0
3027 bnez t0, trace_a_syscall
3028 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3029 index 69ebd58..e4bff83 100644
3030 --- a/arch/mips/mm/fault.c
3031 +++ b/arch/mips/mm/fault.c
3032 @@ -28,6 +28,23 @@
3033 #include <asm/highmem.h> /* For VMALLOC_END */
3034 #include <linux/kdebug.h>
3035
3036 +#ifdef CONFIG_PAX_PAGEEXEC
3037 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3038 +{
3039 + unsigned long i;
3040 +
3041 + printk(KERN_ERR "PAX: bytes at PC: ");
3042 + for (i = 0; i < 5; i++) {
3043 + unsigned int c;
3044 + if (get_user(c, (unsigned int *)pc+i))
3045 + printk(KERN_CONT "???????? ");
3046 + else
3047 + printk(KERN_CONT "%08x ", c);
3048 + }
3049 + printk("\n");
3050 +}
3051 +#endif
3052 +
3053 /*
3054 * This routine handles page faults. It determines the address,
3055 * and the problem, and then passes it off to one of the appropriate
3056 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3057 index 302d779..7d35bf8 100644
3058 --- a/arch/mips/mm/mmap.c
3059 +++ b/arch/mips/mm/mmap.c
3060 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3061 do_color_align = 1;
3062
3063 /* requesting a specific address */
3064 +
3065 +#ifdef CONFIG_PAX_RANDMMAP
3066 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3067 +#endif
3068 +
3069 if (addr) {
3070 if (do_color_align)
3071 addr = COLOUR_ALIGN(addr, pgoff);
3072 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3073 addr = PAGE_ALIGN(addr);
3074
3075 vma = find_vma(mm, addr);
3076 - if (TASK_SIZE - len >= addr &&
3077 - (!vma || addr + len <= vma->vm_start))
3078 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3079 return addr;
3080 }
3081
3082 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3083 /* At this point: (!vma || addr < vma->vm_end). */
3084 if (TASK_SIZE - len < addr)
3085 return -ENOMEM;
3086 - if (!vma || addr + len <= vma->vm_start)
3087 + if (check_heap_stack_gap(vma, addr, len))
3088 return addr;
3089 addr = vma->vm_end;
3090 if (do_color_align)
3091 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3092 /* make sure it can fit in the remaining address space */
3093 if (likely(addr > len)) {
3094 vma = find_vma(mm, addr - len);
3095 - if (!vma || addr <= vma->vm_start) {
3096 + if (check_heap_stack_gap(vma, addr - len, len)) {
3097 /* cache the address as a hint for next time */
3098 return mm->free_area_cache = addr - len;
3099 }
3100 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3101 * return with success:
3102 */
3103 vma = find_vma(mm, addr);
3104 - if (likely(!vma || addr + len <= vma->vm_start)) {
3105 + if (check_heap_stack_gap(vma, addr, len)) {
3106 /* cache the address as a hint for next time */
3107 return mm->free_area_cache = addr;
3108 }
3109 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3110 mm->unmap_area = arch_unmap_area_topdown;
3111 }
3112 }
3113 -
3114 -static inline unsigned long brk_rnd(void)
3115 -{
3116 - unsigned long rnd = get_random_int();
3117 -
3118 - rnd = rnd << PAGE_SHIFT;
3119 - /* 8MB for 32bit, 256MB for 64bit */
3120 - if (TASK_IS_32BIT_ADDR)
3121 - rnd = rnd & 0x7ffffful;
3122 - else
3123 - rnd = rnd & 0xffffffful;
3124 -
3125 - return rnd;
3126 -}
3127 -
3128 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3129 -{
3130 - unsigned long base = mm->brk;
3131 - unsigned long ret;
3132 -
3133 - ret = PAGE_ALIGN(base + brk_rnd());
3134 -
3135 - if (ret < mm->brk)
3136 - return mm->brk;
3137 -
3138 - return ret;
3139 -}
3140 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3141 index 967d144..db12197 100644
3142 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3143 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3144 @@ -11,12 +11,14 @@
3145 #ifndef _ASM_PROC_CACHE_H
3146 #define _ASM_PROC_CACHE_H
3147
3148 +#include <linux/const.h>
3149 +
3150 /* L1 cache */
3151
3152 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3153 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3154 -#define L1_CACHE_BYTES 16 /* bytes per entry */
3155 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3156 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3157 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3158
3159 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3160 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3161 index bcb5df2..84fabd2 100644
3162 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3163 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3164 @@ -16,13 +16,15 @@
3165 #ifndef _ASM_PROC_CACHE_H
3166 #define _ASM_PROC_CACHE_H
3167
3168 +#include <linux/const.h>
3169 +
3170 /*
3171 * L1 cache
3172 */
3173 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3174 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3175 -#define L1_CACHE_BYTES 32 /* bytes per entry */
3176 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3177 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3178 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3179
3180 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3181 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3182 index 4ce7a01..449202a 100644
3183 --- a/arch/openrisc/include/asm/cache.h
3184 +++ b/arch/openrisc/include/asm/cache.h
3185 @@ -19,11 +19,13 @@
3186 #ifndef __ASM_OPENRISC_CACHE_H
3187 #define __ASM_OPENRISC_CACHE_H
3188
3189 +#include <linux/const.h>
3190 +
3191 /* FIXME: How can we replace these with values from the CPU...
3192 * they shouldn't be hard-coded!
3193 */
3194
3195 -#define L1_CACHE_BYTES 16
3196 #define L1_CACHE_SHIFT 4
3197 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3198
3199 #endif /* __ASM_OPENRISC_CACHE_H */
3200 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3201 index 4054b31..a10c105 100644
3202 --- a/arch/parisc/include/asm/atomic.h
3203 +++ b/arch/parisc/include/asm/atomic.h
3204 @@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3205
3206 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3207
3208 +#define atomic64_read_unchecked(v) atomic64_read(v)
3209 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3210 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3211 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3212 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3213 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3214 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3215 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3216 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3217 +
3218 #endif /* !CONFIG_64BIT */
3219
3220
3221 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3222 index 47f11c7..3420df2 100644
3223 --- a/arch/parisc/include/asm/cache.h
3224 +++ b/arch/parisc/include/asm/cache.h
3225 @@ -5,6 +5,7 @@
3226 #ifndef __ARCH_PARISC_CACHE_H
3227 #define __ARCH_PARISC_CACHE_H
3228
3229 +#include <linux/const.h>
3230
3231 /*
3232 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3233 @@ -15,13 +16,13 @@
3234 * just ruin performance.
3235 */
3236 #ifdef CONFIG_PA20
3237 -#define L1_CACHE_BYTES 64
3238 #define L1_CACHE_SHIFT 6
3239 #else
3240 -#define L1_CACHE_BYTES 32
3241 #define L1_CACHE_SHIFT 5
3242 #endif
3243
3244 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3245 +
3246 #ifndef __ASSEMBLY__
3247
3248 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3249 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3250 index 19f6cb1..6c78cf2 100644
3251 --- a/arch/parisc/include/asm/elf.h
3252 +++ b/arch/parisc/include/asm/elf.h
3253 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3254
3255 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3256
3257 +#ifdef CONFIG_PAX_ASLR
3258 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3259 +
3260 +#define PAX_DELTA_MMAP_LEN 16
3261 +#define PAX_DELTA_STACK_LEN 16
3262 +#endif
3263 +
3264 /* This yields a mask that user programs can use to figure out what
3265 instruction set this CPU supports. This could be done in user space,
3266 but it's not easy, and we've already done it here. */
3267 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3268 index fc987a1..6e068ef 100644
3269 --- a/arch/parisc/include/asm/pgalloc.h
3270 +++ b/arch/parisc/include/asm/pgalloc.h
3271 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3272 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3273 }
3274
3275 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3276 +{
3277 + pgd_populate(mm, pgd, pmd);
3278 +}
3279 +
3280 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3281 {
3282 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3283 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3284 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3285 #define pmd_free(mm, x) do { } while (0)
3286 #define pgd_populate(mm, pmd, pte) BUG()
3287 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
3288
3289 #endif
3290
3291 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3292 index 22dadeb..f6c2be4 100644
3293 --- a/arch/parisc/include/asm/pgtable.h
3294 +++ b/arch/parisc/include/asm/pgtable.h
3295 @@ -210,6 +210,17 @@ struct vm_area_struct;
3296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3297 #define PAGE_COPY PAGE_EXECREAD
3298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3299 +
3300 +#ifdef CONFIG_PAX_PAGEEXEC
3301 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3302 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3303 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3304 +#else
3305 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3306 +# define PAGE_COPY_NOEXEC PAGE_COPY
3307 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3308 +#endif
3309 +
3310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3313 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3314 index 5e34ccf..672bc9c 100644
3315 --- a/arch/parisc/kernel/module.c
3316 +++ b/arch/parisc/kernel/module.c
3317 @@ -98,16 +98,38 @@
3318
3319 /* three functions to determine where in the module core
3320 * or init pieces the location is */
3321 +static inline int in_init_rx(struct module *me, void *loc)
3322 +{
3323 + return (loc >= me->module_init_rx &&
3324 + loc < (me->module_init_rx + me->init_size_rx));
3325 +}
3326 +
3327 +static inline int in_init_rw(struct module *me, void *loc)
3328 +{
3329 + return (loc >= me->module_init_rw &&
3330 + loc < (me->module_init_rw + me->init_size_rw));
3331 +}
3332 +
3333 static inline int in_init(struct module *me, void *loc)
3334 {
3335 - return (loc >= me->module_init &&
3336 - loc <= (me->module_init + me->init_size));
3337 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3338 +}
3339 +
3340 +static inline int in_core_rx(struct module *me, void *loc)
3341 +{
3342 + return (loc >= me->module_core_rx &&
3343 + loc < (me->module_core_rx + me->core_size_rx));
3344 +}
3345 +
3346 +static inline int in_core_rw(struct module *me, void *loc)
3347 +{
3348 + return (loc >= me->module_core_rw &&
3349 + loc < (me->module_core_rw + me->core_size_rw));
3350 }
3351
3352 static inline int in_core(struct module *me, void *loc)
3353 {
3354 - return (loc >= me->module_core &&
3355 - loc <= (me->module_core + me->core_size));
3356 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3357 }
3358
3359 static inline int in_local(struct module *me, void *loc)
3360 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3361 }
3362
3363 /* align things a bit */
3364 - me->core_size = ALIGN(me->core_size, 16);
3365 - me->arch.got_offset = me->core_size;
3366 - me->core_size += gots * sizeof(struct got_entry);
3367 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3368 + me->arch.got_offset = me->core_size_rw;
3369 + me->core_size_rw += gots * sizeof(struct got_entry);
3370
3371 - me->core_size = ALIGN(me->core_size, 16);
3372 - me->arch.fdesc_offset = me->core_size;
3373 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3374 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3375 + me->arch.fdesc_offset = me->core_size_rw;
3376 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3377
3378 me->arch.got_max = gots;
3379 me->arch.fdesc_max = fdescs;
3380 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3381
3382 BUG_ON(value == 0);
3383
3384 - got = me->module_core + me->arch.got_offset;
3385 + got = me->module_core_rw + me->arch.got_offset;
3386 for (i = 0; got[i].addr; i++)
3387 if (got[i].addr == value)
3388 goto out;
3389 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3390 #ifdef CONFIG_64BIT
3391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3392 {
3393 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3394 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3395
3396 if (!value) {
3397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3398 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3399
3400 /* Create new one */
3401 fdesc->addr = value;
3402 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3403 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3404 return (Elf_Addr)fdesc;
3405 }
3406 #endif /* CONFIG_64BIT */
3407 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3408
3409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3410 end = table + sechdrs[me->arch.unwind_section].sh_size;
3411 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3412 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3413
3414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3415 me->arch.unwind_section, table, end, gp);
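The module.c changes above rely on the generic part of this patch splitting each module's single core/init allocation into separate read-execute and read-write regions (module_core_rx/_rw with core_size_rx/_rw, and likewise for init), so address classification must test both halves. A stand-alone sketch of that test, using an illustrative struct rather than the kernel's struct module:

#include <stdbool.h>
#include <stddef.h>

/* illustrative stand-in for the split module layout used by this patch */
struct mod_layout {
        char *core_rx; size_t core_size_rx;     /* text, mapped read-execute */
        char *core_rw; size_t core_size_rw;     /* data/GOT, mapped read-write */
};

static bool in_region(const char *base, size_t size, const void *loc)
{
        const char *p = loc;
        return base && p >= base && p < base + size;
}

/* same shape as in_core() above: the location belongs to the module core
 * if it falls in either the RX half or the RW half */
static bool in_core(const struct mod_layout *m, const void *loc)
{
        return in_region(m->core_rx, m->core_size_rx, loc) ||
               in_region(m->core_rw, m->core_size_rw, loc);
}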
3416 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3417 index c9b9322..02d8940 100644
3418 --- a/arch/parisc/kernel/sys_parisc.c
3419 +++ b/arch/parisc/kernel/sys_parisc.c
3420 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3421 /* At this point: (!vma || addr < vma->vm_end). */
3422 if (TASK_SIZE - len < addr)
3423 return -ENOMEM;
3424 - if (!vma || addr + len <= vma->vm_start)
3425 + if (check_heap_stack_gap(vma, addr, len))
3426 return addr;
3427 addr = vma->vm_end;
3428 }
3429 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3430 /* At this point: (!vma || addr < vma->vm_end). */
3431 if (TASK_SIZE - len < addr)
3432 return -ENOMEM;
3433 - if (!vma || addr + len <= vma->vm_start)
3434 + if (check_heap_stack_gap(vma, addr, len))
3435 return addr;
3436 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3437 if (addr < vma->vm_end) /* handle wraparound */
3438 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3439 if (flags & MAP_FIXED)
3440 return addr;
3441 if (!addr)
3442 - addr = TASK_UNMAPPED_BASE;
3443 + addr = current->mm->mmap_base;
3444
3445 if (filp) {
3446 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
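get_unshared_area() and get_shared_area() above replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), a helper added elsewhere in this patch that additionally keeps a guard gap between a new mapping and a stack VMA above it. A simplified userspace-style sketch of that kind of check; the struct and the gap size below are assumptions for illustration only:

#include <stdbool.h>

/* illustrative subset of a VMA: start address plus a "grows down" flag */
struct vma_stub {
        unsigned long vm_start;
        bool grows_down;
};

#define GUARD_GAP (64UL * 1024)  /* assumed gap; the real value is configurable */

static bool gap_ok(const struct vma_stub *vma, unsigned long addr,
                   unsigned long len)
{
        if (!vma)
                return true;                      /* nothing above the range */
        if (addr + len > vma->vm_start)
                return false;                     /* overlaps the next mapping */
        if (vma->grows_down && vma->vm_start - (addr + len) < GUARD_GAP)
                return false;                     /* too close to the stack */
        return true;
}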
3447 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3448 index f19e660..414fe24 100644
3449 --- a/arch/parisc/kernel/traps.c
3450 +++ b/arch/parisc/kernel/traps.c
3451 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3452
3453 down_read(&current->mm->mmap_sem);
3454 vma = find_vma(current->mm,regs->iaoq[0]);
3455 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3456 - && (vma->vm_flags & VM_EXEC)) {
3457 -
3458 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3459 fault_address = regs->iaoq[0];
3460 fault_space = regs->iasq[0];
3461
3462 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3463 index 18162ce..94de376 100644
3464 --- a/arch/parisc/mm/fault.c
3465 +++ b/arch/parisc/mm/fault.c
3466 @@ -15,6 +15,7 @@
3467 #include <linux/sched.h>
3468 #include <linux/interrupt.h>
3469 #include <linux/module.h>
3470 +#include <linux/unistd.h>
3471
3472 #include <asm/uaccess.h>
3473 #include <asm/traps.h>
3474 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3475 static unsigned long
3476 parisc_acctyp(unsigned long code, unsigned int inst)
3477 {
3478 - if (code == 6 || code == 16)
3479 + if (code == 6 || code == 7 || code == 16)
3480 return VM_EXEC;
3481
3482 switch (inst & 0xf0000000) {
3483 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3484 }
3485 #endif
3486
3487 +#ifdef CONFIG_PAX_PAGEEXEC
3488 +/*
3489 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3490 + *
3491 + * returns 1 when task should be killed
3492 + * 2 when rt_sigreturn trampoline was detected
3493 + * 3 when unpatched PLT trampoline was detected
3494 + */
3495 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3496 +{
3497 +
3498 +#ifdef CONFIG_PAX_EMUPLT
3499 + int err;
3500 +
3501 + do { /* PaX: unpatched PLT emulation */
3502 + unsigned int bl, depwi;
3503 +
3504 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3505 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3506 +
3507 + if (err)
3508 + break;
3509 +
3510 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3511 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3512 +
3513 + err = get_user(ldw, (unsigned int *)addr);
3514 + err |= get_user(bv, (unsigned int *)(addr+4));
3515 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3516 +
3517 + if (err)
3518 + break;
3519 +
3520 + if (ldw == 0x0E801096U &&
3521 + bv == 0xEAC0C000U &&
3522 + ldw2 == 0x0E881095U)
3523 + {
3524 + unsigned int resolver, map;
3525 +
3526 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3527 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3528 + if (err)
3529 + break;
3530 +
3531 + regs->gr[20] = instruction_pointer(regs)+8;
3532 + regs->gr[21] = map;
3533 + regs->gr[22] = resolver;
3534 + regs->iaoq[0] = resolver | 3UL;
3535 + regs->iaoq[1] = regs->iaoq[0] + 4;
3536 + return 3;
3537 + }
3538 + }
3539 + } while (0);
3540 +#endif
3541 +
3542 +#ifdef CONFIG_PAX_EMUTRAMP
3543 +
3544 +#ifndef CONFIG_PAX_EMUSIGRT
3545 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3546 + return 1;
3547 +#endif
3548 +
3549 + do { /* PaX: rt_sigreturn emulation */
3550 + unsigned int ldi1, ldi2, bel, nop;
3551 +
3552 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3553 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3554 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3555 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3556 +
3557 + if (err)
3558 + break;
3559 +
3560 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3561 + ldi2 == 0x3414015AU &&
3562 + bel == 0xE4008200U &&
3563 + nop == 0x08000240U)
3564 + {
3565 + regs->gr[25] = (ldi1 & 2) >> 1;
3566 + regs->gr[20] = __NR_rt_sigreturn;
3567 + regs->gr[31] = regs->iaoq[1] + 16;
3568 + regs->sr[0] = regs->iasq[1];
3569 + regs->iaoq[0] = 0x100UL;
3570 + regs->iaoq[1] = regs->iaoq[0] + 4;
3571 + regs->iasq[0] = regs->sr[2];
3572 + regs->iasq[1] = regs->sr[2];
3573 + return 2;
3574 + }
3575 + } while (0);
3576 +#endif
3577 +
3578 + return 1;
3579 +}
3580 +
3581 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3582 +{
3583 + unsigned long i;
3584 +
3585 + printk(KERN_ERR "PAX: bytes at PC: ");
3586 + for (i = 0; i < 5; i++) {
3587 + unsigned int c;
3588 + if (get_user(c, (unsigned int *)pc+i))
3589 + printk(KERN_CONT "???????? ");
3590 + else
3591 + printk(KERN_CONT "%08x ", c);
3592 + }
3593 + printk("\n");
3594 +}
3595 +#endif
3596 +
3597 int fixup_exception(struct pt_regs *regs)
3598 {
3599 const struct exception_table_entry *fix;
3600 @@ -192,8 +303,33 @@ good_area:
3601
3602 acc_type = parisc_acctyp(code,regs->iir);
3603
3604 - if ((vma->vm_flags & acc_type) != acc_type)
3605 + if ((vma->vm_flags & acc_type) != acc_type) {
3606 +
3607 +#ifdef CONFIG_PAX_PAGEEXEC
3608 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3609 + (address & ~3UL) == instruction_pointer(regs))
3610 + {
3611 + up_read(&mm->mmap_sem);
3612 + switch (pax_handle_fetch_fault(regs)) {
3613 +
3614 +#ifdef CONFIG_PAX_EMUPLT
3615 + case 3:
3616 + return;
3617 +#endif
3618 +
3619 +#ifdef CONFIG_PAX_EMUTRAMP
3620 + case 2:
3621 + return;
3622 +#endif
3623 +
3624 + }
3625 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3626 + do_group_exit(SIGKILL);
3627 + }
3628 +#endif
3629 +
3630 goto bad_area;
3631 + }
3632
3633 /*
3634 * If for any reason at all we couldn't handle the fault, make
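The fault.c hunk above wires CONFIG_PAX_PAGEEXEC into the fault path: an instruction-fetch fault at the faulting PC is handed to pax_handle_fetch_fault(), whose return value (per the comment above: 1 kill, 2 rt_sigreturn trampoline emulated, 3 unpatched PLT emulated) decides whether the task resumes or is reported and killed. A compact sketch of that dispatch, using illustrative names and return conventions:

/* return codes mirror the comment in pax_handle_fetch_fault() above */
enum fetch_fault_result { FF_KILL = 1, FF_EMU_SIGRETURN = 2, FF_EMU_PLT = 3 };

/* 0 = registers were rewritten, resume the task; nonzero = report and kill */
static int dispatch_fetch_fault(enum fetch_fault_result r)
{
        switch (r) {
        case FF_EMU_PLT:        /* regs were set up to enter the resolver */
        case FF_EMU_SIGRETURN:  /* regs now re-issue rt_sigreturn */
                return 0;
        case FF_KILL:
        default:
                return -1;
        }
}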
3635 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3636 index 02e41b5..ec6e26c 100644
3637 --- a/arch/powerpc/include/asm/atomic.h
3638 +++ b/arch/powerpc/include/asm/atomic.h
3639 @@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3640
3641 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3642
3643 +#define atomic64_read_unchecked(v) atomic64_read(v)
3644 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3645 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3646 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3647 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3648 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3649 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3650 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3651 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3652 +
3653 #endif /* __powerpc64__ */
3654
3655 #endif /* __KERNEL__ */
3656 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3657 index 4b50941..5605819 100644
3658 --- a/arch/powerpc/include/asm/cache.h
3659 +++ b/arch/powerpc/include/asm/cache.h
3660 @@ -3,6 +3,7 @@
3661
3662 #ifdef __KERNEL__
3663
3664 +#include <linux/const.h>
3665
3666 /* bytes per L1 cache line */
3667 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3668 @@ -22,7 +23,7 @@
3669 #define L1_CACHE_SHIFT 7
3670 #endif
3671
3672 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3673 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3674
3675 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3676
3677 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3678 index 3bf9cca..e7457d0 100644
3679 --- a/arch/powerpc/include/asm/elf.h
3680 +++ b/arch/powerpc/include/asm/elf.h
3681 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3682 the loader. We need to make sure that it is out of the way of the program
3683 that it will "exec", and that there is sufficient room for the brk. */
3684
3685 -extern unsigned long randomize_et_dyn(unsigned long base);
3686 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3687 +#define ELF_ET_DYN_BASE (0x20000000)
3688 +
3689 +#ifdef CONFIG_PAX_ASLR
3690 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3691 +
3692 +#ifdef __powerpc64__
3693 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3694 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3695 +#else
3696 +#define PAX_DELTA_MMAP_LEN 15
3697 +#define PAX_DELTA_STACK_LEN 15
3698 +#endif
3699 +#endif
3700
3701 /*
3702 * Our registers are always unsigned longs, whether we're a 32 bit
3703 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3704 (0x7ff >> (PAGE_SHIFT - 12)) : \
3705 (0x3ffff >> (PAGE_SHIFT - 12)))
3706
3707 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3708 -#define arch_randomize_brk arch_randomize_brk
3709 -
3710 #endif /* __KERNEL__ */
3711
3712 /*
3713 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3714 index bca8fdc..61e9580 100644
3715 --- a/arch/powerpc/include/asm/kmap_types.h
3716 +++ b/arch/powerpc/include/asm/kmap_types.h
3717 @@ -27,6 +27,7 @@ enum km_type {
3718 KM_PPC_SYNC_PAGE,
3719 KM_PPC_SYNC_ICACHE,
3720 KM_KDB,
3721 + KM_CLEARPAGE,
3722 KM_TYPE_NR
3723 };
3724
3725 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3726 index d4a7f64..451de1c 100644
3727 --- a/arch/powerpc/include/asm/mman.h
3728 +++ b/arch/powerpc/include/asm/mman.h
3729 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3730 }
3731 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3732
3733 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3734 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3735 {
3736 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3737 }
3738 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3739 index f072e97..b436dee 100644
3740 --- a/arch/powerpc/include/asm/page.h
3741 +++ b/arch/powerpc/include/asm/page.h
3742 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3743 * and needs to be executable. This means the whole heap ends
3744 * up being executable.
3745 */
3746 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3747 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3748 +#define VM_DATA_DEFAULT_FLAGS32 \
3749 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3750 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3751
3752 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3753 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3754 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3755 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3756 #endif
3757
3758 +#define ktla_ktva(addr) (addr)
3759 +#define ktva_ktla(addr) (addr)
3760 +
3761 /*
3762 * Use the top bit of the higher-level page table entries to indicate whether
3763 * the entries we point to contain hugepages. This works because we know that
3764 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3765 index fed85e6..da5c71b 100644
3766 --- a/arch/powerpc/include/asm/page_64.h
3767 +++ b/arch/powerpc/include/asm/page_64.h
3768 @@ -146,15 +146,18 @@ do { \
3769 * stack by default, so in the absence of a PT_GNU_STACK program header
3770 * we turn execute permission off.
3771 */
3772 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3773 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3774 +#define VM_STACK_DEFAULT_FLAGS32 \
3775 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3776 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3777
3778 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3779 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3780
3781 +#ifndef CONFIG_PAX_PAGEEXEC
3782 #define VM_STACK_DEFAULT_FLAGS \
3783 (is_32bit_task() ? \
3784 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3785 +#endif
3786
3787 #include <asm-generic/getorder.h>
3788
3789 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3790 index 292725c..f87ae14 100644
3791 --- a/arch/powerpc/include/asm/pgalloc-64.h
3792 +++ b/arch/powerpc/include/asm/pgalloc-64.h
3793 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3794 #ifndef CONFIG_PPC_64K_PAGES
3795
3796 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3797 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3798
3799 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3800 {
3801 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3802 pud_set(pud, (unsigned long)pmd);
3803 }
3804
3805 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3806 +{
3807 + pud_populate(mm, pud, pmd);
3808 +}
3809 +
3810 #define pmd_populate(mm, pmd, pte_page) \
3811 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3812 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3813 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3814 #else /* CONFIG_PPC_64K_PAGES */
3815
3816 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3817 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3818
3819 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3820 pte_t *pte)
3821 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3822 index 2e0e411..7899c68 100644
3823 --- a/arch/powerpc/include/asm/pgtable.h
3824 +++ b/arch/powerpc/include/asm/pgtable.h
3825 @@ -2,6 +2,7 @@
3826 #define _ASM_POWERPC_PGTABLE_H
3827 #ifdef __KERNEL__
3828
3829 +#include <linux/const.h>
3830 #ifndef __ASSEMBLY__
3831 #include <asm/processor.h> /* For TASK_SIZE */
3832 #include <asm/mmu.h>
3833 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3834 index 4aad413..85d86bf 100644
3835 --- a/arch/powerpc/include/asm/pte-hash32.h
3836 +++ b/arch/powerpc/include/asm/pte-hash32.h
3837 @@ -21,6 +21,7 @@
3838 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3839 #define _PAGE_USER 0x004 /* usermode access allowed */
3840 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3841 +#define _PAGE_EXEC _PAGE_GUARDED
3842 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3843 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3844 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3845 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3846 index 7fdc2c0..e47a9b02d3 100644
3847 --- a/arch/powerpc/include/asm/reg.h
3848 +++ b/arch/powerpc/include/asm/reg.h
3849 @@ -212,6 +212,7 @@
3850 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3851 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3852 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3853 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3854 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3855 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3856 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3857 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3858 index c377457..3c69fbc 100644
3859 --- a/arch/powerpc/include/asm/system.h
3860 +++ b/arch/powerpc/include/asm/system.h
3861 @@ -539,7 +539,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3862 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3863 #endif
3864
3865 -extern unsigned long arch_align_stack(unsigned long sp);
3866 +#define arch_align_stack(x) ((x) & ~0xfUL)
3867
3868 /* Used in very early kernel initialization. */
3869 extern unsigned long reloc_offset(void);
3870 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3871 index 96471494..60ed5a2 100644
3872 --- a/arch/powerpc/include/asm/thread_info.h
3873 +++ b/arch/powerpc/include/asm/thread_info.h
3874 @@ -104,13 +104,15 @@ static inline struct thread_info *current_thread_info(void)
3875 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3876 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3877 #define TIF_SINGLESTEP 8 /* singlestepping active */
3878 -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3879 #define TIF_SECCOMP 10 /* secure computing */
3880 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3881 #define TIF_NOERROR 12 /* Force successful syscall return */
3882 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3883 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3884 #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
3885 +#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
3886 +/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
3887 +#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3888
3889 /* as above, but as bit values */
3890 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3891 @@ -128,8 +130,11 @@ static inline struct thread_info *current_thread_info(void)
3892 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3893 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3894 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3895 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3896 +
3897 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3898 - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3899 + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
3900 + _TIF_GRSEC_SETXID)
3901
3902 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3903 _TIF_NOTIFY_RESUME)
3904 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3905 index bd0fb84..a42a14b 100644
3906 --- a/arch/powerpc/include/asm/uaccess.h
3907 +++ b/arch/powerpc/include/asm/uaccess.h
3908 @@ -13,6 +13,8 @@
3909 #define VERIFY_READ 0
3910 #define VERIFY_WRITE 1
3911
3912 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3913 +
3914 /*
3915 * The fs value determines whether argument validity checking should be
3916 * performed or not. If get_fs() == USER_DS, checking is performed, with
3917 @@ -327,52 +329,6 @@ do { \
3918 extern unsigned long __copy_tofrom_user(void __user *to,
3919 const void __user *from, unsigned long size);
3920
3921 -#ifndef __powerpc64__
3922 -
3923 -static inline unsigned long copy_from_user(void *to,
3924 - const void __user *from, unsigned long n)
3925 -{
3926 - unsigned long over;
3927 -
3928 - if (access_ok(VERIFY_READ, from, n))
3929 - return __copy_tofrom_user((__force void __user *)to, from, n);
3930 - if ((unsigned long)from < TASK_SIZE) {
3931 - over = (unsigned long)from + n - TASK_SIZE;
3932 - return __copy_tofrom_user((__force void __user *)to, from,
3933 - n - over) + over;
3934 - }
3935 - return n;
3936 -}
3937 -
3938 -static inline unsigned long copy_to_user(void __user *to,
3939 - const void *from, unsigned long n)
3940 -{
3941 - unsigned long over;
3942 -
3943 - if (access_ok(VERIFY_WRITE, to, n))
3944 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3945 - if ((unsigned long)to < TASK_SIZE) {
3946 - over = (unsigned long)to + n - TASK_SIZE;
3947 - return __copy_tofrom_user(to, (__force void __user *)from,
3948 - n - over) + over;
3949 - }
3950 - return n;
3951 -}
3952 -
3953 -#else /* __powerpc64__ */
3954 -
3955 -#define __copy_in_user(to, from, size) \
3956 - __copy_tofrom_user((to), (from), (size))
3957 -
3958 -extern unsigned long copy_from_user(void *to, const void __user *from,
3959 - unsigned long n);
3960 -extern unsigned long copy_to_user(void __user *to, const void *from,
3961 - unsigned long n);
3962 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3963 - unsigned long n);
3964 -
3965 -#endif /* __powerpc64__ */
3966 -
3967 static inline unsigned long __copy_from_user_inatomic(void *to,
3968 const void __user *from, unsigned long n)
3969 {
3970 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3971 if (ret == 0)
3972 return 0;
3973 }
3974 +
3975 + if (!__builtin_constant_p(n))
3976 + check_object_size(to, n, false);
3977 +
3978 return __copy_tofrom_user((__force void __user *)to, from, n);
3979 }
3980
3981 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3982 if (ret == 0)
3983 return 0;
3984 }
3985 +
3986 + if (!__builtin_constant_p(n))
3987 + check_object_size(from, n, true);
3988 +
3989 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3990 }
3991
3992 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3993 return __copy_to_user_inatomic(to, from, size);
3994 }
3995
3996 +#ifndef __powerpc64__
3997 +
3998 +static inline unsigned long __must_check copy_from_user(void *to,
3999 + const void __user *from, unsigned long n)
4000 +{
4001 + unsigned long over;
4002 +
4003 + if ((long)n < 0)
4004 + return n;
4005 +
4006 + if (access_ok(VERIFY_READ, from, n)) {
4007 + if (!__builtin_constant_p(n))
4008 + check_object_size(to, n, false);
4009 + return __copy_tofrom_user((__force void __user *)to, from, n);
4010 + }
4011 + if ((unsigned long)from < TASK_SIZE) {
4012 + over = (unsigned long)from + n - TASK_SIZE;
4013 + if (!__builtin_constant_p(n - over))
4014 + check_object_size(to, n - over, false);
4015 + return __copy_tofrom_user((__force void __user *)to, from,
4016 + n - over) + over;
4017 + }
4018 + return n;
4019 +}
4020 +
4021 +static inline unsigned long __must_check copy_to_user(void __user *to,
4022 + const void *from, unsigned long n)
4023 +{
4024 + unsigned long over;
4025 +
4026 + if ((long)n < 0)
4027 + return n;
4028 +
4029 + if (access_ok(VERIFY_WRITE, to, n)) {
4030 + if (!__builtin_constant_p(n))
4031 + check_object_size(from, n, true);
4032 + return __copy_tofrom_user(to, (__force void __user *)from, n);
4033 + }
4034 + if ((unsigned long)to < TASK_SIZE) {
4035 + over = (unsigned long)to + n - TASK_SIZE;
4036 + if (!__builtin_constant_p(n))
4037 + check_object_size(from, n - over, true);
4038 + return __copy_tofrom_user(to, (__force void __user *)from,
4039 + n - over) + over;
4040 + }
4041 + return n;
4042 +}
4043 +
4044 +#else /* __powerpc64__ */
4045 +
4046 +#define __copy_in_user(to, from, size) \
4047 + __copy_tofrom_user((to), (from), (size))
4048 +
4049 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4050 +{
4051 + if ((long)n < 0 || n > INT_MAX)
4052 + return n;
4053 +
4054 + if (!__builtin_constant_p(n))
4055 + check_object_size(to, n, false);
4056 +
4057 + if (likely(access_ok(VERIFY_READ, from, n)))
4058 + n = __copy_from_user(to, from, n);
4059 + else
4060 + memset(to, 0, n);
4061 + return n;
4062 +}
4063 +
4064 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4065 +{
4066 + if ((long)n < 0 || n > INT_MAX)
4067 + return n;
4068 +
4069 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
4070 + if (!__builtin_constant_p(n))
4071 + check_object_size(from, n, true);
4072 + n = __copy_to_user(to, from, n);
4073 + }
4074 + return n;
4075 +}
4076 +
4077 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
4078 + unsigned long n);
4079 +
4080 +#endif /* __powerpc64__ */
4081 +
4082 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4083
4084 static inline unsigned long clear_user(void __user *addr, unsigned long size)
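The uaccess.h rework above folds the former out-of-line copy_from_user()/copy_to_user() into inline versions that reject sizes gone negative through signed arithmetic and route non-constant sizes through check_object_size(), the PAX_USERCOPY bounds check declared at the top of the hunk. A userspace sketch of that guard pattern, with a stub standing in for the real checker and memcpy standing in for __copy_tofrom_user():

#include <string.h>

/* stub: the real check_object_size() validates ptr/n against the bounds of
 * the slab object or stack frame the pointer refers to */
static void check_object_size_stub(const void *ptr, unsigned long n, int to_user)
{
        (void)ptr; (void)n; (void)to_user;
}

/* returns the number of bytes NOT copied, like the kernel helpers */
static unsigned long copy_guarded(void *to, const void *from, unsigned long n)
{
        if ((long)n < 0)                 /* absurdly large n: refuse outright */
                return n;
        if (!__builtin_constant_p(n))    /* only non-constant sizes are vetted */
                check_object_size_stub(from, n, 1);
        memcpy(to, from, n);             /* stands in for __copy_tofrom_user() */
        return 0;
}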
4085 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4086 index 429983c..7af363b 100644
4087 --- a/arch/powerpc/kernel/exceptions-64e.S
4088 +++ b/arch/powerpc/kernel/exceptions-64e.S
4089 @@ -587,6 +587,7 @@ storage_fault_common:
4090 std r14,_DAR(r1)
4091 std r15,_DSISR(r1)
4092 addi r3,r1,STACK_FRAME_OVERHEAD
4093 + bl .save_nvgprs
4094 mr r4,r14
4095 mr r5,r15
4096 ld r14,PACA_EXGEN+EX_R14(r13)
4097 @@ -596,8 +597,7 @@ storage_fault_common:
4098 cmpdi r3,0
4099 bne- 1f
4100 b .ret_from_except_lite
4101 -1: bl .save_nvgprs
4102 - mr r5,r3
4103 +1: mr r5,r3
4104 addi r3,r1,STACK_FRAME_OVERHEAD
4105 ld r4,_DAR(r1)
4106 bl .bad_page_fault
4107 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4108 index 15c5a4f..22a4000 100644
4109 --- a/arch/powerpc/kernel/exceptions-64s.S
4110 +++ b/arch/powerpc/kernel/exceptions-64s.S
4111 @@ -1004,10 +1004,10 @@ handle_page_fault:
4112 11: ld r4,_DAR(r1)
4113 ld r5,_DSISR(r1)
4114 addi r3,r1,STACK_FRAME_OVERHEAD
4115 + bl .save_nvgprs
4116 bl .do_page_fault
4117 cmpdi r3,0
4118 beq+ 13f
4119 - bl .save_nvgprs
4120 mr r5,r3
4121 addi r3,r1,STACK_FRAME_OVERHEAD
4122 lwz r4,_DAR(r1)
4123 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
4124 index 01e2877..a1ba360 100644
4125 --- a/arch/powerpc/kernel/irq.c
4126 +++ b/arch/powerpc/kernel/irq.c
4127 @@ -560,9 +560,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
4128 host->ops = ops;
4129 host->of_node = of_node_get(of_node);
4130
4131 - if (host->ops->match == NULL)
4132 - host->ops->match = default_irq_host_match;
4133 -
4134 raw_spin_lock_irqsave(&irq_big_lock, flags);
4135
4136 /* If it's a legacy controller, check for duplicates and
4137 @@ -635,7 +632,12 @@ struct irq_host *irq_find_host(struct device_node *node)
4138 */
4139 raw_spin_lock_irqsave(&irq_big_lock, flags);
4140 list_for_each_entry(h, &irq_hosts, link)
4141 - if (h->ops->match(h, node)) {
4142 + if (h->ops->match) {
4143 + if (h->ops->match(h, node)) {
4144 + found = h;
4145 + break;
4146 + }
4147 + } else if (default_irq_host_match(h, node)) {
4148 found = h;
4149 break;
4150 }
4151 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4152 index 0b6d796..d760ddb 100644
4153 --- a/arch/powerpc/kernel/module_32.c
4154 +++ b/arch/powerpc/kernel/module_32.c
4155 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4156 me->arch.core_plt_section = i;
4157 }
4158 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4159 - printk("Module doesn't contain .plt or .init.plt sections.\n");
4160 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4161 return -ENOEXEC;
4162 }
4163
4164 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4165
4166 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4167 /* Init, or core PLT? */
4168 - if (location >= mod->module_core
4169 - && location < mod->module_core + mod->core_size)
4170 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4171 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4172 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4173 - else
4174 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4175 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4176 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4177 + else {
4178 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4179 + return ~0UL;
4180 + }
4181
4182 /* Find this entry, or if that fails, the next avail. entry */
4183 while (entry->jump[0]) {
4184 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4185 index d817ab0..b23b18e 100644
4186 --- a/arch/powerpc/kernel/process.c
4187 +++ b/arch/powerpc/kernel/process.c
4188 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
4189 * Lookup NIP late so we have the best change of getting the
4190 * above info out without failing
4191 */
4192 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4193 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4194 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4195 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4196 #endif
4197 show_stack(current, (unsigned long *) regs->gpr[1]);
4198 if (!user_mode(regs))
4199 @@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4200 newsp = stack[0];
4201 ip = stack[STACK_FRAME_LR_SAVE];
4202 if (!firstframe || ip != lr) {
4203 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4204 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4205 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4206 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4207 - printk(" (%pS)",
4208 + printk(" (%pA)",
4209 (void *)current->ret_stack[curr_frame].ret);
4210 curr_frame--;
4211 }
4212 @@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4213 struct pt_regs *regs = (struct pt_regs *)
4214 (sp + STACK_FRAME_OVERHEAD);
4215 lr = regs->link;
4216 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
4217 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
4218 regs->trap, (void *)regs->nip, (void *)lr);
4219 firstframe = 1;
4220 }
4221 @@ -1279,58 +1279,3 @@ void thread_info_cache_init(void)
4222 }
4223
4224 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4225 -
4226 -unsigned long arch_align_stack(unsigned long sp)
4227 -{
4228 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4229 - sp -= get_random_int() & ~PAGE_MASK;
4230 - return sp & ~0xf;
4231 -}
4232 -
4233 -static inline unsigned long brk_rnd(void)
4234 -{
4235 - unsigned long rnd = 0;
4236 -
4237 - /* 8MB for 32bit, 1GB for 64bit */
4238 - if (is_32bit_task())
4239 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4240 - else
4241 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4242 -
4243 - return rnd << PAGE_SHIFT;
4244 -}
4245 -
4246 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4247 -{
4248 - unsigned long base = mm->brk;
4249 - unsigned long ret;
4250 -
4251 -#ifdef CONFIG_PPC_STD_MMU_64
4252 - /*
4253 - * If we are using 1TB segments and we are allowed to randomise
4254 - * the heap, we can put it above 1TB so it is backed by a 1TB
4255 - * segment. Otherwise the heap will be in the bottom 1TB
4256 - * which always uses 256MB segments and this may result in a
4257 - * performance penalty.
4258 - */
4259 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4260 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4261 -#endif
4262 -
4263 - ret = PAGE_ALIGN(base + brk_rnd());
4264 -
4265 - if (ret < mm->brk)
4266 - return mm->brk;
4267 -
4268 - return ret;
4269 -}
4270 -
4271 -unsigned long randomize_et_dyn(unsigned long base)
4272 -{
4273 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4274 -
4275 - if (ret < base)
4276 - return base;
4277 -
4278 - return ret;
4279 -}
4280 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4281 index 5b43325..94a5bb4 100644
4282 --- a/arch/powerpc/kernel/ptrace.c
4283 +++ b/arch/powerpc/kernel/ptrace.c
4284 @@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4285 return ret;
4286 }
4287
4288 +#ifdef CONFIG_GRKERNSEC_SETXID
4289 +extern void gr_delayed_cred_worker(void);
4290 +#endif
4291 +
4292 /*
4293 * We must return the syscall number to actually look up in the table.
4294 * This can be -1L to skip running any syscall at all.
4295 @@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4296
4297 secure_computing(regs->gpr[0]);
4298
4299 +#ifdef CONFIG_GRKERNSEC_SETXID
4300 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4301 + gr_delayed_cred_worker();
4302 +#endif
4303 +
4304 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4305 tracehook_report_syscall_entry(regs))
4306 /*
4307 @@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4308 {
4309 int step;
4310
4311 +#ifdef CONFIG_GRKERNSEC_SETXID
4312 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4313 + gr_delayed_cred_worker();
4314 +#endif
4315 +
4316 audit_syscall_exit(regs);
4317
4318 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4319 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4320 index 836a5a1..27289a3 100644
4321 --- a/arch/powerpc/kernel/signal_32.c
4322 +++ b/arch/powerpc/kernel/signal_32.c
4323 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4324 /* Save user registers on the stack */
4325 frame = &rt_sf->uc.uc_mcontext;
4326 addr = frame;
4327 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4328 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4329 if (save_user_regs(regs, frame, 0, 1))
4330 goto badframe;
4331 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4332 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4333 index a50b5ec..547078a 100644
4334 --- a/arch/powerpc/kernel/signal_64.c
4335 +++ b/arch/powerpc/kernel/signal_64.c
4336 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4337 current->thread.fpscr.val = 0;
4338
4339 /* Set up to return from userspace. */
4340 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4341 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4342 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4343 } else {
4344 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4345 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4346 index c091527..5592625 100644
4347 --- a/arch/powerpc/kernel/traps.c
4348 +++ b/arch/powerpc/kernel/traps.c
4349 @@ -131,6 +131,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4350 return flags;
4351 }
4352
4353 +extern void gr_handle_kernel_exploit(void);
4354 +
4355 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4356 int signr)
4357 {
4358 @@ -178,6 +180,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4359 panic("Fatal exception in interrupt");
4360 if (panic_on_oops)
4361 panic("Fatal exception");
4362 +
4363 + gr_handle_kernel_exploit();
4364 +
4365 do_exit(signr);
4366 }
4367
4368 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4369 index 7d14bb6..1305601 100644
4370 --- a/arch/powerpc/kernel/vdso.c
4371 +++ b/arch/powerpc/kernel/vdso.c
4372 @@ -35,6 +35,7 @@
4373 #include <asm/firmware.h>
4374 #include <asm/vdso.h>
4375 #include <asm/vdso_datapage.h>
4376 +#include <asm/mman.h>
4377
4378 #include "setup.h"
4379
4380 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4381 vdso_base = VDSO32_MBASE;
4382 #endif
4383
4384 - current->mm->context.vdso_base = 0;
4385 + current->mm->context.vdso_base = ~0UL;
4386
4387 /* vDSO has a problem and was disabled, just don't "enable" it for the
4388 * process
4389 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4390 vdso_base = get_unmapped_area(NULL, vdso_base,
4391 (vdso_pages << PAGE_SHIFT) +
4392 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4393 - 0, 0);
4394 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
4395 if (IS_ERR_VALUE(vdso_base)) {
4396 rc = vdso_base;
4397 goto fail_mmapsem;
4398 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4399 index 5eea6f3..5d10396 100644
4400 --- a/arch/powerpc/lib/usercopy_64.c
4401 +++ b/arch/powerpc/lib/usercopy_64.c
4402 @@ -9,22 +9,6 @@
4403 #include <linux/module.h>
4404 #include <asm/uaccess.h>
4405
4406 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4407 -{
4408 - if (likely(access_ok(VERIFY_READ, from, n)))
4409 - n = __copy_from_user(to, from, n);
4410 - else
4411 - memset(to, 0, n);
4412 - return n;
4413 -}
4414 -
4415 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4416 -{
4417 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4418 - n = __copy_to_user(to, from, n);
4419 - return n;
4420 -}
4421 -
4422 unsigned long copy_in_user(void __user *to, const void __user *from,
4423 unsigned long n)
4424 {
4425 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4426 return n;
4427 }
4428
4429 -EXPORT_SYMBOL(copy_from_user);
4430 -EXPORT_SYMBOL(copy_to_user);
4431 EXPORT_SYMBOL(copy_in_user);
4432
4433 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4434 index 2f0d1b0..36fb5cc 100644
4435 --- a/arch/powerpc/mm/fault.c
4436 +++ b/arch/powerpc/mm/fault.c
4437 @@ -32,6 +32,10 @@
4438 #include <linux/perf_event.h>
4439 #include <linux/magic.h>
4440 #include <linux/ratelimit.h>
4441 +#include <linux/slab.h>
4442 +#include <linux/pagemap.h>
4443 +#include <linux/compiler.h>
4444 +#include <linux/unistd.h>
4445
4446 #include <asm/firmware.h>
4447 #include <asm/page.h>
4448 @@ -43,6 +47,7 @@
4449 #include <asm/tlbflush.h>
4450 #include <asm/siginfo.h>
4451 #include <mm/mmu_decl.h>
4452 +#include <asm/ptrace.h>
4453
4454 #include "icswx.h"
4455
4456 @@ -68,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4457 }
4458 #endif
4459
4460 +#ifdef CONFIG_PAX_PAGEEXEC
4461 +/*
4462 + * PaX: decide what to do with offenders (regs->nip = fault address)
4463 + *
4464 + * returns 1 when task should be killed
4465 + */
4466 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4467 +{
4468 + return 1;
4469 +}
4470 +
4471 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4472 +{
4473 + unsigned long i;
4474 +
4475 + printk(KERN_ERR "PAX: bytes at PC: ");
4476 + for (i = 0; i < 5; i++) {
4477 + unsigned int c;
4478 + if (get_user(c, (unsigned int __user *)pc+i))
4479 + printk(KERN_CONT "???????? ");
4480 + else
4481 + printk(KERN_CONT "%08x ", c);
4482 + }
4483 + printk("\n");
4484 +}
4485 +#endif
4486 +
4487 /*
4488 * Check whether the instruction at regs->nip is a store using
4489 * an update addressing form which will update r1.
4490 @@ -138,7 +170,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4491 * indicate errors in DSISR but can validly be set in SRR1.
4492 */
4493 if (trap == 0x400)
4494 - error_code &= 0x48200000;
4495 + error_code &= 0x58200000;
4496 else
4497 is_write = error_code & DSISR_ISSTORE;
4498 #else
4499 @@ -276,7 +308,7 @@ good_area:
4500 * "undefined". Of those that can be set, this is the only
4501 * one which seems bad.
4502 */
4503 - if (error_code & 0x10000000)
4504 + if (error_code & DSISR_GUARDED)
4505 /* Guarded storage error. */
4506 goto bad_area;
4507 #endif /* CONFIG_8xx */
4508 @@ -291,7 +323,7 @@ good_area:
4509 * processors use the same I/D cache coherency mechanism
4510 * as embedded.
4511 */
4512 - if (error_code & DSISR_PROTFAULT)
4513 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4514 goto bad_area;
4515 #endif /* CONFIG_PPC_STD_MMU */
4516
4517 @@ -360,6 +392,23 @@ bad_area:
4518 bad_area_nosemaphore:
4519 /* User mode accesses cause a SIGSEGV */
4520 if (user_mode(regs)) {
4521 +
4522 +#ifdef CONFIG_PAX_PAGEEXEC
4523 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4524 +#ifdef CONFIG_PPC_STD_MMU
4525 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4526 +#else
4527 + if (is_exec && regs->nip == address) {
4528 +#endif
4529 + switch (pax_handle_fetch_fault(regs)) {
4530 + }
4531 +
4532 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4533 + do_group_exit(SIGKILL);
4534 + }
4535 + }
4536 +#endif
4537 +
4538 _exception(SIGSEGV, regs, code, address);
4539 return 0;
4540 }
4541 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4542 index 67a42ed..1c7210c 100644
4543 --- a/arch/powerpc/mm/mmap_64.c
4544 +++ b/arch/powerpc/mm/mmap_64.c
4545 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4546 */
4547 if (mmap_is_legacy()) {
4548 mm->mmap_base = TASK_UNMAPPED_BASE;
4549 +
4550 +#ifdef CONFIG_PAX_RANDMMAP
4551 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4552 + mm->mmap_base += mm->delta_mmap;
4553 +#endif
4554 +
4555 mm->get_unmapped_area = arch_get_unmapped_area;
4556 mm->unmap_area = arch_unmap_area;
4557 } else {
4558 mm->mmap_base = mmap_base();
4559 +
4560 +#ifdef CONFIG_PAX_RANDMMAP
4561 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4562 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4563 +#endif
4564 +
4565 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4566 mm->unmap_area = arch_unmap_area_topdown;
4567 }
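The mmap_64.c hunk above (and the analogous s390 hunk further down) applies PAX_RANDMMAP by shifting the mmap base with per-process random deltas chosen at exec time: the legacy bottom-up base moves up by delta_mmap, the top-down base moves down by delta_mmap plus delta_stack. A small arithmetic sketch; the base addresses and deltas are assumed values, not the kernel's:

#include <stdio.h>

int main(void)
{
        unsigned long page         = 4096UL;
        unsigned long legacy_base  = 0x40000000UL;  /* assumed TASK_UNMAPPED_BASE */
        unsigned long topdown_base = 0xc0000000UL;  /* assumed mmap_base() result */
        unsigned long delta_mmap   = 123 * page;    /* random, fixed at exec time */
        unsigned long delta_stack  =  45 * page;    /* random, fixed at exec time */

        printf("legacy layout:   base + delta_mmap               = %#lx\n",
               legacy_base + delta_mmap);
        printf("top-down layout: base - (delta_mmap+delta_stack)  = %#lx\n",
               topdown_base - (delta_mmap + delta_stack));
        return 0;
}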
4568 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4569 index 73709f7..6b90313 100644
4570 --- a/arch/powerpc/mm/slice.c
4571 +++ b/arch/powerpc/mm/slice.c
4572 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4573 if ((mm->task_size - len) < addr)
4574 return 0;
4575 vma = find_vma(mm, addr);
4576 - return (!vma || (addr + len) <= vma->vm_start);
4577 + return check_heap_stack_gap(vma, addr, len);
4578 }
4579
4580 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4581 @@ -256,7 +256,7 @@ full_search:
4582 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4583 continue;
4584 }
4585 - if (!vma || addr + len <= vma->vm_start) {
4586 + if (check_heap_stack_gap(vma, addr, len)) {
4587 /*
4588 * Remember the place where we stopped the search:
4589 */
4590 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4591 }
4592 }
4593
4594 - addr = mm->mmap_base;
4595 - while (addr > len) {
4596 + if (mm->mmap_base < len)
4597 + addr = -ENOMEM;
4598 + else
4599 + addr = mm->mmap_base - len;
4600 +
4601 + while (!IS_ERR_VALUE(addr)) {
4602 /* Go down by chunk size */
4603 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4604 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4605
4606 /* Check for hit with different page size */
4607 mask = slice_range_to_mask(addr, len);
4608 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4609 * return with success:
4610 */
4611 vma = find_vma(mm, addr);
4612 - if (!vma || (addr + len) <= vma->vm_start) {
4613 + if (check_heap_stack_gap(vma, addr, len)) {
4614 /* remember the address as a hint for next time */
4615 if (use_cache)
4616 mm->free_area_cache = addr;
4617 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4618 mm->cached_hole_size = vma->vm_start - addr;
4619
4620 /* try just below the current vma->vm_start */
4621 - addr = vma->vm_start;
4622 + addr = skip_heap_stack_gap(vma, len);
4623 }
4624
4625 /*
4626 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4627 if (fixed && addr > (mm->task_size - len))
4628 return -EINVAL;
4629
4630 +#ifdef CONFIG_PAX_RANDMMAP
4631 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4632 + addr = 0;
4633 +#endif
4634 +
4635 /* If hint, make sure it matches our alignment restrictions */
4636 if (!fixed && addr) {
4637 addr = _ALIGN_UP(addr, 1ul << pshift);
4638 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4639 index 8517d2a..d2738d4 100644
4640 --- a/arch/s390/include/asm/atomic.h
4641 +++ b/arch/s390/include/asm/atomic.h
4642 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4643 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4644 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4645
4646 +#define atomic64_read_unchecked(v) atomic64_read(v)
4647 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4648 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4649 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4650 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4651 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4652 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4653 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4654 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4655 +
4656 #define smp_mb__before_atomic_dec() smp_mb()
4657 #define smp_mb__after_atomic_dec() smp_mb()
4658 #define smp_mb__before_atomic_inc() smp_mb()
4659 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4660 index 2a30d5a..5e5586f 100644
4661 --- a/arch/s390/include/asm/cache.h
4662 +++ b/arch/s390/include/asm/cache.h
4663 @@ -11,8 +11,10 @@
4664 #ifndef __ARCH_S390_CACHE_H
4665 #define __ARCH_S390_CACHE_H
4666
4667 -#define L1_CACHE_BYTES 256
4668 +#include <linux/const.h>
4669 +
4670 #define L1_CACHE_SHIFT 8
4671 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4672 #define NET_SKB_PAD 32
4673
4674 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4675 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4676 index 547f1a6..0b22b53 100644
4677 --- a/arch/s390/include/asm/elf.h
4678 +++ b/arch/s390/include/asm/elf.h
4679 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
4680 the loader. We need to make sure that it is out of the way of the program
4681 that it will "exec", and that there is sufficient room for the brk. */
4682
4683 -extern unsigned long randomize_et_dyn(unsigned long base);
4684 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4685 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4686 +
4687 +#ifdef CONFIG_PAX_ASLR
4688 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4689 +
4690 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4691 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4692 +#endif
4693
4694 /* This yields a mask that user programs can use to figure out what
4695 instruction set this CPU supports. */
4696 @@ -211,7 +217,4 @@ struct linux_binprm;
4697 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4698 int arch_setup_additional_pages(struct linux_binprm *, int);
4699
4700 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4701 -#define arch_randomize_brk arch_randomize_brk
4702 -
4703 #endif
4704 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
4705 index d73cc6b..1a296ad 100644
4706 --- a/arch/s390/include/asm/system.h
4707 +++ b/arch/s390/include/asm/system.h
4708 @@ -260,7 +260,7 @@ extern void (*_machine_restart)(char *command);
4709 extern void (*_machine_halt)(void);
4710 extern void (*_machine_power_off)(void);
4711
4712 -extern unsigned long arch_align_stack(unsigned long sp);
4713 +#define arch_align_stack(x) ((x) & ~0xfUL)
4714
4715 static inline int tprot(unsigned long addr)
4716 {
4717 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4718 index 2b23885..e136e31 100644
4719 --- a/arch/s390/include/asm/uaccess.h
4720 +++ b/arch/s390/include/asm/uaccess.h
4721 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
4722 copy_to_user(void __user *to, const void *from, unsigned long n)
4723 {
4724 might_fault();
4725 +
4726 + if ((long)n < 0)
4727 + return n;
4728 +
4729 if (access_ok(VERIFY_WRITE, to, n))
4730 n = __copy_to_user(to, from, n);
4731 return n;
4732 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4733 static inline unsigned long __must_check
4734 __copy_from_user(void *to, const void __user *from, unsigned long n)
4735 {
4736 + if ((long)n < 0)
4737 + return n;
4738 +
4739 if (__builtin_constant_p(n) && (n <= 256))
4740 return uaccess.copy_from_user_small(n, from, to);
4741 else
4742 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4743 unsigned int sz = __compiletime_object_size(to);
4744
4745 might_fault();
4746 +
4747 + if ((long)n < 0)
4748 + return n;
4749 +
4750 if (unlikely(sz != -1 && sz < n)) {
4751 copy_from_user_overflow();
4752 return n;
4753 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4754 index dfcb343..eda788a 100644
4755 --- a/arch/s390/kernel/module.c
4756 +++ b/arch/s390/kernel/module.c
4757 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4758
4759 /* Increase core size by size of got & plt and set start
4760 offsets for got and plt. */
4761 - me->core_size = ALIGN(me->core_size, 4);
4762 - me->arch.got_offset = me->core_size;
4763 - me->core_size += me->arch.got_size;
4764 - me->arch.plt_offset = me->core_size;
4765 - me->core_size += me->arch.plt_size;
4766 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4767 + me->arch.got_offset = me->core_size_rw;
4768 + me->core_size_rw += me->arch.got_size;
4769 + me->arch.plt_offset = me->core_size_rx;
4770 + me->core_size_rx += me->arch.plt_size;
4771 return 0;
4772 }
4773
4774 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4775 if (info->got_initialized == 0) {
4776 Elf_Addr *gotent;
4777
4778 - gotent = me->module_core + me->arch.got_offset +
4779 + gotent = me->module_core_rw + me->arch.got_offset +
4780 info->got_offset;
4781 *gotent = val;
4782 info->got_initialized = 1;
4783 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4784 else if (r_type == R_390_GOTENT ||
4785 r_type == R_390_GOTPLTENT)
4786 *(unsigned int *) loc =
4787 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4788 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4789 else if (r_type == R_390_GOT64 ||
4790 r_type == R_390_GOTPLT64)
4791 *(unsigned long *) loc = val;
4792 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4793 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4794 if (info->plt_initialized == 0) {
4795 unsigned int *ip;
4796 - ip = me->module_core + me->arch.plt_offset +
4797 + ip = me->module_core_rx + me->arch.plt_offset +
4798 info->plt_offset;
4799 #ifndef CONFIG_64BIT
4800 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4801 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4802 val - loc + 0xffffUL < 0x1ffffeUL) ||
4803 (r_type == R_390_PLT32DBL &&
4804 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4805 - val = (Elf_Addr) me->module_core +
4806 + val = (Elf_Addr) me->module_core_rx +
4807 me->arch.plt_offset +
4808 info->plt_offset;
4809 val += rela->r_addend - loc;
4810 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4811 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4812 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4813 val = val + rela->r_addend -
4814 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4815 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4816 if (r_type == R_390_GOTOFF16)
4817 *(unsigned short *) loc = val;
4818 else if (r_type == R_390_GOTOFF32)
4819 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4820 break;
4821 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4822 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4823 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4824 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4825 rela->r_addend - loc;
4826 if (r_type == R_390_GOTPC)
4827 *(unsigned int *) loc = val;
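
The s390 module-loader hunks above keep the GOT in the writable part of the module and the PLT in the executable part, which is why the single core_size/module_core pair becomes _rw/_rx pairs. A reduced sketch of that bookkeeping (field names follow the patch, but the struct here is pared down to what the example needs and is not the kernel's struct module):

    #include <stdio.h>

    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    struct mod_layout {
        unsigned long core_size_rw;   /* data, GOT */
        unsigned long core_size_rx;   /* text, PLT */
        unsigned long got_offset;     /* offset inside the RW region */
        unsigned long plt_offset;     /* offset inside the RX region */
    };

    /* Mirrors the arithmetic in module_frob_arch_sections() above. */
    static void reserve_got_plt(struct mod_layout *m,
                                unsigned long got_size, unsigned long plt_size)
    {
        m->core_size_rw  = ALIGN(m->core_size_rw, 4);
        m->got_offset    = m->core_size_rw;
        m->core_size_rw += got_size;

        m->plt_offset    = m->core_size_rx;
        m->core_size_rx += plt_size;
    }

    int main(void)
    {
        struct mod_layout m = { .core_size_rw = 4094, .core_size_rx = 8192 };

        reserve_got_plt(&m, 64, 128);
        printf("GOT at rw+%lu, PLT at rx+%lu\n", m.got_offset, m.plt_offset);
        printf("rw=%lu rx=%lu\n", m.core_size_rw, m.core_size_rx);
        return 0;
    }
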
4828 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4829 index e795933..b32563c 100644
4830 --- a/arch/s390/kernel/process.c
4831 +++ b/arch/s390/kernel/process.c
4832 @@ -323,39 +323,3 @@ unsigned long get_wchan(struct task_struct *p)
4833 }
4834 return 0;
4835 }
4836 -
4837 -unsigned long arch_align_stack(unsigned long sp)
4838 -{
4839 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4840 - sp -= get_random_int() & ~PAGE_MASK;
4841 - return sp & ~0xf;
4842 -}
4843 -
4844 -static inline unsigned long brk_rnd(void)
4845 -{
4846 - /* 8MB for 32bit, 1GB for 64bit */
4847 - if (is_32bit_task())
4848 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4849 - else
4850 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4851 -}
4852 -
4853 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4854 -{
4855 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4856 -
4857 - if (ret < mm->brk)
4858 - return mm->brk;
4859 - return ret;
4860 -}
4861 -
4862 -unsigned long randomize_et_dyn(unsigned long base)
4863 -{
4864 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4865 -
4866 - if (!(current->flags & PF_RANDOMIZE))
4867 - return base;
4868 - if (ret < base)
4869 - return base;
4870 - return ret;
4871 -}
4872 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4873 index a0155c0..34cc491 100644
4874 --- a/arch/s390/mm/mmap.c
4875 +++ b/arch/s390/mm/mmap.c
4876 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4877 */
4878 if (mmap_is_legacy()) {
4879 mm->mmap_base = TASK_UNMAPPED_BASE;
4880 +
4881 +#ifdef CONFIG_PAX_RANDMMAP
4882 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4883 + mm->mmap_base += mm->delta_mmap;
4884 +#endif
4885 +
4886 mm->get_unmapped_area = arch_get_unmapped_area;
4887 mm->unmap_area = arch_unmap_area;
4888 } else {
4889 mm->mmap_base = mmap_base();
4890 +
4891 +#ifdef CONFIG_PAX_RANDMMAP
4892 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4893 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4894 +#endif
4895 +
4896 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4897 mm->unmap_area = arch_unmap_area_topdown;
4898 }
4899 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4900 */
4901 if (mmap_is_legacy()) {
4902 mm->mmap_base = TASK_UNMAPPED_BASE;
4903 +
4904 +#ifdef CONFIG_PAX_RANDMMAP
4905 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4906 + mm->mmap_base += mm->delta_mmap;
4907 +#endif
4908 +
4909 mm->get_unmapped_area = s390_get_unmapped_area;
4910 mm->unmap_area = arch_unmap_area;
4911 } else {
4912 mm->mmap_base = mmap_base();
4913 +
4914 +#ifdef CONFIG_PAX_RANDMMAP
4915 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4916 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4917 +#endif
4918 +
4919 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4920 mm->unmap_area = arch_unmap_area_topdown;
4921 }
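
Both layouts get the same treatment: the legacy bottom-up base is pushed up by delta_mmap, while the top-down base is pulled down by delta_mmap + delta_stack, so mappings keep their usual growth direction but start from a randomised offset. A toy sketch of the arithmetic only; the delta values below are invented, and a fixed gap stands in for mmap_base(), whereas in the patch both come from PaX's per-mm randomisation state:

    #include <stdio.h>

    #define TASK_UNMAPPED_BASE  0x40000000UL
    #define STACK_TOP           0x80000000UL

    int main(void)
    {
        unsigned long delta_mmap  = 0x00350000UL;   /* hypothetical random delta */
        unsigned long delta_stack = 0x00012000UL;

        /* legacy (bottom-up) layout: shift the base up */
        unsigned long legacy_base  = TASK_UNMAPPED_BASE + delta_mmap;

        /* top-down layout: shift the base down, away from the stack */
        unsigned long topdown_base = (STACK_TOP - 0x10000000UL)
                                     - (delta_mmap + delta_stack);

        printf("legacy  mmap_base = %#lx\n", legacy_base);
        printf("topdown mmap_base = %#lx\n", topdown_base);
        return 0;
    }
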
4922 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4923 index ae3d59f..f65f075 100644
4924 --- a/arch/score/include/asm/cache.h
4925 +++ b/arch/score/include/asm/cache.h
4926 @@ -1,7 +1,9 @@
4927 #ifndef _ASM_SCORE_CACHE_H
4928 #define _ASM_SCORE_CACHE_H
4929
4930 +#include <linux/const.h>
4931 +
4932 #define L1_CACHE_SHIFT 4
4933 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4934 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4935
4936 #endif /* _ASM_SCORE_CACHE_H */
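
This cache.h change (and the identical ones for sh and sparc below) replaces a plain 1 << SHIFT, which is an int, with _AC(1,UL) << SHIFT from <linux/const.h>, so L1_CACHE_BYTES is an unsigned long in C yet still a bare number when the header is included from assembly. A reduced copy of the <linux/const.h> idea, compilable on its own:

    #include <stdio.h>

    /* Reduced sketch of the _AC() machinery in <linux/const.h>. */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT  4
    #define L1_CACHE_BYTES  (_AC(1, UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        /* 1UL << 4: the constant has unsigned long type, not int. */
        printf("L1_CACHE_BYTES = %lu (sizeof = %zu)\n",
               L1_CACHE_BYTES, sizeof(L1_CACHE_BYTES));
        return 0;
    }
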
4937 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4938 index 589d5c7..669e274 100644
4939 --- a/arch/score/include/asm/system.h
4940 +++ b/arch/score/include/asm/system.h
4941 @@ -17,7 +17,7 @@ do { \
4942 #define finish_arch_switch(prev) do {} while (0)
4943
4944 typedef void (*vi_handler_t)(void);
4945 -extern unsigned long arch_align_stack(unsigned long sp);
4946 +#define arch_align_stack(x) (x)
4947
4948 #define mb() barrier()
4949 #define rmb() barrier()
4950 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4951 index 25d0803..d6c8e36 100644
4952 --- a/arch/score/kernel/process.c
4953 +++ b/arch/score/kernel/process.c
4954 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4955
4956 return task_pt_regs(task)->cp0_epc;
4957 }
4958 -
4959 -unsigned long arch_align_stack(unsigned long sp)
4960 -{
4961 - return sp;
4962 -}
4963 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4964 index ef9e555..331bd29 100644
4965 --- a/arch/sh/include/asm/cache.h
4966 +++ b/arch/sh/include/asm/cache.h
4967 @@ -9,10 +9,11 @@
4968 #define __ASM_SH_CACHE_H
4969 #ifdef __KERNEL__
4970
4971 +#include <linux/const.h>
4972 #include <linux/init.h>
4973 #include <cpu/cache.h>
4974
4975 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4976 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4977
4978 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4979
4980 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4981 index afeb710..d1d1289 100644
4982 --- a/arch/sh/mm/mmap.c
4983 +++ b/arch/sh/mm/mmap.c
4984 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4985 addr = PAGE_ALIGN(addr);
4986
4987 vma = find_vma(mm, addr);
4988 - if (TASK_SIZE - len >= addr &&
4989 - (!vma || addr + len <= vma->vm_start))
4990 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4991 return addr;
4992 }
4993
4994 @@ -106,7 +105,7 @@ full_search:
4995 }
4996 return -ENOMEM;
4997 }
4998 - if (likely(!vma || addr + len <= vma->vm_start)) {
4999 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5000 /*
5001 * Remember the place where we stopped the search:
5002 */
5003 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5004 addr = PAGE_ALIGN(addr);
5005
5006 vma = find_vma(mm, addr);
5007 - if (TASK_SIZE - len >= addr &&
5008 - (!vma || addr + len <= vma->vm_start))
5009 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5010 return addr;
5011 }
5012
5013 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5014 /* make sure it can fit in the remaining address space */
5015 if (likely(addr > len)) {
5016 vma = find_vma(mm, addr-len);
5017 - if (!vma || addr <= vma->vm_start) {
5018 + if (check_heap_stack_gap(vma, addr - len, len)) {
5019 /* remember the address as a hint for next time */
5020 return (mm->free_area_cache = addr-len);
5021 }
5022 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5023 if (unlikely(mm->mmap_base < len))
5024 goto bottomup;
5025
5026 - addr = mm->mmap_base-len;
5027 - if (do_colour_align)
5028 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5029 + addr = mm->mmap_base - len;
5030
5031 do {
5032 + if (do_colour_align)
5033 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5034 /*
5035 * Lookup failure means no vma is above this address,
5036 * else if new region fits below vma->vm_start,
5037 * return with success:
5038 */
5039 vma = find_vma(mm, addr);
5040 - if (likely(!vma || addr+len <= vma->vm_start)) {
5041 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5042 /* remember the address as a hint for next time */
5043 return (mm->free_area_cache = addr);
5044 }
5045 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5046 mm->cached_hole_size = vma->vm_start - addr;
5047
5048 /* try just below the current vma->vm_start */
5049 - addr = vma->vm_start-len;
5050 - if (do_colour_align)
5051 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5052 - } while (likely(len < vma->vm_start));
5053 + addr = skip_heap_stack_gap(vma, len);
5054 + } while (!IS_ERR_VALUE(addr));
5055
5056 bottomup:
5057 /*
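
Every open-coded "!vma || addr + len <= vma->vm_start" test in these unmapped-area walkers is replaced by check_heap_stack_gap(), which additionally refuses candidates that would butt up against a downward-growing stack. The real helper (and skip_heap_stack_gap()) is introduced by other hunks of this patch in the mm headers; the sketch below only illustrates the shape of the check, using a hypothetical pared-down vma type and an arbitrary guard size:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define VM_GROWSDOWN  0x0100UL

    /* Hypothetical, reduced vma: just what the check needs. */
    struct vma {
        unsigned long vm_start;
        unsigned long vm_flags;
    };

    /* Illustrative only -- the patch's actual helper lives in the mm headers. */
    static bool check_heap_stack_gap(const struct vma *vma,
                                     unsigned long addr, unsigned long len)
    {
        unsigned long gap = 64 * PAGE_SIZE;     /* stand-in guard size */

        if (!vma)
            return true;                        /* nothing above: fits */
        if (addr + len > vma->vm_start)
            return false;                       /* overlaps the next vma */
        if (vma->vm_flags & VM_GROWSDOWN)       /* keep a gap below a stack */
            return addr + len + gap <= vma->vm_start;
        return true;
    }

    int main(void)
    {
        struct vma stack = { .vm_start = 0x7f0000000000UL,
                             .vm_flags = VM_GROWSDOWN };

        printf("right below stack: %d\n",
               check_heap_stack_gap(&stack, stack.vm_start - PAGE_SIZE, PAGE_SIZE));
        printf("well below stack:  %d\n",
               check_heap_stack_gap(&stack,
                                    stack.vm_start - 1024 * PAGE_SIZE, PAGE_SIZE));
        return 0;
    }
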
5058 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5059 index eddcfb3..b117d90 100644
5060 --- a/arch/sparc/Makefile
5061 +++ b/arch/sparc/Makefile
5062 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5063 # Export what is needed by arch/sparc/boot/Makefile
5064 export VMLINUX_INIT VMLINUX_MAIN
5065 VMLINUX_INIT := $(head-y) $(init-y)
5066 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5067 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5068 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5069 VMLINUX_MAIN += $(drivers-y) $(net-y)
5070
5071 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5072 index 9f421df..b81fc12 100644
5073 --- a/arch/sparc/include/asm/atomic_64.h
5074 +++ b/arch/sparc/include/asm/atomic_64.h
5075 @@ -14,18 +14,40 @@
5076 #define ATOMIC64_INIT(i) { (i) }
5077
5078 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5079 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5080 +{
5081 + return v->counter;
5082 +}
5083 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5084 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5085 +{
5086 + return v->counter;
5087 +}
5088
5089 #define atomic_set(v, i) (((v)->counter) = i)
5090 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5091 +{
5092 + v->counter = i;
5093 +}
5094 #define atomic64_set(v, i) (((v)->counter) = i)
5095 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5096 +{
5097 + v->counter = i;
5098 +}
5099
5100 extern void atomic_add(int, atomic_t *);
5101 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5102 extern void atomic64_add(long, atomic64_t *);
5103 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5104 extern void atomic_sub(int, atomic_t *);
5105 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5106 extern void atomic64_sub(long, atomic64_t *);
5107 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5108
5109 extern int atomic_add_ret(int, atomic_t *);
5110 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5111 extern long atomic64_add_ret(long, atomic64_t *);
5112 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5113 extern int atomic_sub_ret(int, atomic_t *);
5114 extern long atomic64_sub_ret(long, atomic64_t *);
5115
5116 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5117 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5118
5119 #define atomic_inc_return(v) atomic_add_ret(1, v)
5120 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5121 +{
5122 + return atomic_add_ret_unchecked(1, v);
5123 +}
5124 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5125 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5126 +{
5127 + return atomic64_add_ret_unchecked(1, v);
5128 +}
5129
5130 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5131 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5132
5133 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5134 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5135 +{
5136 + return atomic_add_ret_unchecked(i, v);
5137 +}
5138 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5139 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5140 +{
5141 + return atomic64_add_ret_unchecked(i, v);
5142 +}
5143
5144 /*
5145 * atomic_inc_and_test - increment and test
5146 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5147 * other cases.
5148 */
5149 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5150 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5151 +{
5152 + return atomic_inc_return_unchecked(v) == 0;
5153 +}
5154 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5155
5156 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5157 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5158 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5159
5160 #define atomic_inc(v) atomic_add(1, v)
5161 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5162 +{
5163 + atomic_add_unchecked(1, v);
5164 +}
5165 #define atomic64_inc(v) atomic64_add(1, v)
5166 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5167 +{
5168 + atomic64_add_unchecked(1, v);
5169 +}
5170
5171 #define atomic_dec(v) atomic_sub(1, v)
5172 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5173 +{
5174 + atomic_sub_unchecked(1, v);
5175 +}
5176 #define atomic64_dec(v) atomic64_sub(1, v)
5177 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5178 +{
5179 + atomic64_sub_unchecked(1, v);
5180 +}
5181
5182 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5183 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5184
5185 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5186 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5187 +{
5188 + return cmpxchg(&v->counter, old, new);
5189 +}
5190 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5191 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5192 +{
5193 + return xchg(&v->counter, new);
5194 +}
5195
5196 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5197 {
5198 - int c, old;
5199 + int c, old, new;
5200 c = atomic_read(v);
5201 for (;;) {
5202 - if (unlikely(c == (u)))
5203 + if (unlikely(c == u))
5204 break;
5205 - old = atomic_cmpxchg((v), c, c + (a));
5206 +
5207 + asm volatile("addcc %2, %0, %0\n"
5208 +
5209 +#ifdef CONFIG_PAX_REFCOUNT
5210 + "tvs %%icc, 6\n"
5211 +#endif
5212 +
5213 + : "=r" (new)
5214 + : "0" (c), "ir" (a)
5215 + : "cc");
5216 +
5217 + old = atomic_cmpxchg(v, c, new);
5218 if (likely(old == c))
5219 break;
5220 c = old;
5221 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5222 #define atomic64_cmpxchg(v, o, n) \
5223 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5224 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5225 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5226 +{
5227 + return xchg(&v->counter, new);
5228 +}
5229
5230 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5231 {
5232 - long c, old;
5233 + long c, old, new;
5234 c = atomic64_read(v);
5235 for (;;) {
5236 - if (unlikely(c == (u)))
5237 + if (unlikely(c == u))
5238 break;
5239 - old = atomic64_cmpxchg((v), c, c + (a));
5240 +
5241 + asm volatile("addcc %2, %0, %0\n"
5242 +
5243 +#ifdef CONFIG_PAX_REFCOUNT
5244 + "tvs %%xcc, 6\n"
5245 +#endif
5246 +
5247 + : "=r" (new)
5248 + : "0" (c), "ir" (a)
5249 + : "cc");
5250 +
5251 + old = atomic64_cmpxchg(v, c, new);
5252 if (likely(old == c))
5253 break;
5254 c = old;
5255 }
5256 - return c != (u);
5257 + return c != u;
5258 }
5259
5260 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
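
Under CONFIG_PAX_REFCOUNT the plain add in the cmpxchg loop becomes addcc followed by tvs %icc, 6 (or %xcc for the 64-bit case), so a signed overflow of the counter raises trap 6 instead of silently wrapping; the *_unchecked variants keep the old wrapping behaviour for counters where overflow is harmless. The kernel does this in inline assembly, but the intent can be sketched portably with the GCC/Clang overflow builtins:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Checked add: aborts (the userspace analogue of trapping) on overflow. */
    static int add_checked(int counter, int inc)
    {
        int result;

        if (__builtin_add_overflow(counter, inc, &result)) {
            fprintf(stderr, "refcount overflow detected\n");
            abort();    /* kernel: trap 6 -> pax_report_refcount_overflow() */
        }
        return result;
    }

    /* Unchecked add: wraps, like the *_unchecked atomics in the patch. */
    static int add_unchecked(int counter, int inc)
    {
        return (int)((unsigned int)counter + (unsigned int)inc);
    }

    int main(void)
    {
        printf("unchecked: %d\n", add_unchecked(INT_MAX, 1));   /* wraps */
        printf("checked:   %d\n", add_checked(INT_MAX, 1));     /* aborts */
        return 0;
    }
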
5261 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5262 index 69358b5..9d0d492 100644
5263 --- a/arch/sparc/include/asm/cache.h
5264 +++ b/arch/sparc/include/asm/cache.h
5265 @@ -7,10 +7,12 @@
5266 #ifndef _SPARC_CACHE_H
5267 #define _SPARC_CACHE_H
5268
5269 +#include <linux/const.h>
5270 +
5271 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5272
5273 #define L1_CACHE_SHIFT 5
5274 -#define L1_CACHE_BYTES 32
5275 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5276
5277 #ifdef CONFIG_SPARC32
5278 #define SMP_CACHE_BYTES_SHIFT 5
5279 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5280 index 4269ca6..e3da77f 100644
5281 --- a/arch/sparc/include/asm/elf_32.h
5282 +++ b/arch/sparc/include/asm/elf_32.h
5283 @@ -114,6 +114,13 @@ typedef struct {
5284
5285 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5286
5287 +#ifdef CONFIG_PAX_ASLR
5288 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5289 +
5290 +#define PAX_DELTA_MMAP_LEN 16
5291 +#define PAX_DELTA_STACK_LEN 16
5292 +#endif
5293 +
5294 /* This yields a mask that user programs can use to figure out what
5295 instruction set this cpu supports. This can NOT be done in userspace
5296 on Sparc. */
5297 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5298 index 7df8b7f..4946269 100644
5299 --- a/arch/sparc/include/asm/elf_64.h
5300 +++ b/arch/sparc/include/asm/elf_64.h
5301 @@ -180,6 +180,13 @@ typedef struct {
5302 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5303 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5304
5305 +#ifdef CONFIG_PAX_ASLR
5306 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5307 +
5308 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5309 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5310 +#endif
5311 +
5312 extern unsigned long sparc64_elf_hwcap;
5313 #define ELF_HWCAP sparc64_elf_hwcap
5314
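
The PAX_DELTA_*_LEN values are bit counts: a 64-bit sparc task gets 28 bits of mmap randomisation and 29 bits of stack randomisation, a 32-bit task 14 and 15. A quick worked example of the address range that covers, assuming the usual PaX construction (delta = random value masked to that many bits, then shifted left by PAGE_SHIFT) and a page shift of 13; both assumptions belong to this sketch, not to the hunk above:

    #include <stdio.h>

    static void show(const char *what, unsigned int len_bits, unsigned int page_shift)
    {
        /* delta spans (1 << (len_bits + page_shift)) bytes of address space */
        unsigned long span = 1UL << (len_bits + page_shift);

        printf("%-20s %2u bits -> randomised over %lu MiB\n",
               what, len_bits, span >> 20);
    }

    int main(void)
    {
        show("64-bit mmap delta",  28, 13);
        show("64-bit stack delta", 29, 13);
        show("32-bit mmap delta",  14, 13);
        show("32-bit stack delta", 15, 13);
        return 0;
    }
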
5315 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5316 index ca2b344..c6084f89 100644
5317 --- a/arch/sparc/include/asm/pgalloc_32.h
5318 +++ b/arch/sparc/include/asm/pgalloc_32.h
5319 @@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5320 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5321 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5322 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5323 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5324
5325 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5326 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5327 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5328 index 40b2d7a..22a665b 100644
5329 --- a/arch/sparc/include/asm/pgalloc_64.h
5330 +++ b/arch/sparc/include/asm/pgalloc_64.h
5331 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5332 }
5333
5334 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5335 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5336
5337 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5338 {
5339 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5340 index a790cc6..091ed94 100644
5341 --- a/arch/sparc/include/asm/pgtable_32.h
5342 +++ b/arch/sparc/include/asm/pgtable_32.h
5343 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5344 BTFIXUPDEF_INT(page_none)
5345 BTFIXUPDEF_INT(page_copy)
5346 BTFIXUPDEF_INT(page_readonly)
5347 +
5348 +#ifdef CONFIG_PAX_PAGEEXEC
5349 +BTFIXUPDEF_INT(page_shared_noexec)
5350 +BTFIXUPDEF_INT(page_copy_noexec)
5351 +BTFIXUPDEF_INT(page_readonly_noexec)
5352 +#endif
5353 +
5354 BTFIXUPDEF_INT(page_kernel)
5355
5356 #define PMD_SHIFT SUN4C_PMD_SHIFT
5357 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5358 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5359 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5360
5361 +#ifdef CONFIG_PAX_PAGEEXEC
5362 +extern pgprot_t PAGE_SHARED_NOEXEC;
5363 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5364 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5365 +#else
5366 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5367 +# define PAGE_COPY_NOEXEC PAGE_COPY
5368 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5369 +#endif
5370 +
5371 extern unsigned long page_kernel;
5372
5373 #ifdef MODULE
5374 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5375 index f6ae2b2..b03ffc7 100644
5376 --- a/arch/sparc/include/asm/pgtsrmmu.h
5377 +++ b/arch/sparc/include/asm/pgtsrmmu.h
5378 @@ -115,6 +115,13 @@
5379 SRMMU_EXEC | SRMMU_REF)
5380 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5381 SRMMU_EXEC | SRMMU_REF)
5382 +
5383 +#ifdef CONFIG_PAX_PAGEEXEC
5384 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5385 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5386 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5387 +#endif
5388 +
5389 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5390 SRMMU_DIRTY | SRMMU_REF)
5391
5392 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5393 index 9689176..63c18ea 100644
5394 --- a/arch/sparc/include/asm/spinlock_64.h
5395 +++ b/arch/sparc/include/asm/spinlock_64.h
5396 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5397
5398 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5399
5400 -static void inline arch_read_lock(arch_rwlock_t *lock)
5401 +static inline void arch_read_lock(arch_rwlock_t *lock)
5402 {
5403 unsigned long tmp1, tmp2;
5404
5405 __asm__ __volatile__ (
5406 "1: ldsw [%2], %0\n"
5407 " brlz,pn %0, 2f\n"
5408 -"4: add %0, 1, %1\n"
5409 +"4: addcc %0, 1, %1\n"
5410 +
5411 +#ifdef CONFIG_PAX_REFCOUNT
5412 +" tvs %%icc, 6\n"
5413 +#endif
5414 +
5415 " cas [%2], %0, %1\n"
5416 " cmp %0, %1\n"
5417 " bne,pn %%icc, 1b\n"
5418 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5419 " .previous"
5420 : "=&r" (tmp1), "=&r" (tmp2)
5421 : "r" (lock)
5422 - : "memory");
5423 + : "memory", "cc");
5424 }
5425
5426 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5427 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5428 {
5429 int tmp1, tmp2;
5430
5431 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5432 "1: ldsw [%2], %0\n"
5433 " brlz,a,pn %0, 2f\n"
5434 " mov 0, %0\n"
5435 -" add %0, 1, %1\n"
5436 +" addcc %0, 1, %1\n"
5437 +
5438 +#ifdef CONFIG_PAX_REFCOUNT
5439 +" tvs %%icc, 6\n"
5440 +#endif
5441 +
5442 " cas [%2], %0, %1\n"
5443 " cmp %0, %1\n"
5444 " bne,pn %%icc, 1b\n"
5445 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5446 return tmp1;
5447 }
5448
5449 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5450 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5451 {
5452 unsigned long tmp1, tmp2;
5453
5454 __asm__ __volatile__(
5455 "1: lduw [%2], %0\n"
5456 -" sub %0, 1, %1\n"
5457 +" subcc %0, 1, %1\n"
5458 +
5459 +#ifdef CONFIG_PAX_REFCOUNT
5460 +" tvs %%icc, 6\n"
5461 +#endif
5462 +
5463 " cas [%2], %0, %1\n"
5464 " cmp %0, %1\n"
5465 " bne,pn %%xcc, 1b\n"
5466 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5467 : "memory");
5468 }
5469
5470 -static void inline arch_write_lock(arch_rwlock_t *lock)
5471 +static inline void arch_write_lock(arch_rwlock_t *lock)
5472 {
5473 unsigned long mask, tmp1, tmp2;
5474
5475 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5476 : "memory");
5477 }
5478
5479 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5480 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5481 {
5482 __asm__ __volatile__(
5483 " stw %%g0, [%0]"
5484 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5485 : "memory");
5486 }
5487
5488 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5489 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5490 {
5491 unsigned long mask, tmp1, tmp2, result;
5492
5493 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5494 index c2a1080..21ed218 100644
5495 --- a/arch/sparc/include/asm/thread_info_32.h
5496 +++ b/arch/sparc/include/asm/thread_info_32.h
5497 @@ -50,6 +50,8 @@ struct thread_info {
5498 unsigned long w_saved;
5499
5500 struct restart_block restart_block;
5501 +
5502 + unsigned long lowest_stack;
5503 };
5504
5505 /*
5506 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5507 index 01d057f..13a7d2f 100644
5508 --- a/arch/sparc/include/asm/thread_info_64.h
5509 +++ b/arch/sparc/include/asm/thread_info_64.h
5510 @@ -63,6 +63,8 @@ struct thread_info {
5511 struct pt_regs *kern_una_regs;
5512 unsigned int kern_una_insn;
5513
5514 + unsigned long lowest_stack;
5515 +
5516 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5517 };
5518
5519 @@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5520 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5521 /* flag bit 6 is available */
5522 #define TIF_32BIT 7 /* 32-bit binary */
5523 -/* flag bit 8 is available */
5524 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5525 #define TIF_SECCOMP 9 /* secure computing */
5526 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5527 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5528 +
5529 /* NOTE: Thread flags >= 12 should be ones we have no interest
5530 * in using in assembly, else we can't use the mask as
5531 * an immediate value in instructions such as andcc.
5532 @@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5533 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5534 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5535 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5536 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5537
5538 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5539 _TIF_DO_NOTIFY_RESUME_MASK | \
5540 _TIF_NEED_RESCHED)
5541 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5542
5543 +#define _TIF_WORK_SYSCALL \
5544 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5545 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5546 +
5547 +
5548 /*
5549 * Thread-synchronous status.
5550 *
5551 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5552 index e88fbe5..96b0ce5 100644
5553 --- a/arch/sparc/include/asm/uaccess.h
5554 +++ b/arch/sparc/include/asm/uaccess.h
5555 @@ -1,5 +1,13 @@
5556 #ifndef ___ASM_SPARC_UACCESS_H
5557 #define ___ASM_SPARC_UACCESS_H
5558 +
5559 +#ifdef __KERNEL__
5560 +#ifndef __ASSEMBLY__
5561 +#include <linux/types.h>
5562 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5563 +#endif
5564 +#endif
5565 +
5566 #if defined(__sparc__) && defined(__arch64__)
5567 #include <asm/uaccess_64.h>
5568 #else
5569 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5570 index 8303ac4..07f333d 100644
5571 --- a/arch/sparc/include/asm/uaccess_32.h
5572 +++ b/arch/sparc/include/asm/uaccess_32.h
5573 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5574
5575 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5576 {
5577 - if (n && __access_ok((unsigned long) to, n))
5578 + if ((long)n < 0)
5579 + return n;
5580 +
5581 + if (n && __access_ok((unsigned long) to, n)) {
5582 + if (!__builtin_constant_p(n))
5583 + check_object_size(from, n, true);
5584 return __copy_user(to, (__force void __user *) from, n);
5585 - else
5586 + } else
5587 return n;
5588 }
5589
5590 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5591 {
5592 + if ((long)n < 0)
5593 + return n;
5594 +
5595 + if (!__builtin_constant_p(n))
5596 + check_object_size(from, n, true);
5597 +
5598 return __copy_user(to, (__force void __user *) from, n);
5599 }
5600
5601 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5602 {
5603 - if (n && __access_ok((unsigned long) from, n))
5604 + if ((long)n < 0)
5605 + return n;
5606 +
5607 + if (n && __access_ok((unsigned long) from, n)) {
5608 + if (!__builtin_constant_p(n))
5609 + check_object_size(to, n, false);
5610 return __copy_user((__force void __user *) to, from, n);
5611 - else
5612 + } else
5613 return n;
5614 }
5615
5616 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5617 {
5618 + if ((long)n < 0)
5619 + return n;
5620 +
5621 return __copy_user((__force void __user *) to, from, n);
5622 }
5623
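
The sparc copy helpers now (a) reject sign-wrapped lengths and (b) call check_object_size() for non-constant sizes, so the usercopy checking added elsewhere in this patch can verify that the kernel-side buffer really spans n bytes. A compressed sketch of that calling pattern, with a dummy checker standing in for the real check_object_size():

    #include <stdio.h>
    #include <string.h>

    /*
     * Stand-in for the patch's check_object_size(); the real one inspects
     * slab/stack metadata and reports a violation instead of printing.
     */
    static void check_object_size(const void *ptr, unsigned long n, int to_user)
    {
        printf("checking %lu byte %s buffer at %p\n",
               n, to_user ? "source" : "destination", ptr);
    }

    static unsigned long copy_to_user_sketch(void *to, const void *from,
                                             unsigned long n)
    {
        if ((long)n < 0)
            return n;                     /* bogus length: copy nothing */
        if (!__builtin_constant_p(n))     /* constant sizes are checked at build time */
            check_object_size(from, n, 1);
        memcpy(to, from, n);              /* stands in for __copy_user() */
        return 0;
    }

    int main(void)
    {
        char kbuf[32] = "kernel data";
        char ubuf[32];
        unsigned long n = strlen(kbuf) + 1;   /* runtime size -> gets checked */

        return (int)copy_to_user_sketch(ubuf, kbuf, n);
    }
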
5624 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5625 index 3e1449f..5293a0e 100644
5626 --- a/arch/sparc/include/asm/uaccess_64.h
5627 +++ b/arch/sparc/include/asm/uaccess_64.h
5628 @@ -10,6 +10,7 @@
5629 #include <linux/compiler.h>
5630 #include <linux/string.h>
5631 #include <linux/thread_info.h>
5632 +#include <linux/kernel.h>
5633 #include <asm/asi.h>
5634 #include <asm/system.h>
5635 #include <asm/spitfire.h>
5636 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5637 static inline unsigned long __must_check
5638 copy_from_user(void *to, const void __user *from, unsigned long size)
5639 {
5640 - unsigned long ret = ___copy_from_user(to, from, size);
5641 + unsigned long ret;
5642
5643 + if ((long)size < 0 || size > INT_MAX)
5644 + return size;
5645 +
5646 + if (!__builtin_constant_p(size))
5647 + check_object_size(to, size, false);
5648 +
5649 + ret = ___copy_from_user(to, from, size);
5650 if (unlikely(ret))
5651 ret = copy_from_user_fixup(to, from, size);
5652
5653 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5654 static inline unsigned long __must_check
5655 copy_to_user(void __user *to, const void *from, unsigned long size)
5656 {
5657 - unsigned long ret = ___copy_to_user(to, from, size);
5658 + unsigned long ret;
5659
5660 + if ((long)size < 0 || size > INT_MAX)
5661 + return size;
5662 +
5663 + if (!__builtin_constant_p(size))
5664 + check_object_size(from, size, true);
5665 +
5666 + ret = ___copy_to_user(to, from, size);
5667 if (unlikely(ret))
5668 ret = copy_to_user_fixup(to, from, size);
5669 return ret;
5670 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5671 index cb85458..e063f17 100644
5672 --- a/arch/sparc/kernel/Makefile
5673 +++ b/arch/sparc/kernel/Makefile
5674 @@ -3,7 +3,7 @@
5675 #
5676
5677 asflags-y := -ansi
5678 -ccflags-y := -Werror
5679 +#ccflags-y := -Werror
5680
5681 extra-y := head_$(BITS).o
5682 extra-y += init_task.o
5683 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5684 index f793742..4d880af 100644
5685 --- a/arch/sparc/kernel/process_32.c
5686 +++ b/arch/sparc/kernel/process_32.c
5687 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
5688 rw->ins[4], rw->ins[5],
5689 rw->ins[6],
5690 rw->ins[7]);
5691 - printk("%pS\n", (void *) rw->ins[7]);
5692 + printk("%pA\n", (void *) rw->ins[7]);
5693 rw = (struct reg_window32 *) rw->ins[6];
5694 }
5695 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5696 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
5697
5698 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5699 r->psr, r->pc, r->npc, r->y, print_tainted());
5700 - printk("PC: <%pS>\n", (void *) r->pc);
5701 + printk("PC: <%pA>\n", (void *) r->pc);
5702 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5703 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5704 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5705 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5706 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5707 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5708 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5709 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5710
5711 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5712 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5713 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5714 rw = (struct reg_window32 *) fp;
5715 pc = rw->ins[7];
5716 printk("[%08lx : ", pc);
5717 - printk("%pS ] ", (void *) pc);
5718 + printk("%pA ] ", (void *) pc);
5719 fp = rw->ins[6];
5720 } while (++count < 16);
5721 printk("\n");
5722 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5723 index 39d8b05..d1a7d90 100644
5724 --- a/arch/sparc/kernel/process_64.c
5725 +++ b/arch/sparc/kernel/process_64.c
5726 @@ -182,14 +182,14 @@ static void show_regwindow(struct pt_regs *regs)
5727 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5728 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5729 if (regs->tstate & TSTATE_PRIV)
5730 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5731 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5732 }
5733
5734 void show_regs(struct pt_regs *regs)
5735 {
5736 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5737 regs->tpc, regs->tnpc, regs->y, print_tainted());
5738 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5739 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5740 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5741 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5742 regs->u_regs[3]);
5743 @@ -202,7 +202,7 @@ void show_regs(struct pt_regs *regs)
5744 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5745 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5746 regs->u_regs[15]);
5747 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5748 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5749 show_regwindow(regs);
5750 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5751 }
5752 @@ -287,7 +287,7 @@ void arch_trigger_all_cpu_backtrace(void)
5753 ((tp && tp->task) ? tp->task->pid : -1));
5754
5755 if (gp->tstate & TSTATE_PRIV) {
5756 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5757 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5758 (void *) gp->tpc,
5759 (void *) gp->o7,
5760 (void *) gp->i7,
5761 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5762 index 9388844..0075fd2 100644
5763 --- a/arch/sparc/kernel/ptrace_64.c
5764 +++ b/arch/sparc/kernel/ptrace_64.c
5765 @@ -1058,6 +1058,10 @@ long arch_ptrace(struct task_struct *child, long request,
5766 return ret;
5767 }
5768
5769 +#ifdef CONFIG_GRKERNSEC_SETXID
5770 +extern void gr_delayed_cred_worker(void);
5771 +#endif
5772 +
5773 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5774 {
5775 int ret = 0;
5776 @@ -1065,6 +1069,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5777 /* do the secure computing check first */
5778 secure_computing(regs->u_regs[UREG_G1]);
5779
5780 +#ifdef CONFIG_GRKERNSEC_SETXID
5781 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5782 + gr_delayed_cred_worker();
5783 +#endif
5784 +
5785 if (test_thread_flag(TIF_SYSCALL_TRACE))
5786 ret = tracehook_report_syscall_entry(regs);
5787
5788 @@ -1085,6 +1094,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5789
5790 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5791 {
5792 +#ifdef CONFIG_GRKERNSEC_SETXID
5793 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5794 + gr_delayed_cred_worker();
5795 +#endif
5796 +
5797 audit_syscall_exit(regs);
5798
5799 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
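
TIF_GRSEC_SETXID marks a task whose credentials were changed by another thread's set*id() call; folding it into _TIF_WORK_SYSCALL forces the slow syscall path, and both trace hooks test-and-clear the flag and run gr_delayed_cred_worker() so the new credentials are applied at a safe point. The worker itself is added by other hunks of this patch; the sketch below only shows the test-and-clear pattern on a plain flag word (not a real atomic):

    #include <stdio.h>

    #define TIF_GRSEC_SETXID  8

    static unsigned long thread_flags;

    /* Clears the flag and reports whether it was set (non-atomic sketch). */
    static int test_and_clear_flag(unsigned long *flags, int bit)
    {
        int was_set = !!(*flags & (1UL << bit));

        *flags &= ~(1UL << bit);
        return was_set;
    }

    static void gr_delayed_cred_worker_sketch(void)
    {
        printf("applying deferred credential change\n");
    }

    static void syscall_trace_enter_sketch(void)
    {
        if (test_and_clear_flag(&thread_flags, TIF_GRSEC_SETXID))
            gr_delayed_cred_worker_sketch();
        /* ... the normal tracing work would follow here ... */
    }

    int main(void)
    {
        thread_flags |= 1UL << TIF_GRSEC_SETXID;   /* set by a sibling thread */
        syscall_trace_enter_sketch();              /* runs the worker once */
        syscall_trace_enter_sketch();              /* flag is gone: no-op */
        return 0;
    }
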
5800 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5801 index 42b282f..28ce9f2 100644
5802 --- a/arch/sparc/kernel/sys_sparc_32.c
5803 +++ b/arch/sparc/kernel/sys_sparc_32.c
5804 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5805 if (ARCH_SUN4C && len > 0x20000000)
5806 return -ENOMEM;
5807 if (!addr)
5808 - addr = TASK_UNMAPPED_BASE;
5809 + addr = current->mm->mmap_base;
5810
5811 if (flags & MAP_SHARED)
5812 addr = COLOUR_ALIGN(addr);
5813 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5814 }
5815 if (TASK_SIZE - PAGE_SIZE - len < addr)
5816 return -ENOMEM;
5817 - if (!vmm || addr + len <= vmm->vm_start)
5818 + if (check_heap_stack_gap(vmm, addr, len))
5819 return addr;
5820 addr = vmm->vm_end;
5821 if (flags & MAP_SHARED)
5822 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5823 index 232df99..cee1f9c 100644
5824 --- a/arch/sparc/kernel/sys_sparc_64.c
5825 +++ b/arch/sparc/kernel/sys_sparc_64.c
5826 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5827 /* We do not accept a shared mapping if it would violate
5828 * cache aliasing constraints.
5829 */
5830 - if ((flags & MAP_SHARED) &&
5831 + if ((filp || (flags & MAP_SHARED)) &&
5832 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5833 return -EINVAL;
5834 return addr;
5835 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5836 if (filp || (flags & MAP_SHARED))
5837 do_color_align = 1;
5838
5839 +#ifdef CONFIG_PAX_RANDMMAP
5840 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5841 +#endif
5842 +
5843 if (addr) {
5844 if (do_color_align)
5845 addr = COLOUR_ALIGN(addr, pgoff);
5846 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5847 addr = PAGE_ALIGN(addr);
5848
5849 vma = find_vma(mm, addr);
5850 - if (task_size - len >= addr &&
5851 - (!vma || addr + len <= vma->vm_start))
5852 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5853 return addr;
5854 }
5855
5856 if (len > mm->cached_hole_size) {
5857 - start_addr = addr = mm->free_area_cache;
5858 + start_addr = addr = mm->free_area_cache;
5859 } else {
5860 - start_addr = addr = TASK_UNMAPPED_BASE;
5861 + start_addr = addr = mm->mmap_base;
5862 mm->cached_hole_size = 0;
5863 }
5864
5865 @@ -174,14 +177,14 @@ full_search:
5866 vma = find_vma(mm, VA_EXCLUDE_END);
5867 }
5868 if (unlikely(task_size < addr)) {
5869 - if (start_addr != TASK_UNMAPPED_BASE) {
5870 - start_addr = addr = TASK_UNMAPPED_BASE;
5871 + if (start_addr != mm->mmap_base) {
5872 + start_addr = addr = mm->mmap_base;
5873 mm->cached_hole_size = 0;
5874 goto full_search;
5875 }
5876 return -ENOMEM;
5877 }
5878 - if (likely(!vma || addr + len <= vma->vm_start)) {
5879 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5880 /*
5881 * Remember the place where we stopped the search:
5882 */
5883 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5884 /* We do not accept a shared mapping if it would violate
5885 * cache aliasing constraints.
5886 */
5887 - if ((flags & MAP_SHARED) &&
5888 + if ((filp || (flags & MAP_SHARED)) &&
5889 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5890 return -EINVAL;
5891 return addr;
5892 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5893 addr = PAGE_ALIGN(addr);
5894
5895 vma = find_vma(mm, addr);
5896 - if (task_size - len >= addr &&
5897 - (!vma || addr + len <= vma->vm_start))
5898 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5899 return addr;
5900 }
5901
5902 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5903 /* make sure it can fit in the remaining address space */
5904 if (likely(addr > len)) {
5905 vma = find_vma(mm, addr-len);
5906 - if (!vma || addr <= vma->vm_start) {
5907 + if (check_heap_stack_gap(vma, addr - len, len)) {
5908 /* remember the address as a hint for next time */
5909 return (mm->free_area_cache = addr-len);
5910 }
5911 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5912 if (unlikely(mm->mmap_base < len))
5913 goto bottomup;
5914
5915 - addr = mm->mmap_base-len;
5916 - if (do_color_align)
5917 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5918 + addr = mm->mmap_base - len;
5919
5920 do {
5921 + if (do_color_align)
5922 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5923 /*
5924 * Lookup failure means no vma is above this address,
5925 * else if new region fits below vma->vm_start,
5926 * return with success:
5927 */
5928 vma = find_vma(mm, addr);
5929 - if (likely(!vma || addr+len <= vma->vm_start)) {
5930 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5931 /* remember the address as a hint for next time */
5932 return (mm->free_area_cache = addr);
5933 }
5934 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5935 mm->cached_hole_size = vma->vm_start - addr;
5936
5937 /* try just below the current vma->vm_start */
5938 - addr = vma->vm_start-len;
5939 - if (do_color_align)
5940 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5941 - } while (likely(len < vma->vm_start));
5942 + addr = skip_heap_stack_gap(vma, len);
5943 + } while (!IS_ERR_VALUE(addr));
5944
5945 bottomup:
5946 /*
5947 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5948 gap == RLIM_INFINITY ||
5949 sysctl_legacy_va_layout) {
5950 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5951 +
5952 +#ifdef CONFIG_PAX_RANDMMAP
5953 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5954 + mm->mmap_base += mm->delta_mmap;
5955 +#endif
5956 +
5957 mm->get_unmapped_area = arch_get_unmapped_area;
5958 mm->unmap_area = arch_unmap_area;
5959 } else {
5960 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5961 gap = (task_size / 6 * 5);
5962
5963 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5964 +
5965 +#ifdef CONFIG_PAX_RANDMMAP
5966 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5967 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5968 +#endif
5969 +
5970 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5971 mm->unmap_area = arch_unmap_area_topdown;
5972 }
5973 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
5974 index 1d7e274..b39c527 100644
5975 --- a/arch/sparc/kernel/syscalls.S
5976 +++ b/arch/sparc/kernel/syscalls.S
5977 @@ -62,7 +62,7 @@ sys32_rt_sigreturn:
5978 #endif
5979 .align 32
5980 1: ldx [%g6 + TI_FLAGS], %l5
5981 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5982 + andcc %l5, _TIF_WORK_SYSCALL, %g0
5983 be,pt %icc, rtrap
5984 nop
5985 call syscall_trace_leave
5986 @@ -179,7 +179,7 @@ linux_sparc_syscall32:
5987
5988 srl %i5, 0, %o5 ! IEU1
5989 srl %i2, 0, %o2 ! IEU0 Group
5990 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
5991 + andcc %l0, _TIF_WORK_SYSCALL, %g0
5992 bne,pn %icc, linux_syscall_trace32 ! CTI
5993 mov %i0, %l5 ! IEU1
5994 call %l7 ! CTI Group brk forced
5995 @@ -202,7 +202,7 @@ linux_sparc_syscall:
5996
5997 mov %i3, %o3 ! IEU1
5998 mov %i4, %o4 ! IEU0 Group
5999 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6000 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6001 bne,pn %icc, linux_syscall_trace ! CTI Group
6002 mov %i0, %l5 ! IEU0
6003 2: call %l7 ! CTI Group brk forced
6004 @@ -226,7 +226,7 @@ ret_sys_call:
6005
6006 cmp %o0, -ERESTART_RESTARTBLOCK
6007 bgeu,pn %xcc, 1f
6008 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6009 + andcc %l0, _TIF_WORK_SYSCALL, %l6
6010 80:
6011 /* System call success, clear Carry condition code. */
6012 andn %g3, %g2, %g3
6013 @@ -241,7 +241,7 @@ ret_sys_call:
6014 /* System call failure, set Carry condition code.
6015 * Also, get abs(errno) to return to the process.
6016 */
6017 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6018 + andcc %l0, _TIF_WORK_SYSCALL, %l6
6019 sub %g0, %o0, %o0
6020 or %g3, %g2, %g3
6021 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
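
Replacing the four-flag andcc operand with a single _TIF_WORK_SYSCALL mask keeps the assembly in sync with the C header now that TIF_GRSEC_SETXID has joined the set; the comment in thread_info_64.h explains why all of these flags must stay below bit 12, so the combined mask still fits a SPARC signed 13-bit immediate. A small check of that property (TIF_SYSCALL_TRACE being bit 0 is an assumption of this sketch; the other bit numbers are taken from the header hunk above):

    #include <stdio.h>

    #define TIF_SYSCALL_TRACE       0
    #define TIF_GRSEC_SETXID        8
    #define TIF_SECCOMP             9
    #define TIF_SYSCALL_AUDIT       10
    #define TIF_SYSCALL_TRACEPOINT  11

    #define _TIF_WORK_SYSCALL \
        ((1 << TIF_SYSCALL_TRACE) | (1 << TIF_SECCOMP) | \
         (1 << TIF_SYSCALL_AUDIT) | (1 << TIF_SYSCALL_TRACEPOINT) | \
         (1 << TIF_GRSEC_SETXID))

    int main(void)
    {
        /* SPARC immediates are signed 13-bit, so the mask must stay below 4096. */
        printf("_TIF_WORK_SYSCALL = %#x (%s as an immediate)\n",
               _TIF_WORK_SYSCALL,
               _TIF_WORK_SYSCALL < 4096 ? "usable" : "too wide");
        return 0;
    }
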
6022 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6023 index 591f20c..0f1b925 100644
6024 --- a/arch/sparc/kernel/traps_32.c
6025 +++ b/arch/sparc/kernel/traps_32.c
6026 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
6027 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6028 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6029
6030 +extern void gr_handle_kernel_exploit(void);
6031 +
6032 void die_if_kernel(char *str, struct pt_regs *regs)
6033 {
6034 static int die_counter;
6035 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6036 count++ < 30 &&
6037 (((unsigned long) rw) >= PAGE_OFFSET) &&
6038 !(((unsigned long) rw) & 0x7)) {
6039 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
6040 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
6041 (void *) rw->ins[7]);
6042 rw = (struct reg_window32 *)rw->ins[6];
6043 }
6044 }
6045 printk("Instruction DUMP:");
6046 instruction_dump ((unsigned long *) regs->pc);
6047 - if(regs->psr & PSR_PS)
6048 + if(regs->psr & PSR_PS) {
6049 + gr_handle_kernel_exploit();
6050 do_exit(SIGKILL);
6051 + }
6052 do_exit(SIGSEGV);
6053 }
6054
6055 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6056 index 0cbdaa4..438e4c9 100644
6057 --- a/arch/sparc/kernel/traps_64.c
6058 +++ b/arch/sparc/kernel/traps_64.c
6059 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6060 i + 1,
6061 p->trapstack[i].tstate, p->trapstack[i].tpc,
6062 p->trapstack[i].tnpc, p->trapstack[i].tt);
6063 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6064 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6065 }
6066 }
6067
6068 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6069
6070 lvl -= 0x100;
6071 if (regs->tstate & TSTATE_PRIV) {
6072 +
6073 +#ifdef CONFIG_PAX_REFCOUNT
6074 + if (lvl == 6)
6075 + pax_report_refcount_overflow(regs);
6076 +#endif
6077 +
6078 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6079 die_if_kernel(buffer, regs);
6080 }
6081 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6082 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6083 {
6084 char buffer[32];
6085 -
6086 +
6087 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6088 0, lvl, SIGTRAP) == NOTIFY_STOP)
6089 return;
6090
6091 +#ifdef CONFIG_PAX_REFCOUNT
6092 + if (lvl == 6)
6093 + pax_report_refcount_overflow(regs);
6094 +#endif
6095 +
6096 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6097
6098 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6099 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6100 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6101 printk("%s" "ERROR(%d): ",
6102 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6103 - printk("TPC<%pS>\n", (void *) regs->tpc);
6104 + printk("TPC<%pA>\n", (void *) regs->tpc);
6105 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6106 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6107 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6108 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6109 smp_processor_id(),
6110 (type & 0x1) ? 'I' : 'D',
6111 regs->tpc);
6112 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6113 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6114 panic("Irrecoverable Cheetah+ parity error.");
6115 }
6116
6117 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6118 smp_processor_id(),
6119 (type & 0x1) ? 'I' : 'D',
6120 regs->tpc);
6121 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6122 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6123 }
6124
6125 struct sun4v_error_entry {
6126 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6127
6128 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6129 regs->tpc, tl);
6130 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6131 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6132 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6133 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6134 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6135 (void *) regs->u_regs[UREG_I7]);
6136 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6137 "pte[%lx] error[%lx]\n",
6138 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6139
6140 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6141 regs->tpc, tl);
6142 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6143 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6144 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6145 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6146 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6147 (void *) regs->u_regs[UREG_I7]);
6148 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6149 "pte[%lx] error[%lx]\n",
6150 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6151 fp = (unsigned long)sf->fp + STACK_BIAS;
6152 }
6153
6154 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6155 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6156 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6157 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6158 int index = tsk->curr_ret_stack;
6159 if (tsk->ret_stack && index >= graph) {
6160 pc = tsk->ret_stack[index - graph].ret;
6161 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6162 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6163 graph++;
6164 }
6165 }
6166 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6167 return (struct reg_window *) (fp + STACK_BIAS);
6168 }
6169
6170 +extern void gr_handle_kernel_exploit(void);
6171 +
6172 void die_if_kernel(char *str, struct pt_regs *regs)
6173 {
6174 static int die_counter;
6175 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6176 while (rw &&
6177 count++ < 30 &&
6178 kstack_valid(tp, (unsigned long) rw)) {
6179 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
6180 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
6181 (void *) rw->ins[7]);
6182
6183 rw = kernel_stack_up(rw);
6184 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6185 }
6186 user_instruction_dump ((unsigned int __user *) regs->tpc);
6187 }
6188 - if (regs->tstate & TSTATE_PRIV)
6189 + if (regs->tstate & TSTATE_PRIV) {
6190 + gr_handle_kernel_exploit();
6191 do_exit(SIGKILL);
6192 + }
6193 do_exit(SIGSEGV);
6194 }
6195 EXPORT_SYMBOL(die_if_kernel);
6196 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6197 index 76e4ac1..78f8bb1 100644
6198 --- a/arch/sparc/kernel/unaligned_64.c
6199 +++ b/arch/sparc/kernel/unaligned_64.c
6200 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6201 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6202
6203 if (__ratelimit(&ratelimit)) {
6204 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
6205 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
6206 regs->tpc, (void *) regs->tpc);
6207 }
6208 }
6209 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6210 index a3fc437..fea9957 100644
6211 --- a/arch/sparc/lib/Makefile
6212 +++ b/arch/sparc/lib/Makefile
6213 @@ -2,7 +2,7 @@
6214 #
6215
6216 asflags-y := -ansi -DST_DIV0=0x02
6217 -ccflags-y := -Werror
6218 +#ccflags-y := -Werror
6219
6220 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6221 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6222 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6223 index 59186e0..f747d7a 100644
6224 --- a/arch/sparc/lib/atomic_64.S
6225 +++ b/arch/sparc/lib/atomic_64.S
6226 @@ -18,7 +18,12 @@
6227 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6228 BACKOFF_SETUP(%o2)
6229 1: lduw [%o1], %g1
6230 - add %g1, %o0, %g7
6231 + addcc %g1, %o0, %g7
6232 +
6233 +#ifdef CONFIG_PAX_REFCOUNT
6234 + tvs %icc, 6
6235 +#endif
6236 +
6237 cas [%o1], %g1, %g7
6238 cmp %g1, %g7
6239 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6240 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6241 2: BACKOFF_SPIN(%o2, %o3, 1b)
6242 .size atomic_add, .-atomic_add
6243
6244 + .globl atomic_add_unchecked
6245 + .type atomic_add_unchecked,#function
6246 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6247 + BACKOFF_SETUP(%o2)
6248 +1: lduw [%o1], %g1
6249 + add %g1, %o0, %g7
6250 + cas [%o1], %g1, %g7
6251 + cmp %g1, %g7
6252 + bne,pn %icc, 2f
6253 + nop
6254 + retl
6255 + nop
6256 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6257 + .size atomic_add_unchecked, .-atomic_add_unchecked
6258 +
6259 .globl atomic_sub
6260 .type atomic_sub,#function
6261 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6262 BACKOFF_SETUP(%o2)
6263 1: lduw [%o1], %g1
6264 - sub %g1, %o0, %g7
6265 + subcc %g1, %o0, %g7
6266 +
6267 +#ifdef CONFIG_PAX_REFCOUNT
6268 + tvs %icc, 6
6269 +#endif
6270 +
6271 cas [%o1], %g1, %g7
6272 cmp %g1, %g7
6273 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6274 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6275 2: BACKOFF_SPIN(%o2, %o3, 1b)
6276 .size atomic_sub, .-atomic_sub
6277
6278 + .globl atomic_sub_unchecked
6279 + .type atomic_sub_unchecked,#function
6280 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6281 + BACKOFF_SETUP(%o2)
6282 +1: lduw [%o1], %g1
6283 + sub %g1, %o0, %g7
6284 + cas [%o1], %g1, %g7
6285 + cmp %g1, %g7
6286 + bne,pn %icc, 2f
6287 + nop
6288 + retl
6289 + nop
6290 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6291 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
6292 +
6293 .globl atomic_add_ret
6294 .type atomic_add_ret,#function
6295 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6296 BACKOFF_SETUP(%o2)
6297 1: lduw [%o1], %g1
6298 - add %g1, %o0, %g7
6299 + addcc %g1, %o0, %g7
6300 +
6301 +#ifdef CONFIG_PAX_REFCOUNT
6302 + tvs %icc, 6
6303 +#endif
6304 +
6305 cas [%o1], %g1, %g7
6306 cmp %g1, %g7
6307 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6308 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6309 2: BACKOFF_SPIN(%o2, %o3, 1b)
6310 .size atomic_add_ret, .-atomic_add_ret
6311
6312 + .globl atomic_add_ret_unchecked
6313 + .type atomic_add_ret_unchecked,#function
6314 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6315 + BACKOFF_SETUP(%o2)
6316 +1: lduw [%o1], %g1
6317 + addcc %g1, %o0, %g7
6318 + cas [%o1], %g1, %g7
6319 + cmp %g1, %g7
6320 + bne,pn %icc, 2f
6321 + add %g7, %o0, %g7
6322 + sra %g7, 0, %o0
6323 + retl
6324 + nop
6325 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6326 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6327 +
6328 .globl atomic_sub_ret
6329 .type atomic_sub_ret,#function
6330 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6331 BACKOFF_SETUP(%o2)
6332 1: lduw [%o1], %g1
6333 - sub %g1, %o0, %g7
6334 + subcc %g1, %o0, %g7
6335 +
6336 +#ifdef CONFIG_PAX_REFCOUNT
6337 + tvs %icc, 6
6338 +#endif
6339 +
6340 cas [%o1], %g1, %g7
6341 cmp %g1, %g7
6342 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6343 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6344 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6345 BACKOFF_SETUP(%o2)
6346 1: ldx [%o1], %g1
6347 - add %g1, %o0, %g7
6348 + addcc %g1, %o0, %g7
6349 +
6350 +#ifdef CONFIG_PAX_REFCOUNT
6351 + tvs %xcc, 6
6352 +#endif
6353 +
6354 casx [%o1], %g1, %g7
6355 cmp %g1, %g7
6356 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6357 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6358 2: BACKOFF_SPIN(%o2, %o3, 1b)
6359 .size atomic64_add, .-atomic64_add
6360
6361 + .globl atomic64_add_unchecked
6362 + .type atomic64_add_unchecked,#function
6363 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6364 + BACKOFF_SETUP(%o2)
6365 +1: ldx [%o1], %g1
6366 + addcc %g1, %o0, %g7
6367 + casx [%o1], %g1, %g7
6368 + cmp %g1, %g7
6369 + bne,pn %xcc, 2f
6370 + nop
6371 + retl
6372 + nop
6373 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6374 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
6375 +
6376 .globl atomic64_sub
6377 .type atomic64_sub,#function
6378 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6379 BACKOFF_SETUP(%o2)
6380 1: ldx [%o1], %g1
6381 - sub %g1, %o0, %g7
6382 + subcc %g1, %o0, %g7
6383 +
6384 +#ifdef CONFIG_PAX_REFCOUNT
6385 + tvs %xcc, 6
6386 +#endif
6387 +
6388 casx [%o1], %g1, %g7
6389 cmp %g1, %g7
6390 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6391 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6392 2: BACKOFF_SPIN(%o2, %o3, 1b)
6393 .size atomic64_sub, .-atomic64_sub
6394
6395 + .globl atomic64_sub_unchecked
6396 + .type atomic64_sub_unchecked,#function
6397 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6398 + BACKOFF_SETUP(%o2)
6399 +1: ldx [%o1], %g1
6400 + subcc %g1, %o0, %g7
6401 + casx [%o1], %g1, %g7
6402 + cmp %g1, %g7
6403 + bne,pn %xcc, 2f
6404 + nop
6405 + retl
6406 + nop
6407 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6408 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6409 +
6410 .globl atomic64_add_ret
6411 .type atomic64_add_ret,#function
6412 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6413 BACKOFF_SETUP(%o2)
6414 1: ldx [%o1], %g1
6415 - add %g1, %o0, %g7
6416 + addcc %g1, %o0, %g7
6417 +
6418 +#ifdef CONFIG_PAX_REFCOUNT
6419 + tvs %xcc, 6
6420 +#endif
6421 +
6422 casx [%o1], %g1, %g7
6423 cmp %g1, %g7
6424 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6425 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6426 2: BACKOFF_SPIN(%o2, %o3, 1b)
6427 .size atomic64_add_ret, .-atomic64_add_ret
6428
6429 + .globl atomic64_add_ret_unchecked
6430 + .type atomic64_add_ret_unchecked,#function
6431 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6432 + BACKOFF_SETUP(%o2)
6433 +1: ldx [%o1], %g1
6434 + addcc %g1, %o0, %g7
6435 + casx [%o1], %g1, %g7
6436 + cmp %g1, %g7
6437 + bne,pn %xcc, 2f
6438 + add %g7, %o0, %g7
6439 + mov %g7, %o0
6440 + retl
6441 + nop
6442 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6443 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6444 +
6445 .globl atomic64_sub_ret
6446 .type atomic64_sub_ret,#function
6447 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6448 BACKOFF_SETUP(%o2)
6449 1: ldx [%o1], %g1
6450 - sub %g1, %o0, %g7
6451 + subcc %g1, %o0, %g7
6452 +
6453 +#ifdef CONFIG_PAX_REFCOUNT
6454 + tvs %xcc, 6
6455 +#endif
6456 +
6457 casx [%o1], %g1, %g7
6458 cmp %g1, %g7
6459 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
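The addcc/subcc + "tvs %icc, 6" (or %xcc for the 64-bit ops) sequences above are the CONFIG_PAX_REFCOUNT instrumentation: the -cc forms of add/sub update the condition codes, and tvs ("trap on overflow set") raises a trap when the signed result wrapped, so an overflowing reference count is caught before cas/casx publishes the new value. The *_unchecked variants deliberately omit the trap for counters that are allowed to wrap. A minimal C sketch of the same idea, assuming a plain 32-bit counter rather than the kernel's atomic_t:

    #include <stdbool.h>
    #include <stdint.h>

    /* Overflow-checked increment: the software analogue of
     * "addcc ...; tvs %icc, 6" -- detect signed wrap before the
     * new value is stored. */
    static bool checked_add(int32_t *counter, int32_t inc)
    {
            int32_t result;

            if (__builtin_add_overflow(*counter, inc, &result))
                    return false;   /* would trap on sparc */

            *counter = result;
            return true;
    }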
6460 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6461 index f73c224..662af10 100644
6462 --- a/arch/sparc/lib/ksyms.c
6463 +++ b/arch/sparc/lib/ksyms.c
6464 @@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6465
6466 /* Atomic counter implementation. */
6467 EXPORT_SYMBOL(atomic_add);
6468 +EXPORT_SYMBOL(atomic_add_unchecked);
6469 EXPORT_SYMBOL(atomic_add_ret);
6470 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
6471 EXPORT_SYMBOL(atomic_sub);
6472 +EXPORT_SYMBOL(atomic_sub_unchecked);
6473 EXPORT_SYMBOL(atomic_sub_ret);
6474 EXPORT_SYMBOL(atomic64_add);
6475 +EXPORT_SYMBOL(atomic64_add_unchecked);
6476 EXPORT_SYMBOL(atomic64_add_ret);
6477 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6478 EXPORT_SYMBOL(atomic64_sub);
6479 +EXPORT_SYMBOL(atomic64_sub_unchecked);
6480 EXPORT_SYMBOL(atomic64_sub_ret);
6481
6482 /* Atomic bit operations. */
6483 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6484 index 301421c..e2535d1 100644
6485 --- a/arch/sparc/mm/Makefile
6486 +++ b/arch/sparc/mm/Makefile
6487 @@ -2,7 +2,7 @@
6488 #
6489
6490 asflags-y := -ansi
6491 -ccflags-y := -Werror
6492 +#ccflags-y := -Werror
6493
6494 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6495 obj-y += fault_$(BITS).o
6496 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6497 index 8023fd7..c8e89e9 100644
6498 --- a/arch/sparc/mm/fault_32.c
6499 +++ b/arch/sparc/mm/fault_32.c
6500 @@ -21,6 +21,9 @@
6501 #include <linux/perf_event.h>
6502 #include <linux/interrupt.h>
6503 #include <linux/kdebug.h>
6504 +#include <linux/slab.h>
6505 +#include <linux/pagemap.h>
6506 +#include <linux/compiler.h>
6507
6508 #include <asm/system.h>
6509 #include <asm/page.h>
6510 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6511 return safe_compute_effective_address(regs, insn);
6512 }
6513
6514 +#ifdef CONFIG_PAX_PAGEEXEC
6515 +#ifdef CONFIG_PAX_DLRESOLVE
6516 +static void pax_emuplt_close(struct vm_area_struct *vma)
6517 +{
6518 + vma->vm_mm->call_dl_resolve = 0UL;
6519 +}
6520 +
6521 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6522 +{
6523 + unsigned int *kaddr;
6524 +
6525 + vmf->page = alloc_page(GFP_HIGHUSER);
6526 + if (!vmf->page)
6527 + return VM_FAULT_OOM;
6528 +
6529 + kaddr = kmap(vmf->page);
6530 + memset(kaddr, 0, PAGE_SIZE);
6531 + kaddr[0] = 0x9DE3BFA8U; /* save */
6532 + flush_dcache_page(vmf->page);
6533 + kunmap(vmf->page);
6534 + return VM_FAULT_MAJOR;
6535 +}
6536 +
6537 +static const struct vm_operations_struct pax_vm_ops = {
6538 + .close = pax_emuplt_close,
6539 + .fault = pax_emuplt_fault
6540 +};
6541 +
6542 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6543 +{
6544 + int ret;
6545 +
6546 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6547 + vma->vm_mm = current->mm;
6548 + vma->vm_start = addr;
6549 + vma->vm_end = addr + PAGE_SIZE;
6550 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6551 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6552 + vma->vm_ops = &pax_vm_ops;
6553 +
6554 + ret = insert_vm_struct(current->mm, vma);
6555 + if (ret)
6556 + return ret;
6557 +
6558 + ++current->mm->total_vm;
6559 + return 0;
6560 +}
6561 +#endif
6562 +
6563 +/*
6564 + * PaX: decide what to do with offenders (regs->pc = fault address)
6565 + *
6566 + * returns 1 when task should be killed
6567 + * 2 when patched PLT trampoline was detected
6568 + * 3 when unpatched PLT trampoline was detected
6569 + */
6570 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6571 +{
6572 +
6573 +#ifdef CONFIG_PAX_EMUPLT
6574 + int err;
6575 +
6576 + do { /* PaX: patched PLT emulation #1 */
6577 + unsigned int sethi1, sethi2, jmpl;
6578 +
6579 + err = get_user(sethi1, (unsigned int *)regs->pc);
6580 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6581 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6582 +
6583 + if (err)
6584 + break;
6585 +
6586 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6587 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6588 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6589 + {
6590 + unsigned int addr;
6591 +
6592 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6593 + addr = regs->u_regs[UREG_G1];
6594 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6595 + regs->pc = addr;
6596 + regs->npc = addr+4;
6597 + return 2;
6598 + }
6599 + } while (0);
6600 +
6601 + { /* PaX: patched PLT emulation #2 */
6602 + unsigned int ba;
6603 +
6604 + err = get_user(ba, (unsigned int *)regs->pc);
6605 +
6606 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6607 + unsigned int addr;
6608 +
6609 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6610 + regs->pc = addr;
6611 + regs->npc = addr+4;
6612 + return 2;
6613 + }
6614 + }
6615 +
6616 + do { /* PaX: patched PLT emulation #3 */
6617 + unsigned int sethi, jmpl, nop;
6618 +
6619 + err = get_user(sethi, (unsigned int *)regs->pc);
6620 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6621 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6622 +
6623 + if (err)
6624 + break;
6625 +
6626 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6627 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6628 + nop == 0x01000000U)
6629 + {
6630 + unsigned int addr;
6631 +
6632 + addr = (sethi & 0x003FFFFFU) << 10;
6633 + regs->u_regs[UREG_G1] = addr;
6634 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6635 + regs->pc = addr;
6636 + regs->npc = addr+4;
6637 + return 2;
6638 + }
6639 + } while (0);
6640 +
6641 + do { /* PaX: unpatched PLT emulation step 1 */
6642 + unsigned int sethi, ba, nop;
6643 +
6644 + err = get_user(sethi, (unsigned int *)regs->pc);
6645 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6646 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6647 +
6648 + if (err)
6649 + break;
6650 +
6651 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6652 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6653 + nop == 0x01000000U)
6654 + {
6655 + unsigned int addr, save, call;
6656 +
6657 + if ((ba & 0xFFC00000U) == 0x30800000U)
6658 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6659 + else
6660 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6661 +
6662 + err = get_user(save, (unsigned int *)addr);
6663 + err |= get_user(call, (unsigned int *)(addr+4));
6664 + err |= get_user(nop, (unsigned int *)(addr+8));
6665 + if (err)
6666 + break;
6667 +
6668 +#ifdef CONFIG_PAX_DLRESOLVE
6669 + if (save == 0x9DE3BFA8U &&
6670 + (call & 0xC0000000U) == 0x40000000U &&
6671 + nop == 0x01000000U)
6672 + {
6673 + struct vm_area_struct *vma;
6674 + unsigned long call_dl_resolve;
6675 +
6676 + down_read(&current->mm->mmap_sem);
6677 + call_dl_resolve = current->mm->call_dl_resolve;
6678 + up_read(&current->mm->mmap_sem);
6679 + if (likely(call_dl_resolve))
6680 + goto emulate;
6681 +
6682 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6683 +
6684 + down_write(&current->mm->mmap_sem);
6685 + if (current->mm->call_dl_resolve) {
6686 + call_dl_resolve = current->mm->call_dl_resolve;
6687 + up_write(&current->mm->mmap_sem);
6688 + if (vma)
6689 + kmem_cache_free(vm_area_cachep, vma);
6690 + goto emulate;
6691 + }
6692 +
6693 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6694 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6695 + up_write(&current->mm->mmap_sem);
6696 + if (vma)
6697 + kmem_cache_free(vm_area_cachep, vma);
6698 + return 1;
6699 + }
6700 +
6701 + if (pax_insert_vma(vma, call_dl_resolve)) {
6702 + up_write(&current->mm->mmap_sem);
6703 + kmem_cache_free(vm_area_cachep, vma);
6704 + return 1;
6705 + }
6706 +
6707 + current->mm->call_dl_resolve = call_dl_resolve;
6708 + up_write(&current->mm->mmap_sem);
6709 +
6710 +emulate:
6711 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6712 + regs->pc = call_dl_resolve;
6713 + regs->npc = addr+4;
6714 + return 3;
6715 + }
6716 +#endif
6717 +
6718 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6719 + if ((save & 0xFFC00000U) == 0x05000000U &&
6720 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6721 + nop == 0x01000000U)
6722 + {
6723 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6724 + regs->u_regs[UREG_G2] = addr + 4;
6725 + addr = (save & 0x003FFFFFU) << 10;
6726 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6727 + regs->pc = addr;
6728 + regs->npc = addr+4;
6729 + return 3;
6730 + }
6731 + }
6732 + } while (0);
6733 +
6734 + do { /* PaX: unpatched PLT emulation step 2 */
6735 + unsigned int save, call, nop;
6736 +
6737 + err = get_user(save, (unsigned int *)(regs->pc-4));
6738 + err |= get_user(call, (unsigned int *)regs->pc);
6739 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6740 + if (err)
6741 + break;
6742 +
6743 + if (save == 0x9DE3BFA8U &&
6744 + (call & 0xC0000000U) == 0x40000000U &&
6745 + nop == 0x01000000U)
6746 + {
6747 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6748 +
6749 + regs->u_regs[UREG_RETPC] = regs->pc;
6750 + regs->pc = dl_resolve;
6751 + regs->npc = dl_resolve+4;
6752 + return 3;
6753 + }
6754 + } while (0);
6755 +#endif
6756 +
6757 + return 1;
6758 +}
6759 +
6760 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6761 +{
6762 + unsigned long i;
6763 +
6764 + printk(KERN_ERR "PAX: bytes at PC: ");
6765 + for (i = 0; i < 8; i++) {
6766 + unsigned int c;
6767 + if (get_user(c, (unsigned int *)pc+i))
6768 + printk(KERN_CONT "???????? ");
6769 + else
6770 + printk(KERN_CONT "%08x ", c);
6771 + }
6772 + printk("\n");
6773 +}
6774 +#endif
6775 +
6776 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6777 int text_fault)
6778 {
6779 @@ -280,6 +545,24 @@ good_area:
6780 if(!(vma->vm_flags & VM_WRITE))
6781 goto bad_area;
6782 } else {
6783 +
6784 +#ifdef CONFIG_PAX_PAGEEXEC
6785 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6786 + up_read(&mm->mmap_sem);
6787 + switch (pax_handle_fetch_fault(regs)) {
6788 +
6789 +#ifdef CONFIG_PAX_EMUPLT
6790 + case 2:
6791 + case 3:
6792 + return;
6793 +#endif
6794 +
6795 + }
6796 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6797 + do_group_exit(SIGKILL);
6798 + }
6799 +#endif
6800 +
6801 /* Allow reads even for write-only mappings */
6802 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6803 goto bad_area;
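The PLT recognizers above decode SPARC instruction words by hand. The recurring expression (((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2 is a branchless sign-extension of the 22-bit branch displacement (bit 21 is the sign bit), followed by the shift that converts instruction words to bytes; the result is then added to the address of the branch. A short sketch of the same decode, assuming an ordinary uint32_t instruction word:

    #include <stdint.h>

    /* Return the byte displacement encoded in a SPARC "ba" instruction.
     * The patch computes the same value with the or/xor/add form. */
    static int32_t ba_disp_bytes(uint32_t insn)
    {
            uint32_t d = insn & 0x003FFFFFU;                 /* disp22 field */
            uint32_t ext = (d ^ 0x00200000U) - 0x00200000U;  /* sign-extend bit 21 */

            return (int32_t)(ext << 2);                      /* words -> bytes */
    }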
6804 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6805 index 504c062..6fcb9c6 100644
6806 --- a/arch/sparc/mm/fault_64.c
6807 +++ b/arch/sparc/mm/fault_64.c
6808 @@ -21,6 +21,9 @@
6809 #include <linux/kprobes.h>
6810 #include <linux/kdebug.h>
6811 #include <linux/percpu.h>
6812 +#include <linux/slab.h>
6813 +#include <linux/pagemap.h>
6814 +#include <linux/compiler.h>
6815
6816 #include <asm/page.h>
6817 #include <asm/pgtable.h>
6818 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6819 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6820 regs->tpc);
6821 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6822 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6823 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6824 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6825 dump_stack();
6826 unhandled_fault(regs->tpc, current, regs);
6827 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6828 show_regs(regs);
6829 }
6830
6831 +#ifdef CONFIG_PAX_PAGEEXEC
6832 +#ifdef CONFIG_PAX_DLRESOLVE
6833 +static void pax_emuplt_close(struct vm_area_struct *vma)
6834 +{
6835 + vma->vm_mm->call_dl_resolve = 0UL;
6836 +}
6837 +
6838 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6839 +{
6840 + unsigned int *kaddr;
6841 +
6842 + vmf->page = alloc_page(GFP_HIGHUSER);
6843 + if (!vmf->page)
6844 + return VM_FAULT_OOM;
6845 +
6846 + kaddr = kmap(vmf->page);
6847 + memset(kaddr, 0, PAGE_SIZE);
6848 + kaddr[0] = 0x9DE3BFA8U; /* save */
6849 + flush_dcache_page(vmf->page);
6850 + kunmap(vmf->page);
6851 + return VM_FAULT_MAJOR;
6852 +}
6853 +
6854 +static const struct vm_operations_struct pax_vm_ops = {
6855 + .close = pax_emuplt_close,
6856 + .fault = pax_emuplt_fault
6857 +};
6858 +
6859 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6860 +{
6861 + int ret;
6862 +
6863 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6864 + vma->vm_mm = current->mm;
6865 + vma->vm_start = addr;
6866 + vma->vm_end = addr + PAGE_SIZE;
6867 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6868 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6869 + vma->vm_ops = &pax_vm_ops;
6870 +
6871 + ret = insert_vm_struct(current->mm, vma);
6872 + if (ret)
6873 + return ret;
6874 +
6875 + ++current->mm->total_vm;
6876 + return 0;
6877 +}
6878 +#endif
6879 +
6880 +/*
6881 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6882 + *
6883 + * returns 1 when task should be killed
6884 + * 2 when patched PLT trampoline was detected
6885 + * 3 when unpatched PLT trampoline was detected
6886 + */
6887 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6888 +{
6889 +
6890 +#ifdef CONFIG_PAX_EMUPLT
6891 + int err;
6892 +
6893 + do { /* PaX: patched PLT emulation #1 */
6894 + unsigned int sethi1, sethi2, jmpl;
6895 +
6896 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6897 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6898 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6899 +
6900 + if (err)
6901 + break;
6902 +
6903 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6904 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6905 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6906 + {
6907 + unsigned long addr;
6908 +
6909 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6910 + addr = regs->u_regs[UREG_G1];
6911 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6912 +
6913 + if (test_thread_flag(TIF_32BIT))
6914 + addr &= 0xFFFFFFFFUL;
6915 +
6916 + regs->tpc = addr;
6917 + regs->tnpc = addr+4;
6918 + return 2;
6919 + }
6920 + } while (0);
6921 +
6922 + { /* PaX: patched PLT emulation #2 */
6923 + unsigned int ba;
6924 +
6925 + err = get_user(ba, (unsigned int *)regs->tpc);
6926 +
6927 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6928 + unsigned long addr;
6929 +
6930 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6931 +
6932 + if (test_thread_flag(TIF_32BIT))
6933 + addr &= 0xFFFFFFFFUL;
6934 +
6935 + regs->tpc = addr;
6936 + regs->tnpc = addr+4;
6937 + return 2;
6938 + }
6939 + }
6940 +
6941 + do { /* PaX: patched PLT emulation #3 */
6942 + unsigned int sethi, jmpl, nop;
6943 +
6944 + err = get_user(sethi, (unsigned int *)regs->tpc);
6945 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6946 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6947 +
6948 + if (err)
6949 + break;
6950 +
6951 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6952 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6953 + nop == 0x01000000U)
6954 + {
6955 + unsigned long addr;
6956 +
6957 + addr = (sethi & 0x003FFFFFU) << 10;
6958 + regs->u_regs[UREG_G1] = addr;
6959 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6960 +
6961 + if (test_thread_flag(TIF_32BIT))
6962 + addr &= 0xFFFFFFFFUL;
6963 +
6964 + regs->tpc = addr;
6965 + regs->tnpc = addr+4;
6966 + return 2;
6967 + }
6968 + } while (0);
6969 +
6970 + do { /* PaX: patched PLT emulation #4 */
6971 + unsigned int sethi, mov1, call, mov2;
6972 +
6973 + err = get_user(sethi, (unsigned int *)regs->tpc);
6974 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6975 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6976 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6977 +
6978 + if (err)
6979 + break;
6980 +
6981 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6982 + mov1 == 0x8210000FU &&
6983 + (call & 0xC0000000U) == 0x40000000U &&
6984 + mov2 == 0x9E100001U)
6985 + {
6986 + unsigned long addr;
6987 +
6988 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6989 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6990 +
6991 + if (test_thread_flag(TIF_32BIT))
6992 + addr &= 0xFFFFFFFFUL;
6993 +
6994 + regs->tpc = addr;
6995 + regs->tnpc = addr+4;
6996 + return 2;
6997 + }
6998 + } while (0);
6999 +
7000 + do { /* PaX: patched PLT emulation #5 */
7001 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7002 +
7003 + err = get_user(sethi, (unsigned int *)regs->tpc);
7004 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7005 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7006 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7007 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7008 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7009 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7010 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7011 +
7012 + if (err)
7013 + break;
7014 +
7015 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7016 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7017 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7018 + (or1 & 0xFFFFE000U) == 0x82106000U &&
7019 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7020 + sllx == 0x83287020U &&
7021 + jmpl == 0x81C04005U &&
7022 + nop == 0x01000000U)
7023 + {
7024 + unsigned long addr;
7025 +
7026 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7027 + regs->u_regs[UREG_G1] <<= 32;
7028 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7029 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7030 + regs->tpc = addr;
7031 + regs->tnpc = addr+4;
7032 + return 2;
7033 + }
7034 + } while (0);
7035 +
7036 + do { /* PaX: patched PLT emulation #6 */
7037 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7038 +
7039 + err = get_user(sethi, (unsigned int *)regs->tpc);
7040 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7041 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7042 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7043 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
7044 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7045 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7046 +
7047 + if (err)
7048 + break;
7049 +
7050 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7051 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7052 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7053 + sllx == 0x83287020U &&
7054 + (or & 0xFFFFE000U) == 0x8A116000U &&
7055 + jmpl == 0x81C04005U &&
7056 + nop == 0x01000000U)
7057 + {
7058 + unsigned long addr;
7059 +
7060 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7061 + regs->u_regs[UREG_G1] <<= 32;
7062 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7063 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7064 + regs->tpc = addr;
7065 + regs->tnpc = addr+4;
7066 + return 2;
7067 + }
7068 + } while (0);
7069 +
7070 + do { /* PaX: unpatched PLT emulation step 1 */
7071 + unsigned int sethi, ba, nop;
7072 +
7073 + err = get_user(sethi, (unsigned int *)regs->tpc);
7074 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7075 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7076 +
7077 + if (err)
7078 + break;
7079 +
7080 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7081 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7082 + nop == 0x01000000U)
7083 + {
7084 + unsigned long addr;
7085 + unsigned int save, call;
7086 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7087 +
7088 + if ((ba & 0xFFC00000U) == 0x30800000U)
7089 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7090 + else
7091 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7092 +
7093 + if (test_thread_flag(TIF_32BIT))
7094 + addr &= 0xFFFFFFFFUL;
7095 +
7096 + err = get_user(save, (unsigned int *)addr);
7097 + err |= get_user(call, (unsigned int *)(addr+4));
7098 + err |= get_user(nop, (unsigned int *)(addr+8));
7099 + if (err)
7100 + break;
7101 +
7102 +#ifdef CONFIG_PAX_DLRESOLVE
7103 + if (save == 0x9DE3BFA8U &&
7104 + (call & 0xC0000000U) == 0x40000000U &&
7105 + nop == 0x01000000U)
7106 + {
7107 + struct vm_area_struct *vma;
7108 + unsigned long call_dl_resolve;
7109 +
7110 + down_read(&current->mm->mmap_sem);
7111 + call_dl_resolve = current->mm->call_dl_resolve;
7112 + up_read(&current->mm->mmap_sem);
7113 + if (likely(call_dl_resolve))
7114 + goto emulate;
7115 +
7116 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7117 +
7118 + down_write(&current->mm->mmap_sem);
7119 + if (current->mm->call_dl_resolve) {
7120 + call_dl_resolve = current->mm->call_dl_resolve;
7121 + up_write(&current->mm->mmap_sem);
7122 + if (vma)
7123 + kmem_cache_free(vm_area_cachep, vma);
7124 + goto emulate;
7125 + }
7126 +
7127 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7128 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7129 + up_write(&current->mm->mmap_sem);
7130 + if (vma)
7131 + kmem_cache_free(vm_area_cachep, vma);
7132 + return 1;
7133 + }
7134 +
7135 + if (pax_insert_vma(vma, call_dl_resolve)) {
7136 + up_write(&current->mm->mmap_sem);
7137 + kmem_cache_free(vm_area_cachep, vma);
7138 + return 1;
7139 + }
7140 +
7141 + current->mm->call_dl_resolve = call_dl_resolve;
7142 + up_write(&current->mm->mmap_sem);
7143 +
7144 +emulate:
7145 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7146 + regs->tpc = call_dl_resolve;
7147 + regs->tnpc = addr+4;
7148 + return 3;
7149 + }
7150 +#endif
7151 +
7152 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7153 + if ((save & 0xFFC00000U) == 0x05000000U &&
7154 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7155 + nop == 0x01000000U)
7156 + {
7157 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7158 + regs->u_regs[UREG_G2] = addr + 4;
7159 + addr = (save & 0x003FFFFFU) << 10;
7160 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7161 +
7162 + if (test_thread_flag(TIF_32BIT))
7163 + addr &= 0xFFFFFFFFUL;
7164 +
7165 + regs->tpc = addr;
7166 + regs->tnpc = addr+4;
7167 + return 3;
7168 + }
7169 +
7170 + /* PaX: 64-bit PLT stub */
7171 + err = get_user(sethi1, (unsigned int *)addr);
7172 + err |= get_user(sethi2, (unsigned int *)(addr+4));
7173 + err |= get_user(or1, (unsigned int *)(addr+8));
7174 + err |= get_user(or2, (unsigned int *)(addr+12));
7175 + err |= get_user(sllx, (unsigned int *)(addr+16));
7176 + err |= get_user(add, (unsigned int *)(addr+20));
7177 + err |= get_user(jmpl, (unsigned int *)(addr+24));
7178 + err |= get_user(nop, (unsigned int *)(addr+28));
7179 + if (err)
7180 + break;
7181 +
7182 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7183 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7184 + (or1 & 0xFFFFE000U) == 0x88112000U &&
7185 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7186 + sllx == 0x89293020U &&
7187 + add == 0x8A010005U &&
7188 + jmpl == 0x89C14000U &&
7189 + nop == 0x01000000U)
7190 + {
7191 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7192 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7193 + regs->u_regs[UREG_G4] <<= 32;
7194 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7195 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7196 + regs->u_regs[UREG_G4] = addr + 24;
7197 + addr = regs->u_regs[UREG_G5];
7198 + regs->tpc = addr;
7199 + regs->tnpc = addr+4;
7200 + return 3;
7201 + }
7202 + }
7203 + } while (0);
7204 +
7205 +#ifdef CONFIG_PAX_DLRESOLVE
7206 + do { /* PaX: unpatched PLT emulation step 2 */
7207 + unsigned int save, call, nop;
7208 +
7209 + err = get_user(save, (unsigned int *)(regs->tpc-4));
7210 + err |= get_user(call, (unsigned int *)regs->tpc);
7211 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7212 + if (err)
7213 + break;
7214 +
7215 + if (save == 0x9DE3BFA8U &&
7216 + (call & 0xC0000000U) == 0x40000000U &&
7217 + nop == 0x01000000U)
7218 + {
7219 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7220 +
7221 + if (test_thread_flag(TIF_32BIT))
7222 + dl_resolve &= 0xFFFFFFFFUL;
7223 +
7224 + regs->u_regs[UREG_RETPC] = regs->tpc;
7225 + regs->tpc = dl_resolve;
7226 + regs->tnpc = dl_resolve+4;
7227 + return 3;
7228 + }
7229 + } while (0);
7230 +#endif
7231 +
7232 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7233 + unsigned int sethi, ba, nop;
7234 +
7235 + err = get_user(sethi, (unsigned int *)regs->tpc);
7236 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7237 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7238 +
7239 + if (err)
7240 + break;
7241 +
7242 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7243 + (ba & 0xFFF00000U) == 0x30600000U &&
7244 + nop == 0x01000000U)
7245 + {
7246 + unsigned long addr;
7247 +
7248 + addr = (sethi & 0x003FFFFFU) << 10;
7249 + regs->u_regs[UREG_G1] = addr;
7250 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7251 +
7252 + if (test_thread_flag(TIF_32BIT))
7253 + addr &= 0xFFFFFFFFUL;
7254 +
7255 + regs->tpc = addr;
7256 + regs->tnpc = addr+4;
7257 + return 2;
7258 + }
7259 + } while (0);
7260 +
7261 +#endif
7262 +
7263 + return 1;
7264 +}
7265 +
7266 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7267 +{
7268 + unsigned long i;
7269 +
7270 + printk(KERN_ERR "PAX: bytes at PC: ");
7271 + for (i = 0; i < 8; i++) {
7272 + unsigned int c;
7273 + if (get_user(c, (unsigned int *)pc+i))
7274 + printk(KERN_CONT "???????? ");
7275 + else
7276 + printk(KERN_CONT "%08x ", c);
7277 + }
7278 + printk("\n");
7279 +}
7280 +#endif
7281 +
7282 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7283 {
7284 struct mm_struct *mm = current->mm;
7285 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7286 if (!vma)
7287 goto bad_area;
7288
7289 +#ifdef CONFIG_PAX_PAGEEXEC
7290 + /* PaX: detect ITLB misses on non-exec pages */
7291 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7292 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7293 + {
7294 + if (address != regs->tpc)
7295 + goto good_area;
7296 +
7297 + up_read(&mm->mmap_sem);
7298 + switch (pax_handle_fetch_fault(regs)) {
7299 +
7300 +#ifdef CONFIG_PAX_EMUPLT
7301 + case 2:
7302 + case 3:
7303 + return;
7304 +#endif
7305 +
7306 + }
7307 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7308 + do_group_exit(SIGKILL);
7309 + }
7310 +#endif
7311 +
7312 /* Pure DTLB misses do not tell us whether the fault causing
7313 * load/store/atomic was a write or not, it only says that there
7314 * was no match. So in such a case we (carefully) read the
7315 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7316 index 07e1453..0a7d9e9 100644
7317 --- a/arch/sparc/mm/hugetlbpage.c
7318 +++ b/arch/sparc/mm/hugetlbpage.c
7319 @@ -67,7 +67,7 @@ full_search:
7320 }
7321 return -ENOMEM;
7322 }
7323 - if (likely(!vma || addr + len <= vma->vm_start)) {
7324 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7325 /*
7326 * Remember the place where we stopped the search:
7327 */
7328 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7329 /* make sure it can fit in the remaining address space */
7330 if (likely(addr > len)) {
7331 vma = find_vma(mm, addr-len);
7332 - if (!vma || addr <= vma->vm_start) {
7333 + if (check_heap_stack_gap(vma, addr - len, len)) {
7334 /* remember the address as a hint for next time */
7335 return (mm->free_area_cache = addr-len);
7336 }
7337 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7338 if (unlikely(mm->mmap_base < len))
7339 goto bottomup;
7340
7341 - addr = (mm->mmap_base-len) & HPAGE_MASK;
7342 + addr = mm->mmap_base - len;
7343
7344 do {
7345 + addr &= HPAGE_MASK;
7346 /*
7347 * Lookup failure means no vma is above this address,
7348 * else if new region fits below vma->vm_start,
7349 * return with success:
7350 */
7351 vma = find_vma(mm, addr);
7352 - if (likely(!vma || addr+len <= vma->vm_start)) {
7353 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7354 /* remember the address as a hint for next time */
7355 return (mm->free_area_cache = addr);
7356 }
7357 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7358 mm->cached_hole_size = vma->vm_start - addr;
7359
7360 /* try just below the current vma->vm_start */
7361 - addr = (vma->vm_start-len) & HPAGE_MASK;
7362 - } while (likely(len < vma->vm_start));
7363 + addr = skip_heap_stack_gap(vma, len);
7364 + } while (!IS_ERR_VALUE(addr));
7365
7366 bottomup:
7367 /*
7368 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7369 if (addr) {
7370 addr = ALIGN(addr, HPAGE_SIZE);
7371 vma = find_vma(mm, addr);
7372 - if (task_size - len >= addr &&
7373 - (!vma || addr + len <= vma->vm_start))
7374 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7375 return addr;
7376 }
7377 if (mm->get_unmapped_area == arch_get_unmapped_area)
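Each hunk in this file replaces the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), a helper introduced elsewhere in this patch. Roughly, it accepts a candidate range only when it fits below the next mapping and, if that mapping is a stack-style (VM_GROWSDOWN) area, keeps an additional configurable guard gap below it. A stand-alone sketch of that shape, with stand-in types and a hypothetical fixed gap in place of the tunable one:

    #include <stdbool.h>

    /* Stand-in for struct vm_area_struct, illustration only. */
    struct vma_stub {
            unsigned long vm_start;
            bool grows_down;        /* stack-style mapping */
    };

    static bool gap_ok(const struct vma_stub *next,
                       unsigned long addr, unsigned long len)
    {
            const unsigned long guard_gap = 64UL << 10;     /* hypothetical 64KB */

            if (!next)
                    return true;                    /* nothing above the range */
            if (addr + len > next->vm_start)
                    return false;                   /* overlaps the next mapping */
            if (next->grows_down &&
                next->vm_start - (addr + len) < guard_gap)
                    return false;                   /* too close to a stack */
            return true;
    }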
7378 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7379 index 7b00de6..78239f4 100644
7380 --- a/arch/sparc/mm/init_32.c
7381 +++ b/arch/sparc/mm/init_32.c
7382 @@ -316,6 +316,9 @@ extern void device_scan(void);
7383 pgprot_t PAGE_SHARED __read_mostly;
7384 EXPORT_SYMBOL(PAGE_SHARED);
7385
7386 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7387 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7388 +
7389 void __init paging_init(void)
7390 {
7391 switch(sparc_cpu_model) {
7392 @@ -344,17 +347,17 @@ void __init paging_init(void)
7393
7394 /* Initialize the protection map with non-constant, MMU dependent values. */
7395 protection_map[0] = PAGE_NONE;
7396 - protection_map[1] = PAGE_READONLY;
7397 - protection_map[2] = PAGE_COPY;
7398 - protection_map[3] = PAGE_COPY;
7399 + protection_map[1] = PAGE_READONLY_NOEXEC;
7400 + protection_map[2] = PAGE_COPY_NOEXEC;
7401 + protection_map[3] = PAGE_COPY_NOEXEC;
7402 protection_map[4] = PAGE_READONLY;
7403 protection_map[5] = PAGE_READONLY;
7404 protection_map[6] = PAGE_COPY;
7405 protection_map[7] = PAGE_COPY;
7406 protection_map[8] = PAGE_NONE;
7407 - protection_map[9] = PAGE_READONLY;
7408 - protection_map[10] = PAGE_SHARED;
7409 - protection_map[11] = PAGE_SHARED;
7410 + protection_map[9] = PAGE_READONLY_NOEXEC;
7411 + protection_map[10] = PAGE_SHARED_NOEXEC;
7412 + protection_map[11] = PAGE_SHARED_NOEXEC;
7413 protection_map[12] = PAGE_READONLY;
7414 protection_map[13] = PAGE_READONLY;
7415 protection_map[14] = PAGE_SHARED;
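protection_map[] is indexed by the low four vm_flags bits (VM_READ=0x1, VM_WRITE=0x2, VM_EXEC=0x4, VM_SHARED=0x8), so entries 1-3 and 9-11 are exactly the read/write combinations without VM_EXEC; under CONFIG_PAX_PAGEEXEC the hunk above points those at the *_NOEXEC protections while the exec-capable entries (4-7, 12-15) keep their original values. A small sketch of the index computation, assuming the generic flag values:

    /* Generic vm_flags bits that form the protection_map index. */
    #define VM_READ   0x1UL
    #define VM_WRITE  0x2UL
    #define VM_EXEC   0x4UL
    #define VM_SHARED 0x8UL

    static unsigned int prot_index(unsigned long vm_flags)
    {
            /* e.g. VM_READ | VM_WRITE (private rw-) yields index 3,
             * one of the entries remapped to PAGE_COPY_NOEXEC. */
            return vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED);
    }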
7416 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7417 index cbef74e..c38fead 100644
7418 --- a/arch/sparc/mm/srmmu.c
7419 +++ b/arch/sparc/mm/srmmu.c
7420 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7421 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7422 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7423 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7424 +
7425 +#ifdef CONFIG_PAX_PAGEEXEC
7426 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7427 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7428 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7429 +#endif
7430 +
7431 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7432 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7433
7434 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7435 index 27fe667..36d474c 100644
7436 --- a/arch/tile/include/asm/atomic_64.h
7437 +++ b/arch/tile/include/asm/atomic_64.h
7438 @@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7439
7440 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7441
7442 +#define atomic64_read_unchecked(v) atomic64_read(v)
7443 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7444 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7445 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7446 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7447 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7448 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7449 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7450 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7451 +
7452 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7453 #define smp_mb__before_atomic_dec() smp_mb()
7454 #define smp_mb__after_atomic_dec() smp_mb()
7455 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7456 index 392e533..536b092 100644
7457 --- a/arch/tile/include/asm/cache.h
7458 +++ b/arch/tile/include/asm/cache.h
7459 @@ -15,11 +15,12 @@
7460 #ifndef _ASM_TILE_CACHE_H
7461 #define _ASM_TILE_CACHE_H
7462
7463 +#include <linux/const.h>
7464 #include <arch/chip.h>
7465
7466 /* bytes per L1 data cache line */
7467 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7468 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7469 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7470
7471 /* bytes per L2 cache line */
7472 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
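The L1_CACHE_BYTES changes here and in the other cache.h headers swap a plain (1 << SHIFT) for (_AC(1,UL) << SHIFT). _AC() appends the UL suffix only when the header is compiled as C and drops it under __ASSEMBLY__, so the same definition remains usable from .S files while C code gets an unsigned long constant, which avoids int-width and sign-extension surprises once the value is used in address masks. A simplified version of the macro from include/linux/const.h:

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)       X
    #else
    #define __AC(X, Y)      (X##Y)
    #define _AC(X, Y)       __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT  5
    #define L1_CACHE_BYTES  (_AC(1, UL) << L1_CACHE_SHIFT)  /* 32UL in C, 32 in asm */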
7473 diff --git a/arch/um/Makefile b/arch/um/Makefile
7474 index 28688e6..4c0aa1c 100644
7475 --- a/arch/um/Makefile
7476 +++ b/arch/um/Makefile
7477 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7478 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7479 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7480
7481 +ifdef CONSTIFY_PLUGIN
7482 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7483 +endif
7484 +
7485 #This will adjust *FLAGS accordingly to the platform.
7486 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7487
7488 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7489 index 19e1bdd..3665b77 100644
7490 --- a/arch/um/include/asm/cache.h
7491 +++ b/arch/um/include/asm/cache.h
7492 @@ -1,6 +1,7 @@
7493 #ifndef __UM_CACHE_H
7494 #define __UM_CACHE_H
7495
7496 +#include <linux/const.h>
7497
7498 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7499 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7500 @@ -12,6 +13,6 @@
7501 # define L1_CACHE_SHIFT 5
7502 #endif
7503
7504 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7505 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7506
7507 #endif
7508 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7509 index 6c03acd..a5e0215 100644
7510 --- a/arch/um/include/asm/kmap_types.h
7511 +++ b/arch/um/include/asm/kmap_types.h
7512 @@ -23,6 +23,7 @@ enum km_type {
7513 KM_IRQ1,
7514 KM_SOFTIRQ0,
7515 KM_SOFTIRQ1,
7516 + KM_CLEARPAGE,
7517 KM_TYPE_NR
7518 };
7519
7520 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7521 index 7cfc3ce..cbd1a58 100644
7522 --- a/arch/um/include/asm/page.h
7523 +++ b/arch/um/include/asm/page.h
7524 @@ -14,6 +14,9 @@
7525 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7526 #define PAGE_MASK (~(PAGE_SIZE-1))
7527
7528 +#define ktla_ktva(addr) (addr)
7529 +#define ktva_ktla(addr) (addr)
7530 +
7531 #ifndef __ASSEMBLY__
7532
7533 struct page;
7534 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7535 index 0032f92..cd151e0 100644
7536 --- a/arch/um/include/asm/pgtable-3level.h
7537 +++ b/arch/um/include/asm/pgtable-3level.h
7538 @@ -58,6 +58,7 @@
7539 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7540 #define pud_populate(mm, pud, pmd) \
7541 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7542 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7543
7544 #ifdef CONFIG_64BIT
7545 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7546 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7547 index 69f2490..2634831 100644
7548 --- a/arch/um/kernel/process.c
7549 +++ b/arch/um/kernel/process.c
7550 @@ -408,22 +408,6 @@ int singlestepping(void * t)
7551 return 2;
7552 }
7553
7554 -/*
7555 - * Only x86 and x86_64 have an arch_align_stack().
7556 - * All other arches have "#define arch_align_stack(x) (x)"
7557 - * in their asm/system.h
7558 - * As this is included in UML from asm-um/system-generic.h,
7559 - * we can use it to behave as the subarch does.
7560 - */
7561 -#ifndef arch_align_stack
7562 -unsigned long arch_align_stack(unsigned long sp)
7563 -{
7564 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7565 - sp -= get_random_int() % 8192;
7566 - return sp & ~0xf;
7567 -}
7568 -#endif
7569 -
7570 unsigned long get_wchan(struct task_struct *p)
7571 {
7572 unsigned long stack_page, sp, ip;
7573 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7574 index ad8f795..2c7eec6 100644
7575 --- a/arch/unicore32/include/asm/cache.h
7576 +++ b/arch/unicore32/include/asm/cache.h
7577 @@ -12,8 +12,10 @@
7578 #ifndef __UNICORE_CACHE_H__
7579 #define __UNICORE_CACHE_H__
7580
7581 -#define L1_CACHE_SHIFT (5)
7582 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7583 +#include <linux/const.h>
7584 +
7585 +#define L1_CACHE_SHIFT 5
7586 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7587
7588 /*
7589 * Memory returned by kmalloc() may be used for DMA, so we must make
7590 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7591 index 5bed94e..fbcf200 100644
7592 --- a/arch/x86/Kconfig
7593 +++ b/arch/x86/Kconfig
7594 @@ -226,7 +226,7 @@ config X86_HT
7595
7596 config X86_32_LAZY_GS
7597 def_bool y
7598 - depends on X86_32 && !CC_STACKPROTECTOR
7599 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7600
7601 config ARCH_HWEIGHT_CFLAGS
7602 string
7603 @@ -1058,7 +1058,7 @@ choice
7604
7605 config NOHIGHMEM
7606 bool "off"
7607 - depends on !X86_NUMAQ
7608 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7609 ---help---
7610 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7611 However, the address space of 32-bit x86 processors is only 4
7612 @@ -1095,7 +1095,7 @@ config NOHIGHMEM
7613
7614 config HIGHMEM4G
7615 bool "4GB"
7616 - depends on !X86_NUMAQ
7617 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7618 ---help---
7619 Select this if you have a 32-bit processor and between 1 and 4
7620 gigabytes of physical RAM.
7621 @@ -1149,7 +1149,7 @@ config PAGE_OFFSET
7622 hex
7623 default 0xB0000000 if VMSPLIT_3G_OPT
7624 default 0x80000000 if VMSPLIT_2G
7625 - default 0x78000000 if VMSPLIT_2G_OPT
7626 + default 0x70000000 if VMSPLIT_2G_OPT
7627 default 0x40000000 if VMSPLIT_1G
7628 default 0xC0000000
7629 depends on X86_32
7630 @@ -1539,6 +1539,7 @@ config SECCOMP
7631
7632 config CC_STACKPROTECTOR
7633 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7634 + depends on X86_64 || !PAX_MEMORY_UDEREF
7635 ---help---
7636 This option turns on the -fstack-protector GCC feature. This
7637 feature puts, at the beginning of functions, a canary value on
7638 @@ -1596,6 +1597,7 @@ config KEXEC_JUMP
7639 config PHYSICAL_START
7640 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7641 default "0x1000000"
7642 + range 0x400000 0x40000000
7643 ---help---
7644 This gives the physical address where the kernel is loaded.
7645
7646 @@ -1659,6 +1661,7 @@ config X86_NEED_RELOCS
7647 config PHYSICAL_ALIGN
7648 hex "Alignment value to which kernel should be aligned" if X86_32
7649 default "0x1000000"
7650 + range 0x400000 0x1000000 if PAX_KERNEXEC
7651 range 0x2000 0x1000000
7652 ---help---
7653 This value puts the alignment restrictions on physical address
7654 @@ -1690,9 +1693,10 @@ config HOTPLUG_CPU
7655 Say N if you want to disable CPU hotplug.
7656
7657 config COMPAT_VDSO
7658 - def_bool y
7659 + def_bool n
7660 prompt "Compat VDSO support"
7661 depends on X86_32 || IA32_EMULATION
7662 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7663 ---help---
7664 Map the 32-bit VDSO to the predictable old-style address too.
7665
7666 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7667 index 3c57033..22d44aa 100644
7668 --- a/arch/x86/Kconfig.cpu
7669 +++ b/arch/x86/Kconfig.cpu
7670 @@ -335,7 +335,7 @@ config X86_PPRO_FENCE
7671
7672 config X86_F00F_BUG
7673 def_bool y
7674 - depends on M586MMX || M586TSC || M586 || M486 || M386
7675 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7676
7677 config X86_INVD_BUG
7678 def_bool y
7679 @@ -359,7 +359,7 @@ config X86_POPAD_OK
7680
7681 config X86_ALIGNMENT_16
7682 def_bool y
7683 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7684 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7685
7686 config X86_INTEL_USERCOPY
7687 def_bool y
7688 @@ -405,7 +405,7 @@ config X86_CMPXCHG64
7689 # generates cmov.
7690 config X86_CMOV
7691 def_bool y
7692 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7693 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7694
7695 config X86_MINIMUM_CPU_FAMILY
7696 int
7697 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7698 index e46c214..7c72b55 100644
7699 --- a/arch/x86/Kconfig.debug
7700 +++ b/arch/x86/Kconfig.debug
7701 @@ -84,7 +84,7 @@ config X86_PTDUMP
7702 config DEBUG_RODATA
7703 bool "Write protect kernel read-only data structures"
7704 default y
7705 - depends on DEBUG_KERNEL
7706 + depends on DEBUG_KERNEL && BROKEN
7707 ---help---
7708 Mark the kernel read-only data as write-protected in the pagetables,
7709 in order to catch accidental (and incorrect) writes to such const
7710 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7711
7712 config DEBUG_SET_MODULE_RONX
7713 bool "Set loadable kernel module data as NX and text as RO"
7714 - depends on MODULES
7715 + depends on MODULES && BROKEN
7716 ---help---
7717 This option helps catch unintended modifications to loadable
7718 kernel module's text and read-only data. It also prevents execution
7719 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7720 index 209ba12..15140db 100644
7721 --- a/arch/x86/Makefile
7722 +++ b/arch/x86/Makefile
7723 @@ -46,6 +46,7 @@ else
7724 UTS_MACHINE := x86_64
7725 CHECKFLAGS += -D__x86_64__ -m64
7726
7727 + biarch := $(call cc-option,-m64)
7728 KBUILD_AFLAGS += -m64
7729 KBUILD_CFLAGS += -m64
7730
7731 @@ -201,3 +202,12 @@ define archhelp
7732 echo ' FDARGS="..." arguments for the booted kernel'
7733 echo ' FDINITRD=file initrd for the booted kernel'
7734 endef
7735 +
7736 +define OLD_LD
7737 +
7738 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7739 +*** Please upgrade your binutils to 2.18 or newer
7740 +endef
7741 +
7742 +archprepare:
7743 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7744 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7745 index 5a747dd..ff7b12c 100644
7746 --- a/arch/x86/boot/Makefile
7747 +++ b/arch/x86/boot/Makefile
7748 @@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7749 $(call cc-option, -fno-stack-protector) \
7750 $(call cc-option, -mpreferred-stack-boundary=2)
7751 KBUILD_CFLAGS += $(call cc-option, -m32)
7752 +ifdef CONSTIFY_PLUGIN
7753 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7754 +endif
7755 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7756 GCOV_PROFILE := n
7757
7758 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7759 index 878e4b9..20537ab 100644
7760 --- a/arch/x86/boot/bitops.h
7761 +++ b/arch/x86/boot/bitops.h
7762 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7763 u8 v;
7764 const u32 *p = (const u32 *)addr;
7765
7766 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7767 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7768 return v;
7769 }
7770
7771 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7772
7773 static inline void set_bit(int nr, void *addr)
7774 {
7775 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7776 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7777 }
7778
7779 #endif /* BOOT_BITOPS_H */
7780 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7781 index c7093bd..d4247ffe0 100644
7782 --- a/arch/x86/boot/boot.h
7783 +++ b/arch/x86/boot/boot.h
7784 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7785 static inline u16 ds(void)
7786 {
7787 u16 seg;
7788 - asm("movw %%ds,%0" : "=rm" (seg));
7789 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7790 return seg;
7791 }
7792
7793 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7794 static inline int memcmp(const void *s1, const void *s2, size_t len)
7795 {
7796 u8 diff;
7797 - asm("repe; cmpsb; setnz %0"
7798 + asm volatile("repe; cmpsb; setnz %0"
7799 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7800 return diff;
7801 }
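The asm -> asm volatile conversions in the boot code all address the same issue: an extended asm that has output operands and no volatile qualifier is treated as a pure function of its inputs, so the compiler is free to hoist it, combine duplicate copies, or delete it when the result looks unused, none of which is safe for statements that read live machine state such as %ds, %cr0, EFLAGS or MSRs. A minimal illustration of the difference, assuming GCC-style inline asm on x86:

    #include <stdint.h>

    static inline uint16_t read_ds_foldable(void)
    {
            uint16_t seg;
            /* Without volatile, two calls may be folded into one even if
             * %ds was reloaded in between. */
            asm("movw %%ds,%0" : "=rm" (seg));
            return seg;
    }

    static inline uint16_t read_ds_each_time(void)
    {
            uint16_t seg;
            /* volatile forces every call to re-read the register. */
            asm volatile("movw %%ds,%0" : "=rm" (seg));
            return seg;
    }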
7802 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7803 index fd55a2f..217b501 100644
7804 --- a/arch/x86/boot/compressed/Makefile
7805 +++ b/arch/x86/boot/compressed/Makefile
7806 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7807 KBUILD_CFLAGS += $(cflags-y)
7808 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7809 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7810 +ifdef CONSTIFY_PLUGIN
7811 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7812 +endif
7813
7814 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7815 GCOV_PROFILE := n
7816 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7817 index 0cdfc0d..6e79437 100644
7818 --- a/arch/x86/boot/compressed/eboot.c
7819 +++ b/arch/x86/boot/compressed/eboot.c
7820 @@ -122,7 +122,6 @@ again:
7821 *addr = max_addr;
7822 }
7823
7824 -free_pool:
7825 efi_call_phys1(sys_table->boottime->free_pool, map);
7826
7827 fail:
7828 @@ -186,7 +185,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7829 if (i == map_size / desc_size)
7830 status = EFI_NOT_FOUND;
7831
7832 -free_pool:
7833 efi_call_phys1(sys_table->boottime->free_pool, map);
7834 fail:
7835 return status;
7836 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7837 index c85e3ac..6f5aa80 100644
7838 --- a/arch/x86/boot/compressed/head_32.S
7839 +++ b/arch/x86/boot/compressed/head_32.S
7840 @@ -106,7 +106,7 @@ preferred_addr:
7841 notl %eax
7842 andl %eax, %ebx
7843 #else
7844 - movl $LOAD_PHYSICAL_ADDR, %ebx
7845 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7846 #endif
7847
7848 /* Target address to relocate to for decompression */
7849 @@ -192,7 +192,7 @@ relocated:
7850 * and where it was actually loaded.
7851 */
7852 movl %ebp, %ebx
7853 - subl $LOAD_PHYSICAL_ADDR, %ebx
7854 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7855 jz 2f /* Nothing to be done if loaded at compiled addr. */
7856 /*
7857 * Process relocations.
7858 @@ -200,8 +200,7 @@ relocated:
7859
7860 1: subl $4, %edi
7861 movl (%edi), %ecx
7862 - testl %ecx, %ecx
7863 - jz 2f
7864 + jecxz 2f
7865 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7866 jmp 1b
7867 2:
7868 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7869 index 87e03a1..0d94c76 100644
7870 --- a/arch/x86/boot/compressed/head_64.S
7871 +++ b/arch/x86/boot/compressed/head_64.S
7872 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7873 notl %eax
7874 andl %eax, %ebx
7875 #else
7876 - movl $LOAD_PHYSICAL_ADDR, %ebx
7877 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7878 #endif
7879
7880 /* Target address to relocate to for decompression */
7881 @@ -263,7 +263,7 @@ preferred_addr:
7882 notq %rax
7883 andq %rax, %rbp
7884 #else
7885 - movq $LOAD_PHYSICAL_ADDR, %rbp
7886 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7887 #endif
7888
7889 /* Target address to relocate to for decompression */
7890 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7891 index 7116dcb..d9ae1d7 100644
7892 --- a/arch/x86/boot/compressed/misc.c
7893 +++ b/arch/x86/boot/compressed/misc.c
7894 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7895 case PT_LOAD:
7896 #ifdef CONFIG_RELOCATABLE
7897 dest = output;
7898 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7899 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7900 #else
7901 dest = (void *)(phdr->p_paddr);
7902 #endif
7903 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7904 error("Destination address too large");
7905 #endif
7906 #ifndef CONFIG_RELOCATABLE
7907 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7908 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7909 error("Wrong destination address");
7910 #endif
7911
7912 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7913 index e77f4e4..17e511f 100644
7914 --- a/arch/x86/boot/compressed/relocs.c
7915 +++ b/arch/x86/boot/compressed/relocs.c
7916 @@ -13,8 +13,11 @@
7917
7918 static void die(char *fmt, ...);
7919
7920 +#include "../../../../include/generated/autoconf.h"
7921 +
7922 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7923 static Elf32_Ehdr ehdr;
7924 +static Elf32_Phdr *phdr;
7925 static unsigned long reloc_count, reloc_idx;
7926 static unsigned long *relocs;
7927
7928 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
7929 }
7930 }
7931
7932 +static void read_phdrs(FILE *fp)
7933 +{
7934 + unsigned int i;
7935 +
7936 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7937 + if (!phdr) {
7938 + die("Unable to allocate %d program headers\n",
7939 + ehdr.e_phnum);
7940 + }
7941 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7942 + die("Seek to %d failed: %s\n",
7943 + ehdr.e_phoff, strerror(errno));
7944 + }
7945 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7946 + die("Cannot read ELF program headers: %s\n",
7947 + strerror(errno));
7948 + }
7949 + for(i = 0; i < ehdr.e_phnum; i++) {
7950 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7951 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7952 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7953 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7954 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7955 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7956 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7957 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7958 + }
7959 +
7960 +}
7961 +
7962 static void read_shdrs(FILE *fp)
7963 {
7964 - int i;
7965 + unsigned int i;
7966 Elf32_Shdr shdr;
7967
7968 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7969 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
7970
7971 static void read_strtabs(FILE *fp)
7972 {
7973 - int i;
7974 + unsigned int i;
7975 for (i = 0; i < ehdr.e_shnum; i++) {
7976 struct section *sec = &secs[i];
7977 if (sec->shdr.sh_type != SHT_STRTAB) {
7978 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
7979
7980 static void read_symtabs(FILE *fp)
7981 {
7982 - int i,j;
7983 + unsigned int i,j;
7984 for (i = 0; i < ehdr.e_shnum; i++) {
7985 struct section *sec = &secs[i];
7986 if (sec->shdr.sh_type != SHT_SYMTAB) {
7987 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
7988
7989 static void read_relocs(FILE *fp)
7990 {
7991 - int i,j;
7992 + unsigned int i,j;
7993 + uint32_t base;
7994 +
7995 for (i = 0; i < ehdr.e_shnum; i++) {
7996 struct section *sec = &secs[i];
7997 if (sec->shdr.sh_type != SHT_REL) {
7998 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
7999 die("Cannot read symbol table: %s\n",
8000 strerror(errno));
8001 }
8002 + base = 0;
8003 + for (j = 0; j < ehdr.e_phnum; j++) {
8004 + if (phdr[j].p_type != PT_LOAD )
8005 + continue;
8006 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
8007 + continue;
8008 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
8009 + break;
8010 + }
8011 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
8012 Elf32_Rel *rel = &sec->reltab[j];
8013 - rel->r_offset = elf32_to_cpu(rel->r_offset);
8014 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
8015 rel->r_info = elf32_to_cpu(rel->r_info);
8016 }
8017 }
8018 @@ -396,13 +440,13 @@ static void read_relocs(FILE *fp)
8019
8020 static void print_absolute_symbols(void)
8021 {
8022 - int i;
8023 + unsigned int i;
8024 printf("Absolute symbols\n");
8025 printf(" Num: Value Size Type Bind Visibility Name\n");
8026 for (i = 0; i < ehdr.e_shnum; i++) {
8027 struct section *sec = &secs[i];
8028 char *sym_strtab;
8029 - int j;
8030 + unsigned int j;
8031
8032 if (sec->shdr.sh_type != SHT_SYMTAB) {
8033 continue;
8034 @@ -429,14 +473,14 @@ static void print_absolute_symbols(void)
8035
8036 static void print_absolute_relocs(void)
8037 {
8038 - int i, printed = 0;
8039 + unsigned int i, printed = 0;
8040
8041 for (i = 0; i < ehdr.e_shnum; i++) {
8042 struct section *sec = &secs[i];
8043 struct section *sec_applies, *sec_symtab;
8044 char *sym_strtab;
8045 Elf32_Sym *sh_symtab;
8046 - int j;
8047 + unsigned int j;
8048 if (sec->shdr.sh_type != SHT_REL) {
8049 continue;
8050 }
8051 @@ -497,13 +541,13 @@ static void print_absolute_relocs(void)
8052
8053 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8054 {
8055 - int i;
8056 + unsigned int i;
8057 /* Walk through the relocations */
8058 for (i = 0; i < ehdr.e_shnum; i++) {
8059 char *sym_strtab;
8060 Elf32_Sym *sh_symtab;
8061 struct section *sec_applies, *sec_symtab;
8062 - int j;
8063 + unsigned int j;
8064 struct section *sec = &secs[i];
8065
8066 if (sec->shdr.sh_type != SHT_REL) {
8067 @@ -528,6 +572,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8068 !is_rel_reloc(sym_name(sym_strtab, sym))) {
8069 continue;
8070 }
8071 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
8072 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
8073 + continue;
8074 +
8075 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
8076 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
8077 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
8078 + continue;
8079 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
8080 + continue;
8081 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
8082 + continue;
8083 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
8084 + continue;
8085 +#endif
8086 +
8087 switch (r_type) {
8088 case R_386_NONE:
8089 case R_386_PC32:
8090 @@ -569,7 +629,7 @@ static int cmp_relocs(const void *va, const void *vb)
8091
8092 static void emit_relocs(int as_text)
8093 {
8094 - int i;
8095 + unsigned int i;
8096 /* Count how many relocations I have and allocate space for them. */
8097 reloc_count = 0;
8098 walk_relocs(count_reloc);
8099 @@ -663,6 +723,7 @@ int main(int argc, char **argv)
8100 fname, strerror(errno));
8101 }
8102 read_ehdr(fp);
8103 + read_phdrs(fp);
8104 read_shdrs(fp);
8105 read_strtabs(fp);
8106 read_symtabs(fp);
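The loop added to read_relocs() above rebases each 32-bit relocation from its on-disk location to the kernel's virtual address: it finds the PT_LOAD segment whose file image contains the relocated section and adds the segment's physical-to-virtual delta plus CONFIG_PAGE_OFFSET. A standalone sketch of that calculation (illustrative only; PAGE_OFFSET stands in for CONFIG_PAGE_OFFSET):

#include <elf.h>
#include <stddef.h>

#define PAGE_OFFSET 0xc0000000UL        /* assumed stand-in for CONFIG_PAGE_OFFSET */

/* Return the value added to r_offset for a section at file offset sec_offset:
 * locate the PT_LOAD segment whose file image contains the section and
 * translate its physical load address back into virtual terms. */
static unsigned long reloc_base(const Elf32_Phdr *phdr, size_t phnum,
                                Elf32_Off sec_offset)
{
        size_t j;

        for (j = 0; j < phnum; j++) {
                if (phdr[j].p_type != PT_LOAD)
                        continue;
                if (sec_offset < phdr[j].p_offset ||
                    sec_offset >= phdr[j].p_offset + phdr[j].p_filesz)
                        continue;
                return PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
        }
        return 0;       /* no matching PT_LOAD: r_offset is left untouched */
}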
8107 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8108 index 4d3ff03..e4972ff 100644
8109 --- a/arch/x86/boot/cpucheck.c
8110 +++ b/arch/x86/boot/cpucheck.c
8111 @@ -74,7 +74,7 @@ static int has_fpu(void)
8112 u16 fcw = -1, fsw = -1;
8113 u32 cr0;
8114
8115 - asm("movl %%cr0,%0" : "=r" (cr0));
8116 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
8117 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8118 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8119 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8120 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8121 {
8122 u32 f0, f1;
8123
8124 - asm("pushfl ; "
8125 + asm volatile("pushfl ; "
8126 "pushfl ; "
8127 "popl %0 ; "
8128 "movl %0,%1 ; "
8129 @@ -115,7 +115,7 @@ static void get_flags(void)
8130 set_bit(X86_FEATURE_FPU, cpu.flags);
8131
8132 if (has_eflag(X86_EFLAGS_ID)) {
8133 - asm("cpuid"
8134 + asm volatile("cpuid"
8135 : "=a" (max_intel_level),
8136 "=b" (cpu_vendor[0]),
8137 "=d" (cpu_vendor[1]),
8138 @@ -124,7 +124,7 @@ static void get_flags(void)
8139
8140 if (max_intel_level >= 0x00000001 &&
8141 max_intel_level <= 0x0000ffff) {
8142 - asm("cpuid"
8143 + asm volatile("cpuid"
8144 : "=a" (tfms),
8145 "=c" (cpu.flags[4]),
8146 "=d" (cpu.flags[0])
8147 @@ -136,7 +136,7 @@ static void get_flags(void)
8148 cpu.model += ((tfms >> 16) & 0xf) << 4;
8149 }
8150
8151 - asm("cpuid"
8152 + asm volatile("cpuid"
8153 : "=a" (max_amd_level)
8154 : "a" (0x80000000)
8155 : "ebx", "ecx", "edx");
8156 @@ -144,7 +144,7 @@ static void get_flags(void)
8157 if (max_amd_level >= 0x80000001 &&
8158 max_amd_level <= 0x8000ffff) {
8159 u32 eax = 0x80000001;
8160 - asm("cpuid"
8161 + asm volatile("cpuid"
8162 : "+a" (eax),
8163 "=c" (cpu.flags[6]),
8164 "=d" (cpu.flags[1])
8165 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8166 u32 ecx = MSR_K7_HWCR;
8167 u32 eax, edx;
8168
8169 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8170 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8171 eax &= ~(1 << 15);
8172 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8173 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8174
8175 get_flags(); /* Make sure it really did something */
8176 err = check_flags();
8177 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8178 u32 ecx = MSR_VIA_FCR;
8179 u32 eax, edx;
8180
8181 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8182 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8183 eax |= (1<<1)|(1<<7);
8184 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8185 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8186
8187 set_bit(X86_FEATURE_CX8, cpu.flags);
8188 err = check_flags();
8189 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8190 u32 eax, edx;
8191 u32 level = 1;
8192
8193 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8194 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8195 - asm("cpuid"
8196 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8197 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8198 + asm volatile("cpuid"
8199 : "+a" (level), "=d" (cpu.flags[0])
8200 : : "ecx", "ebx");
8201 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8202 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8203
8204 err = check_flags();
8205 }
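The cpucheck.c hunks above add the volatile qualifier to every asm statement that reads or writes CPU state (cpuid, rdmsr/wrmsr, control registers). Without volatile, GCC may merge identical asm statements, move them, or drop one whose outputs it considers unused; volatile forces each one to be emitted where it appears. A minimal illustration of the qualified form, mirroring has_fpu():

static inline unsigned int read_cr0_sample(void)
{
        unsigned int cr0;

        /* volatile: the read touches hardware state, so the compiler must
         * emit it exactly here and exactly once */
        asm volatile("movl %%cr0,%0" : "=r" (cr0));
        return cr0;
}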
8206 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8207 index f1bbeeb..aff09cb 100644
8208 --- a/arch/x86/boot/header.S
8209 +++ b/arch/x86/boot/header.S
8210 @@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8211 # single linked list of
8212 # struct setup_data
8213
8214 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8215 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8216
8217 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8218 #define VO_INIT_SIZE (VO__end - VO__text)
8219 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8220 index db75d07..8e6d0af 100644
8221 --- a/arch/x86/boot/memory.c
8222 +++ b/arch/x86/boot/memory.c
8223 @@ -19,7 +19,7 @@
8224
8225 static int detect_memory_e820(void)
8226 {
8227 - int count = 0;
8228 + unsigned int count = 0;
8229 struct biosregs ireg, oreg;
8230 struct e820entry *desc = boot_params.e820_map;
8231 static struct e820entry buf; /* static so it is zeroed */
8232 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8233 index 11e8c6e..fdbb1ed 100644
8234 --- a/arch/x86/boot/video-vesa.c
8235 +++ b/arch/x86/boot/video-vesa.c
8236 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8237
8238 boot_params.screen_info.vesapm_seg = oreg.es;
8239 boot_params.screen_info.vesapm_off = oreg.di;
8240 + boot_params.screen_info.vesapm_size = oreg.cx;
8241 }
8242
8243 /*
8244 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8245 index 43eda28..5ab5fdb 100644
8246 --- a/arch/x86/boot/video.c
8247 +++ b/arch/x86/boot/video.c
8248 @@ -96,7 +96,7 @@ static void store_mode_params(void)
8249 static unsigned int get_entry(void)
8250 {
8251 char entry_buf[4];
8252 - int i, len = 0;
8253 + unsigned int i, len = 0;
8254 int key;
8255 unsigned int v;
8256
8257 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8258 index 5b577d5..3c1fed4 100644
8259 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
8260 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8261 @@ -8,6 +8,8 @@
8262 * including this sentence is retained in full.
8263 */
8264
8265 +#include <asm/alternative-asm.h>
8266 +
8267 .extern crypto_ft_tab
8268 .extern crypto_it_tab
8269 .extern crypto_fl_tab
8270 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8271 je B192; \
8272 leaq 32(r9),r9;
8273
8274 +#define ret pax_force_retaddr 0, 1; ret
8275 +
8276 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8277 movq r1,r2; \
8278 movq r3,r4; \
8279 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8280 index be6d9e3..21fbbca 100644
8281 --- a/arch/x86/crypto/aesni-intel_asm.S
8282 +++ b/arch/x86/crypto/aesni-intel_asm.S
8283 @@ -31,6 +31,7 @@
8284
8285 #include <linux/linkage.h>
8286 #include <asm/inst.h>
8287 +#include <asm/alternative-asm.h>
8288
8289 #ifdef __x86_64__
8290 .data
8291 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8292 pop %r14
8293 pop %r13
8294 pop %r12
8295 + pax_force_retaddr 0, 1
8296 ret
8297 +ENDPROC(aesni_gcm_dec)
8298
8299
8300 /*****************************************************************************
8301 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8302 pop %r14
8303 pop %r13
8304 pop %r12
8305 + pax_force_retaddr 0, 1
8306 ret
8307 +ENDPROC(aesni_gcm_enc)
8308
8309 #endif
8310
8311 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
8312 pxor %xmm1, %xmm0
8313 movaps %xmm0, (TKEYP)
8314 add $0x10, TKEYP
8315 + pax_force_retaddr_bts
8316 ret
8317
8318 .align 4
8319 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
8320 shufps $0b01001110, %xmm2, %xmm1
8321 movaps %xmm1, 0x10(TKEYP)
8322 add $0x20, TKEYP
8323 + pax_force_retaddr_bts
8324 ret
8325
8326 .align 4
8327 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
8328
8329 movaps %xmm0, (TKEYP)
8330 add $0x10, TKEYP
8331 + pax_force_retaddr_bts
8332 ret
8333
8334 .align 4
8335 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
8336 pxor %xmm1, %xmm2
8337 movaps %xmm2, (TKEYP)
8338 add $0x10, TKEYP
8339 + pax_force_retaddr_bts
8340 ret
8341
8342 /*
8343 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8344 #ifndef __x86_64__
8345 popl KEYP
8346 #endif
8347 + pax_force_retaddr 0, 1
8348 ret
8349 +ENDPROC(aesni_set_key)
8350
8351 /*
8352 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8353 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8354 popl KLEN
8355 popl KEYP
8356 #endif
8357 + pax_force_retaddr 0, 1
8358 ret
8359 +ENDPROC(aesni_enc)
8360
8361 /*
8362 * _aesni_enc1: internal ABI
8363 @@ -1959,6 +1972,7 @@ _aesni_enc1:
8364 AESENC KEY STATE
8365 movaps 0x70(TKEYP), KEY
8366 AESENCLAST KEY STATE
8367 + pax_force_retaddr_bts
8368 ret
8369
8370 /*
8371 @@ -2067,6 +2081,7 @@ _aesni_enc4:
8372 AESENCLAST KEY STATE2
8373 AESENCLAST KEY STATE3
8374 AESENCLAST KEY STATE4
8375 + pax_force_retaddr_bts
8376 ret
8377
8378 /*
8379 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8380 popl KLEN
8381 popl KEYP
8382 #endif
8383 + pax_force_retaddr 0, 1
8384 ret
8385 +ENDPROC(aesni_dec)
8386
8387 /*
8388 * _aesni_dec1: internal ABI
8389 @@ -2146,6 +2163,7 @@ _aesni_dec1:
8390 AESDEC KEY STATE
8391 movaps 0x70(TKEYP), KEY
8392 AESDECLAST KEY STATE
8393 + pax_force_retaddr_bts
8394 ret
8395
8396 /*
8397 @@ -2254,6 +2272,7 @@ _aesni_dec4:
8398 AESDECLAST KEY STATE2
8399 AESDECLAST KEY STATE3
8400 AESDECLAST KEY STATE4
8401 + pax_force_retaddr_bts
8402 ret
8403
8404 /*
8405 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8406 popl KEYP
8407 popl LEN
8408 #endif
8409 + pax_force_retaddr 0, 1
8410 ret
8411 +ENDPROC(aesni_ecb_enc)
8412
8413 /*
8414 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8415 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8416 popl KEYP
8417 popl LEN
8418 #endif
8419 + pax_force_retaddr 0, 1
8420 ret
8421 +ENDPROC(aesni_ecb_dec)
8422
8423 /*
8424 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8425 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8426 popl LEN
8427 popl IVP
8428 #endif
8429 + pax_force_retaddr 0, 1
8430 ret
8431 +ENDPROC(aesni_cbc_enc)
8432
8433 /*
8434 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8435 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
8436 popl LEN
8437 popl IVP
8438 #endif
8439 + pax_force_retaddr 0, 1
8440 ret
8441 +ENDPROC(aesni_cbc_dec)
8442
8443 #ifdef __x86_64__
8444 .align 16
8445 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
8446 mov $1, TCTR_LOW
8447 MOVQ_R64_XMM TCTR_LOW INC
8448 MOVQ_R64_XMM CTR TCTR_LOW
8449 + pax_force_retaddr_bts
8450 ret
8451
8452 /*
8453 @@ -2552,6 +2580,7 @@ _aesni_inc:
8454 .Linc_low:
8455 movaps CTR, IV
8456 PSHUFB_XMM BSWAP_MASK IV
8457 + pax_force_retaddr_bts
8458 ret
8459
8460 /*
8461 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
8462 .Lctr_enc_ret:
8463 movups IV, (IVP)
8464 .Lctr_enc_just_ret:
8465 + pax_force_retaddr 0, 1
8466 ret
8467 +ENDPROC(aesni_ctr_enc)
8468 #endif
8469 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8470 index 391d245..67f35c2 100644
8471 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8472 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8473 @@ -20,6 +20,8 @@
8474 *
8475 */
8476
8477 +#include <asm/alternative-asm.h>
8478 +
8479 .file "blowfish-x86_64-asm.S"
8480 .text
8481
8482 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
8483 jnz __enc_xor;
8484
8485 write_block();
8486 + pax_force_retaddr 0, 1
8487 ret;
8488 __enc_xor:
8489 xor_block();
8490 + pax_force_retaddr 0, 1
8491 ret;
8492
8493 .align 8
8494 @@ -188,6 +192,7 @@ blowfish_dec_blk:
8495
8496 movq %r11, %rbp;
8497
8498 + pax_force_retaddr 0, 1
8499 ret;
8500
8501 /**********************************************************************
8502 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8503
8504 popq %rbx;
8505 popq %rbp;
8506 + pax_force_retaddr 0, 1
8507 ret;
8508
8509 __enc_xor4:
8510 @@ -349,6 +355,7 @@ __enc_xor4:
8511
8512 popq %rbx;
8513 popq %rbp;
8514 + pax_force_retaddr 0, 1
8515 ret;
8516
8517 .align 8
8518 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8519 popq %rbx;
8520 popq %rbp;
8521
8522 + pax_force_retaddr 0, 1
8523 ret;
8524
8525 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8526 index 6214a9b..1f4fc9a 100644
8527 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8528 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8529 @@ -1,3 +1,5 @@
8530 +#include <asm/alternative-asm.h>
8531 +
8532 # enter ECRYPT_encrypt_bytes
8533 .text
8534 .p2align 5
8535 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8536 add %r11,%rsp
8537 mov %rdi,%rax
8538 mov %rsi,%rdx
8539 + pax_force_retaddr 0, 1
8540 ret
8541 # bytesatleast65:
8542 ._bytesatleast65:
8543 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
8544 add %r11,%rsp
8545 mov %rdi,%rax
8546 mov %rsi,%rdx
8547 + pax_force_retaddr
8548 ret
8549 # enter ECRYPT_ivsetup
8550 .text
8551 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8552 add %r11,%rsp
8553 mov %rdi,%rax
8554 mov %rsi,%rdx
8555 + pax_force_retaddr
8556 ret
8557 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8558 index 7f24a15..9cd3ffe 100644
8559 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8560 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8561 @@ -24,6 +24,8 @@
8562 *
8563 */
8564
8565 +#include <asm/alternative-asm.h>
8566 +
8567 .file "serpent-sse2-x86_64-asm_64.S"
8568 .text
8569
8570 @@ -695,12 +697,14 @@ __serpent_enc_blk_8way:
8571 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8572 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8573
8574 + pax_force_retaddr
8575 ret;
8576
8577 __enc_xor8:
8578 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8579 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8580
8581 + pax_force_retaddr
8582 ret;
8583
8584 .align 8
8585 @@ -758,4 +762,5 @@ serpent_dec_blk_8way:
8586 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8587 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8588
8589 + pax_force_retaddr
8590 ret;
8591 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8592 index b2c2f57..8470cab 100644
8593 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8594 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8595 @@ -28,6 +28,8 @@
8596 * (at your option) any later version.
8597 */
8598
8599 +#include <asm/alternative-asm.h>
8600 +
8601 #define CTX %rdi // arg1
8602 #define BUF %rsi // arg2
8603 #define CNT %rdx // arg3
8604 @@ -104,6 +106,7 @@
8605 pop %r12
8606 pop %rbp
8607 pop %rbx
8608 + pax_force_retaddr 0, 1
8609 ret
8610
8611 .size \name, .-\name
8612 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8613 index 5b012a2..36d5364 100644
8614 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8615 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8616 @@ -20,6 +20,8 @@
8617 *
8618 */
8619
8620 +#include <asm/alternative-asm.h>
8621 +
8622 .file "twofish-x86_64-asm-3way.S"
8623 .text
8624
8625 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8626 popq %r13;
8627 popq %r14;
8628 popq %r15;
8629 + pax_force_retaddr 0, 1
8630 ret;
8631
8632 __enc_xor3:
8633 @@ -271,6 +274,7 @@ __enc_xor3:
8634 popq %r13;
8635 popq %r14;
8636 popq %r15;
8637 + pax_force_retaddr 0, 1
8638 ret;
8639
8640 .global twofish_dec_blk_3way
8641 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8642 popq %r13;
8643 popq %r14;
8644 popq %r15;
8645 + pax_force_retaddr 0, 1
8646 ret;
8647
8648 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8649 index 7bcf3fc..f53832f 100644
8650 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8651 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8652 @@ -21,6 +21,7 @@
8653 .text
8654
8655 #include <asm/asm-offsets.h>
8656 +#include <asm/alternative-asm.h>
8657
8658 #define a_offset 0
8659 #define b_offset 4
8660 @@ -268,6 +269,7 @@ twofish_enc_blk:
8661
8662 popq R1
8663 movq $1,%rax
8664 + pax_force_retaddr 0, 1
8665 ret
8666
8667 twofish_dec_blk:
8668 @@ -319,4 +321,5 @@ twofish_dec_blk:
8669
8670 popq R1
8671 movq $1,%rax
8672 + pax_force_retaddr 0, 1
8673 ret
8674 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8675 index 39e4909..887aa7e 100644
8676 --- a/arch/x86/ia32/ia32_aout.c
8677 +++ b/arch/x86/ia32/ia32_aout.c
8678 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8679 unsigned long dump_start, dump_size;
8680 struct user32 dump;
8681
8682 + memset(&dump, 0, sizeof(dump));
8683 +
8684 fs = get_fs();
8685 set_fs(KERNEL_DS);
8686 has_dumped = 1;
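The single memset() added to aout_core_dump() above zeroes the on-stack struct user32 before its fields are filled in and the whole object is written to the core file; otherwise padding and any unset members would carry leftover kernel stack bytes out to userland. A minimal sketch of the pattern, using a hypothetical structure:

#include <string.h>

/* Hypothetical structure with implicit padding between its members. */
struct sample_dump {
        char  tag;      /* padding bytes typically follow here */
        int   value;
};

/* Clearing the whole object first guarantees the padding written out later
 * is zero rather than uninitialized stack contents. */
static void prepare_dump(struct sample_dump *d)
{
        memset(d, 0, sizeof(*d));
        d->tag = 'U';
        d->value = 42;
}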
8687 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8688 index 6557769..ef6ae89 100644
8689 --- a/arch/x86/ia32/ia32_signal.c
8690 +++ b/arch/x86/ia32/ia32_signal.c
8691 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8692 }
8693 seg = get_fs();
8694 set_fs(KERNEL_DS);
8695 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8696 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8697 set_fs(seg);
8698 if (ret >= 0 && uoss_ptr) {
8699 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8700 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8701 */
8702 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8703 size_t frame_size,
8704 - void **fpstate)
8705 + void __user **fpstate)
8706 {
8707 unsigned long sp;
8708
8709 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8710
8711 if (used_math()) {
8712 sp = sp - sig_xstate_ia32_size;
8713 - *fpstate = (struct _fpstate_ia32 *) sp;
8714 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8715 if (save_i387_xstate_ia32(*fpstate) < 0)
8716 return (void __user *) -1L;
8717 }
8718 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8719 sp -= frame_size;
8720 /* Align the stack pointer according to the i386 ABI,
8721 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8722 - sp = ((sp + 4) & -16ul) - 4;
8723 + sp = ((sp - 12) & -16ul) - 4;
8724 return (void __user *) sp;
8725 }
8726
8727 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8728 * These are actually not used anymore, but left because some
8729 * gdb versions depend on them as a marker.
8730 */
8731 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8732 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8733 } put_user_catch(err);
8734
8735 if (err)
8736 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8737 0xb8,
8738 __NR_ia32_rt_sigreturn,
8739 0x80cd,
8740 - 0,
8741 + 0
8742 };
8743
8744 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8745 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8746
8747 if (ka->sa.sa_flags & SA_RESTORER)
8748 restorer = ka->sa.sa_restorer;
8749 + else if (current->mm->context.vdso)
8750 + /* Return stub is in 32bit vsyscall page */
8751 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8752 else
8753 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8754 - rt_sigreturn);
8755 + restorer = &frame->retcode;
8756 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8757
8758 /*
8759 * Not actually used anymore, but left because some gdb
8760 * versions need it.
8761 */
8762 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8763 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8764 } put_user_catch(err);
8765
8766 if (err)
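Besides falling back to frame->retcode when no vdso is mapped, the ia32_signal.c hunks change the signal-frame rounding from ((sp + 4) & -16ul) - 4 to ((sp - 12) & -16ul) - 4. Both forms satisfy the i386 ABI requirement quoted in the code, ((sp + 4) & 15) == 0 on function entry; the replacement additionally guarantees the frame starts at least 16 bytes below the incoming sp. A small check of that arithmetic (illustrative only, assuming a normal user stack pointer well above 16):

#include <assert.h>

static void check_sigframe_rounding(unsigned long sp)
{
        unsigned long old_sp = ((sp + 4) & -16ul) - 4;  /* previous formula    */
        unsigned long new_sp = ((sp - 12) & -16ul) - 4; /* formula after patch */

        assert(((old_sp + 4) & 15) == 0);       /* ABI alignment holds either way */
        assert(((new_sp + 4) & 15) == 0);
        assert(new_sp <= sp - 16);              /* new frame always sits below sp */
}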
8767 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8768 index e3e7340..05ed805 100644
8769 --- a/arch/x86/ia32/ia32entry.S
8770 +++ b/arch/x86/ia32/ia32entry.S
8771 @@ -13,8 +13,10 @@
8772 #include <asm/thread_info.h>
8773 #include <asm/segment.h>
8774 #include <asm/irqflags.h>
8775 +#include <asm/pgtable.h>
8776 #include <linux/linkage.h>
8777 #include <linux/err.h>
8778 +#include <asm/alternative-asm.h>
8779
8780 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8781 #include <linux/elf-em.h>
8782 @@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8783 ENDPROC(native_irq_enable_sysexit)
8784 #endif
8785
8786 + .macro pax_enter_kernel_user
8787 + pax_set_fptr_mask
8788 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8789 + call pax_enter_kernel_user
8790 +#endif
8791 + .endm
8792 +
8793 + .macro pax_exit_kernel_user
8794 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8795 + call pax_exit_kernel_user
8796 +#endif
8797 +#ifdef CONFIG_PAX_RANDKSTACK
8798 + pushq %rax
8799 + pushq %r11
8800 + call pax_randomize_kstack
8801 + popq %r11
8802 + popq %rax
8803 +#endif
8804 + .endm
8805 +
8806 +.macro pax_erase_kstack
8807 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8808 + call pax_erase_kstack
8809 +#endif
8810 +.endm
8811 +
8812 /*
8813 * 32bit SYSENTER instruction entry.
8814 *
8815 @@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8816 CFI_REGISTER rsp,rbp
8817 SWAPGS_UNSAFE_STACK
8818 movq PER_CPU_VAR(kernel_stack), %rsp
8819 - addq $(KERNEL_STACK_OFFSET),%rsp
8820 - /*
8821 - * No need to follow this irqs on/off section: the syscall
8822 - * disabled irqs, here we enable it straight after entry:
8823 - */
8824 - ENABLE_INTERRUPTS(CLBR_NONE)
8825 movl %ebp,%ebp /* zero extension */
8826 pushq_cfi $__USER32_DS
8827 /*CFI_REL_OFFSET ss,0*/
8828 @@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8829 CFI_REL_OFFSET rsp,0
8830 pushfq_cfi
8831 /*CFI_REL_OFFSET rflags,0*/
8832 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8833 - CFI_REGISTER rip,r10
8834 + orl $X86_EFLAGS_IF,(%rsp)
8835 + GET_THREAD_INFO(%r11)
8836 + movl TI_sysenter_return(%r11), %r11d
8837 + CFI_REGISTER rip,r11
8838 pushq_cfi $__USER32_CS
8839 /*CFI_REL_OFFSET cs,0*/
8840 movl %eax, %eax
8841 - pushq_cfi %r10
8842 + pushq_cfi %r11
8843 CFI_REL_OFFSET rip,0
8844 pushq_cfi %rax
8845 cld
8846 SAVE_ARGS 0,1,0
8847 + pax_enter_kernel_user
8848 + /*
8849 + * No need to follow this irqs on/off section: the syscall
8850 + * disabled irqs, here we enable it straight after entry:
8851 + */
8852 + ENABLE_INTERRUPTS(CLBR_NONE)
8853 /* no need to do an access_ok check here because rbp has been
8854 32bit zero extended */
8855 +
8856 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8857 + mov $PAX_USER_SHADOW_BASE,%r11
8858 + add %r11,%rbp
8859 +#endif
8860 +
8861 1: movl (%rbp),%ebp
8862 .section __ex_table,"a"
8863 .quad 1b,ia32_badarg
8864 .previous
8865 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8866 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8867 + GET_THREAD_INFO(%r11)
8868 + orl $TS_COMPAT,TI_status(%r11)
8869 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8870 CFI_REMEMBER_STATE
8871 jnz sysenter_tracesys
8872 cmpq $(IA32_NR_syscalls-1),%rax
8873 @@ -160,12 +197,15 @@ sysenter_do_call:
8874 sysenter_dispatch:
8875 call *ia32_sys_call_table(,%rax,8)
8876 movq %rax,RAX-ARGOFFSET(%rsp)
8877 + GET_THREAD_INFO(%r11)
8878 DISABLE_INTERRUPTS(CLBR_NONE)
8879 TRACE_IRQS_OFF
8880 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8881 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8882 jnz sysexit_audit
8883 sysexit_from_sys_call:
8884 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8885 + pax_exit_kernel_user
8886 + pax_erase_kstack
8887 + andl $~TS_COMPAT,TI_status(%r11)
8888 /* clear IF, that popfq doesn't enable interrupts early */
8889 andl $~0x200,EFLAGS-R11(%rsp)
8890 movl RIP-R11(%rsp),%edx /* User %eip */
8891 @@ -191,6 +231,9 @@ sysexit_from_sys_call:
8892 movl %eax,%esi /* 2nd arg: syscall number */
8893 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8894 call __audit_syscall_entry
8895 +
8896 + pax_erase_kstack
8897 +
8898 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8899 cmpq $(IA32_NR_syscalls-1),%rax
8900 ja ia32_badsys
8901 @@ -202,7 +245,7 @@ sysexit_from_sys_call:
8902 .endm
8903
8904 .macro auditsys_exit exit
8905 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8906 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8907 jnz ia32_ret_from_sys_call
8908 TRACE_IRQS_ON
8909 sti
8910 @@ -213,11 +256,12 @@ sysexit_from_sys_call:
8911 1: setbe %al /* 1 if error, 0 if not */
8912 movzbl %al,%edi /* zero-extend that into %edi */
8913 call __audit_syscall_exit
8914 + GET_THREAD_INFO(%r11)
8915 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8916 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8917 cli
8918 TRACE_IRQS_OFF
8919 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8920 + testl %edi,TI_flags(%r11)
8921 jz \exit
8922 CLEAR_RREGS -ARGOFFSET
8923 jmp int_with_check
8924 @@ -235,7 +279,7 @@ sysexit_audit:
8925
8926 sysenter_tracesys:
8927 #ifdef CONFIG_AUDITSYSCALL
8928 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8929 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8930 jz sysenter_auditsys
8931 #endif
8932 SAVE_REST
8933 @@ -243,6 +287,9 @@ sysenter_tracesys:
8934 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8935 movq %rsp,%rdi /* &pt_regs -> arg1 */
8936 call syscall_trace_enter
8937 +
8938 + pax_erase_kstack
8939 +
8940 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8941 RESTORE_REST
8942 cmpq $(IA32_NR_syscalls-1),%rax
8943 @@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8944 ENTRY(ia32_cstar_target)
8945 CFI_STARTPROC32 simple
8946 CFI_SIGNAL_FRAME
8947 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8948 + CFI_DEF_CFA rsp,0
8949 CFI_REGISTER rip,rcx
8950 /*CFI_REGISTER rflags,r11*/
8951 SWAPGS_UNSAFE_STACK
8952 movl %esp,%r8d
8953 CFI_REGISTER rsp,r8
8954 movq PER_CPU_VAR(kernel_stack),%rsp
8955 + SAVE_ARGS 8*6,0,0
8956 + pax_enter_kernel_user
8957 /*
8958 * No need to follow this irqs on/off section: the syscall
8959 * disabled irqs and here we enable it straight after entry:
8960 */
8961 ENABLE_INTERRUPTS(CLBR_NONE)
8962 - SAVE_ARGS 8,0,0
8963 movl %eax,%eax /* zero extension */
8964 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8965 movq %rcx,RIP-ARGOFFSET(%rsp)
8966 @@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8967 /* no need to do an access_ok check here because r8 has been
8968 32bit zero extended */
8969 /* hardware stack frame is complete now */
8970 +
8971 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8972 + mov $PAX_USER_SHADOW_BASE,%r11
8973 + add %r11,%r8
8974 +#endif
8975 +
8976 1: movl (%r8),%r9d
8977 .section __ex_table,"a"
8978 .quad 1b,ia32_badarg
8979 .previous
8980 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8981 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8982 + GET_THREAD_INFO(%r11)
8983 + orl $TS_COMPAT,TI_status(%r11)
8984 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8985 CFI_REMEMBER_STATE
8986 jnz cstar_tracesys
8987 cmpq $IA32_NR_syscalls-1,%rax
8988 @@ -317,12 +372,15 @@ cstar_do_call:
8989 cstar_dispatch:
8990 call *ia32_sys_call_table(,%rax,8)
8991 movq %rax,RAX-ARGOFFSET(%rsp)
8992 + GET_THREAD_INFO(%r11)
8993 DISABLE_INTERRUPTS(CLBR_NONE)
8994 TRACE_IRQS_OFF
8995 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8996 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8997 jnz sysretl_audit
8998 sysretl_from_sys_call:
8999 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9000 + pax_exit_kernel_user
9001 + pax_erase_kstack
9002 + andl $~TS_COMPAT,TI_status(%r11)
9003 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
9004 movl RIP-ARGOFFSET(%rsp),%ecx
9005 CFI_REGISTER rip,rcx
9006 @@ -350,7 +408,7 @@ sysretl_audit:
9007
9008 cstar_tracesys:
9009 #ifdef CONFIG_AUDITSYSCALL
9010 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9011 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9012 jz cstar_auditsys
9013 #endif
9014 xchgl %r9d,%ebp
9015 @@ -359,6 +417,9 @@ cstar_tracesys:
9016 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9017 movq %rsp,%rdi /* &pt_regs -> arg1 */
9018 call syscall_trace_enter
9019 +
9020 + pax_erase_kstack
9021 +
9022 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
9023 RESTORE_REST
9024 xchgl %ebp,%r9d
9025 @@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
9026 CFI_REL_OFFSET rip,RIP-RIP
9027 PARAVIRT_ADJUST_EXCEPTION_FRAME
9028 SWAPGS
9029 - /*
9030 - * No need to follow this irqs on/off section: the syscall
9031 - * disabled irqs and here we enable it straight after entry:
9032 - */
9033 - ENABLE_INTERRUPTS(CLBR_NONE)
9034 movl %eax,%eax
9035 pushq_cfi %rax
9036 cld
9037 /* note the registers are not zero extended to the sf.
9038 this could be a problem. */
9039 SAVE_ARGS 0,1,0
9040 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9041 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9042 + pax_enter_kernel_user
9043 + /*
9044 + * No need to follow this irqs on/off section: the syscall
9045 + * disabled irqs and here we enable it straight after entry:
9046 + */
9047 + ENABLE_INTERRUPTS(CLBR_NONE)
9048 + GET_THREAD_INFO(%r11)
9049 + orl $TS_COMPAT,TI_status(%r11)
9050 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9051 jnz ia32_tracesys
9052 cmpq $(IA32_NR_syscalls-1),%rax
9053 ja ia32_badsys
9054 @@ -435,6 +498,9 @@ ia32_tracesys:
9055 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9056 movq %rsp,%rdi /* &pt_regs -> arg1 */
9057 call syscall_trace_enter
9058 +
9059 + pax_erase_kstack
9060 +
9061 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9062 RESTORE_REST
9063 cmpq $(IA32_NR_syscalls-1),%rax
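The entry-path hunks above delay ENABLE_INTERRUPTS until after pax_enter_kernel_user, replace the rsp-relative THREAD_INFO arithmetic with GET_THREAD_INFO through %r11, and, under CONFIG_PAX_MEMORY_UDEREF, add PAX_USER_SHADOW_BASE to the user-mode pointer (%rbp or %r8) just before it is dereferenced. The rebasing amounts to the following sketch, which takes the shadow base as a parameter instead of asserting its real value:

/* Under UDEREF the kernel reaches user memory through a shifted mapping, so a
 * raw user pointer is offset by the shadow base before being dereferenced.
 * The real code uses the PAX_USER_SHADOW_BASE constant for shadow_base. */
static inline const void *uderef_rebase(unsigned long user_ptr,
                                        unsigned long shadow_base)
{
        return (const void *)(user_ptr + shadow_base);
}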
9064 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
9065 index f6f5c53..b358b28 100644
9066 --- a/arch/x86/ia32/sys_ia32.c
9067 +++ b/arch/x86/ia32/sys_ia32.c
9068 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
9069 */
9070 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
9071 {
9072 - typeof(ubuf->st_uid) uid = 0;
9073 - typeof(ubuf->st_gid) gid = 0;
9074 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
9075 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
9076 SET_UID(uid, stat->uid);
9077 SET_GID(gid, stat->gid);
9078 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9079 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
9080 }
9081 set_fs(KERNEL_DS);
9082 ret = sys_rt_sigprocmask(how,
9083 - set ? (sigset_t __user *)&s : NULL,
9084 - oset ? (sigset_t __user *)&s : NULL,
9085 + set ? (sigset_t __force_user *)&s : NULL,
9086 + oset ? (sigset_t __force_user *)&s : NULL,
9087 sigsetsize);
9088 set_fs(old_fs);
9089 if (ret)
9090 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
9091 return alarm_setitimer(seconds);
9092 }
9093
9094 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
9095 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
9096 int options)
9097 {
9098 return compat_sys_wait4(pid, stat_addr, options, NULL);
9099 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9100 mm_segment_t old_fs = get_fs();
9101
9102 set_fs(KERNEL_DS);
9103 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9104 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9105 set_fs(old_fs);
9106 if (put_compat_timespec(&t, interval))
9107 return -EFAULT;
9108 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9109 mm_segment_t old_fs = get_fs();
9110
9111 set_fs(KERNEL_DS);
9112 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9113 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9114 set_fs(old_fs);
9115 if (!ret) {
9116 switch (_NSIG_WORDS) {
9117 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9118 if (copy_siginfo_from_user32(&info, uinfo))
9119 return -EFAULT;
9120 set_fs(KERNEL_DS);
9121 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9122 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9123 set_fs(old_fs);
9124 return ret;
9125 }
9126 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9127 return -EFAULT;
9128
9129 set_fs(KERNEL_DS);
9130 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9131 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9132 count);
9133 set_fs(old_fs);
9134
9135 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9136 index 952bd01..7692c6f 100644
9137 --- a/arch/x86/include/asm/alternative-asm.h
9138 +++ b/arch/x86/include/asm/alternative-asm.h
9139 @@ -15,6 +15,45 @@
9140 .endm
9141 #endif
9142
9143 +#ifdef KERNEXEC_PLUGIN
9144 + .macro pax_force_retaddr_bts rip=0
9145 + btsq $63,\rip(%rsp)
9146 + .endm
9147 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9148 + .macro pax_force_retaddr rip=0, reload=0
9149 + btsq $63,\rip(%rsp)
9150 + .endm
9151 + .macro pax_force_fptr ptr
9152 + btsq $63,\ptr
9153 + .endm
9154 + .macro pax_set_fptr_mask
9155 + .endm
9156 +#endif
9157 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9158 + .macro pax_force_retaddr rip=0, reload=0
9159 + .if \reload
9160 + pax_set_fptr_mask
9161 + .endif
9162 + orq %r10,\rip(%rsp)
9163 + .endm
9164 + .macro pax_force_fptr ptr
9165 + orq %r10,\ptr
9166 + .endm
9167 + .macro pax_set_fptr_mask
9168 + movabs $0x8000000000000000,%r10
9169 + .endm
9170 +#endif
9171 +#else
9172 + .macro pax_force_retaddr rip=0, reload=0
9173 + .endm
9174 + .macro pax_force_fptr ptr
9175 + .endm
9176 + .macro pax_force_retaddr_bts rip=0
9177 + .endm
9178 + .macro pax_set_fptr_mask
9179 + .endm
9180 +#endif
9181 +
9182 .macro altinstruction_entry orig alt feature orig_len alt_len
9183 .long \orig - .
9184 .long \alt - .
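The pax_force_retaddr / pax_force_fptr macros above implement the two KERNEXEC plugin methods for return addresses and function pointers spilled to the stack: the BTS variant sets bit 63 of the slot directly, while the OR variant folds in the 0x8000000000000000 mask that pax_set_fptr_mask preloads into %r10. Either way the effect is the following (illustrative C equivalent):

/* A genuine kernel address already has bit 63 set and is unchanged; a value
 * overwritten with a userland address becomes non-canonical and faults when
 * used, instead of transferring control to user-controlled memory. */
static inline unsigned long kernexec_force_kernel(unsigned long addr)
{
        return addr | (1UL << 63);      /* what btsq $63 / orq %r10 achieve */
}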
9185 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9186 index 37ad100..7d47faa 100644
9187 --- a/arch/x86/include/asm/alternative.h
9188 +++ b/arch/x86/include/asm/alternative.h
9189 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9190 ".section .discard,\"aw\",@progbits\n" \
9191 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9192 ".previous\n" \
9193 - ".section .altinstr_replacement, \"ax\"\n" \
9194 + ".section .altinstr_replacement, \"a\"\n" \
9195 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9196 ".previous"
9197
9198 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9199 index 3ab9bdd..238033e 100644
9200 --- a/arch/x86/include/asm/apic.h
9201 +++ b/arch/x86/include/asm/apic.h
9202 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
9203
9204 #ifdef CONFIG_X86_LOCAL_APIC
9205
9206 -extern unsigned int apic_verbosity;
9207 +extern int apic_verbosity;
9208 extern int local_apic_timer_c2_ok;
9209
9210 extern int disable_apic;
9211 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9212 index 20370c6..a2eb9b0 100644
9213 --- a/arch/x86/include/asm/apm.h
9214 +++ b/arch/x86/include/asm/apm.h
9215 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9216 __asm__ __volatile__(APM_DO_ZERO_SEGS
9217 "pushl %%edi\n\t"
9218 "pushl %%ebp\n\t"
9219 - "lcall *%%cs:apm_bios_entry\n\t"
9220 + "lcall *%%ss:apm_bios_entry\n\t"
9221 "setc %%al\n\t"
9222 "popl %%ebp\n\t"
9223 "popl %%edi\n\t"
9224 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9225 __asm__ __volatile__(APM_DO_ZERO_SEGS
9226 "pushl %%edi\n\t"
9227 "pushl %%ebp\n\t"
9228 - "lcall *%%cs:apm_bios_entry\n\t"
9229 + "lcall *%%ss:apm_bios_entry\n\t"
9230 "setc %%bl\n\t"
9231 "popl %%ebp\n\t"
9232 "popl %%edi\n\t"
9233 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9234 index 58cb6d4..ca9010d 100644
9235 --- a/arch/x86/include/asm/atomic.h
9236 +++ b/arch/x86/include/asm/atomic.h
9237 @@ -22,7 +22,18 @@
9238 */
9239 static inline int atomic_read(const atomic_t *v)
9240 {
9241 - return (*(volatile int *)&(v)->counter);
9242 + return (*(volatile const int *)&(v)->counter);
9243 +}
9244 +
9245 +/**
9246 + * atomic_read_unchecked - read atomic variable
9247 + * @v: pointer of type atomic_unchecked_t
9248 + *
9249 + * Atomically reads the value of @v.
9250 + */
9251 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9252 +{
9253 + return (*(volatile const int *)&(v)->counter);
9254 }
9255
9256 /**
9257 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9258 }
9259
9260 /**
9261 + * atomic_set_unchecked - set atomic variable
9262 + * @v: pointer of type atomic_unchecked_t
9263 + * @i: required value
9264 + *
9265 + * Atomically sets the value of @v to @i.
9266 + */
9267 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9268 +{
9269 + v->counter = i;
9270 +}
9271 +
9272 +/**
9273 * atomic_add - add integer to atomic variable
9274 * @i: integer value to add
9275 * @v: pointer of type atomic_t
9276 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9277 */
9278 static inline void atomic_add(int i, atomic_t *v)
9279 {
9280 - asm volatile(LOCK_PREFIX "addl %1,%0"
9281 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9282 +
9283 +#ifdef CONFIG_PAX_REFCOUNT
9284 + "jno 0f\n"
9285 + LOCK_PREFIX "subl %1,%0\n"
9286 + "int $4\n0:\n"
9287 + _ASM_EXTABLE(0b, 0b)
9288 +#endif
9289 +
9290 + : "+m" (v->counter)
9291 + : "ir" (i));
9292 +}
9293 +
9294 +/**
9295 + * atomic_add_unchecked - add integer to atomic variable
9296 + * @i: integer value to add
9297 + * @v: pointer of type atomic_unchecked_t
9298 + *
9299 + * Atomically adds @i to @v.
9300 + */
9301 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9302 +{
9303 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9304 : "+m" (v->counter)
9305 : "ir" (i));
9306 }
9307 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9308 */
9309 static inline void atomic_sub(int i, atomic_t *v)
9310 {
9311 - asm volatile(LOCK_PREFIX "subl %1,%0"
9312 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9313 +
9314 +#ifdef CONFIG_PAX_REFCOUNT
9315 + "jno 0f\n"
9316 + LOCK_PREFIX "addl %1,%0\n"
9317 + "int $4\n0:\n"
9318 + _ASM_EXTABLE(0b, 0b)
9319 +#endif
9320 +
9321 + : "+m" (v->counter)
9322 + : "ir" (i));
9323 +}
9324 +
9325 +/**
9326 + * atomic_sub_unchecked - subtract integer from atomic variable
9327 + * @i: integer value to subtract
9328 + * @v: pointer of type atomic_unchecked_t
9329 + *
9330 + * Atomically subtracts @i from @v.
9331 + */
9332 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9333 +{
9334 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9335 : "+m" (v->counter)
9336 : "ir" (i));
9337 }
9338 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9339 {
9340 unsigned char c;
9341
9342 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9343 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9344 +
9345 +#ifdef CONFIG_PAX_REFCOUNT
9346 + "jno 0f\n"
9347 + LOCK_PREFIX "addl %2,%0\n"
9348 + "int $4\n0:\n"
9349 + _ASM_EXTABLE(0b, 0b)
9350 +#endif
9351 +
9352 + "sete %1\n"
9353 : "+m" (v->counter), "=qm" (c)
9354 : "ir" (i) : "memory");
9355 return c;
9356 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9357 */
9358 static inline void atomic_inc(atomic_t *v)
9359 {
9360 - asm volatile(LOCK_PREFIX "incl %0"
9361 + asm volatile(LOCK_PREFIX "incl %0\n"
9362 +
9363 +#ifdef CONFIG_PAX_REFCOUNT
9364 + "jno 0f\n"
9365 + LOCK_PREFIX "decl %0\n"
9366 + "int $4\n0:\n"
9367 + _ASM_EXTABLE(0b, 0b)
9368 +#endif
9369 +
9370 + : "+m" (v->counter));
9371 +}
9372 +
9373 +/**
9374 + * atomic_inc_unchecked - increment atomic variable
9375 + * @v: pointer of type atomic_unchecked_t
9376 + *
9377 + * Atomically increments @v by 1.
9378 + */
9379 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9380 +{
9381 + asm volatile(LOCK_PREFIX "incl %0\n"
9382 : "+m" (v->counter));
9383 }
9384
9385 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9386 */
9387 static inline void atomic_dec(atomic_t *v)
9388 {
9389 - asm volatile(LOCK_PREFIX "decl %0"
9390 + asm volatile(LOCK_PREFIX "decl %0\n"
9391 +
9392 +#ifdef CONFIG_PAX_REFCOUNT
9393 + "jno 0f\n"
9394 + LOCK_PREFIX "incl %0\n"
9395 + "int $4\n0:\n"
9396 + _ASM_EXTABLE(0b, 0b)
9397 +#endif
9398 +
9399 + : "+m" (v->counter));
9400 +}
9401 +
9402 +/**
9403 + * atomic_dec_unchecked - decrement atomic variable
9404 + * @v: pointer of type atomic_unchecked_t
9405 + *
9406 + * Atomically decrements @v by 1.
9407 + */
9408 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9409 +{
9410 + asm volatile(LOCK_PREFIX "decl %0\n"
9411 : "+m" (v->counter));
9412 }
9413
9414 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9415 {
9416 unsigned char c;
9417
9418 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9419 + asm volatile(LOCK_PREFIX "decl %0\n"
9420 +
9421 +#ifdef CONFIG_PAX_REFCOUNT
9422 + "jno 0f\n"
9423 + LOCK_PREFIX "incl %0\n"
9424 + "int $4\n0:\n"
9425 + _ASM_EXTABLE(0b, 0b)
9426 +#endif
9427 +
9428 + "sete %1\n"
9429 : "+m" (v->counter), "=qm" (c)
9430 : : "memory");
9431 return c != 0;
9432 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9433 {
9434 unsigned char c;
9435
9436 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9437 + asm volatile(LOCK_PREFIX "incl %0\n"
9438 +
9439 +#ifdef CONFIG_PAX_REFCOUNT
9440 + "jno 0f\n"
9441 + LOCK_PREFIX "decl %0\n"
9442 + "int $4\n0:\n"
9443 + _ASM_EXTABLE(0b, 0b)
9444 +#endif
9445 +
9446 + "sete %1\n"
9447 + : "+m" (v->counter), "=qm" (c)
9448 + : : "memory");
9449 + return c != 0;
9450 +}
9451 +
9452 +/**
9453 + * atomic_inc_and_test_unchecked - increment and test
9454 + * @v: pointer of type atomic_unchecked_t
9455 + *
9456 + * Atomically increments @v by 1
9457 + * and returns true if the result is zero, or false for all
9458 + * other cases.
9459 + */
9460 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9461 +{
9462 + unsigned char c;
9463 +
9464 + asm volatile(LOCK_PREFIX "incl %0\n"
9465 + "sete %1\n"
9466 : "+m" (v->counter), "=qm" (c)
9467 : : "memory");
9468 return c != 0;
9469 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9470 {
9471 unsigned char c;
9472
9473 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9474 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9475 +
9476 +#ifdef CONFIG_PAX_REFCOUNT
9477 + "jno 0f\n"
9478 + LOCK_PREFIX "subl %2,%0\n"
9479 + "int $4\n0:\n"
9480 + _ASM_EXTABLE(0b, 0b)
9481 +#endif
9482 +
9483 + "sets %1\n"
9484 : "+m" (v->counter), "=qm" (c)
9485 : "ir" (i) : "memory");
9486 return c;
9487 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9488 goto no_xadd;
9489 #endif
9490 /* Modern 486+ processor */
9491 - return i + xadd(&v->counter, i);
9492 + return i + xadd_check_overflow(&v->counter, i);
9493
9494 #ifdef CONFIG_M386
9495 no_xadd: /* Legacy 386 processor */
9496 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9497 }
9498
9499 /**
9500 + * atomic_add_return_unchecked - add integer and return
9501 + * @i: integer value to add
9502 + * @v: pointer of type atomic_unchecked_t
9503 + *
9504 + * Atomically adds @i to @v and returns @i + @v
9505 + */
9506 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9507 +{
9508 +#ifdef CONFIG_M386
9509 + int __i;
9510 + unsigned long flags;
9511 + if (unlikely(boot_cpu_data.x86 <= 3))
9512 + goto no_xadd;
9513 +#endif
9514 + /* Modern 486+ processor */
9515 + return i + xadd(&v->counter, i);
9516 +
9517 +#ifdef CONFIG_M386
9518 +no_xadd: /* Legacy 386 processor */
9519 + raw_local_irq_save(flags);
9520 + __i = atomic_read_unchecked(v);
9521 + atomic_set_unchecked(v, i + __i);
9522 + raw_local_irq_restore(flags);
9523 + return i + __i;
9524 +#endif
9525 +}
9526 +
9527 +/**
9528 * atomic_sub_return - subtract integer and return
9529 * @v: pointer of type atomic_t
9530 * @i: integer value to subtract
9531 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9532 }
9533
9534 #define atomic_inc_return(v) (atomic_add_return(1, v))
9535 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9536 +{
9537 + return atomic_add_return_unchecked(1, v);
9538 +}
9539 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9540
9541 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9542 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9543 return cmpxchg(&v->counter, old, new);
9544 }
9545
9546 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9547 +{
9548 + return cmpxchg(&v->counter, old, new);
9549 +}
9550 +
9551 static inline int atomic_xchg(atomic_t *v, int new)
9552 {
9553 return xchg(&v->counter, new);
9554 }
9555
9556 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9557 +{
9558 + return xchg(&v->counter, new);
9559 +}
9560 +
9561 /**
9562 * __atomic_add_unless - add unless the number is already a given value
9563 * @v: pointer of type atomic_t
9564 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9565 */
9566 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9567 {
9568 - int c, old;
9569 + int c, old, new;
9570 c = atomic_read(v);
9571 for (;;) {
9572 - if (unlikely(c == (u)))
9573 + if (unlikely(c == u))
9574 break;
9575 - old = atomic_cmpxchg((v), c, c + (a));
9576 +
9577 + asm volatile("addl %2,%0\n"
9578 +
9579 +#ifdef CONFIG_PAX_REFCOUNT
9580 + "jno 0f\n"
9581 + "subl %2,%0\n"
9582 + "int $4\n0:\n"
9583 + _ASM_EXTABLE(0b, 0b)
9584 +#endif
9585 +
9586 + : "=r" (new)
9587 + : "0" (c), "ir" (a));
9588 +
9589 + old = atomic_cmpxchg(v, c, new);
9590 if (likely(old == c))
9591 break;
9592 c = old;
9593 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9594 return c;
9595 }
9596
9597 +/**
9598 + * atomic_inc_not_zero_hint - increment if not null
9599 + * @v: pointer of type atomic_t
9600 + * @hint: probable value of the atomic before the increment
9601 + *
9602 + * This version of atomic_inc_not_zero() gives a hint of probable
9603 + * value of the atomic. This helps processor to not read the memory
9604 + * before doing the atomic read/modify/write cycle, lowering
9605 + * number of bus transactions on some arches.
9606 + *
9607 + * Returns: 0 if increment was not done, 1 otherwise.
9608 + */
9609 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9610 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9611 +{
9612 + int val, c = hint, new;
9613 +
9614 + /* sanity test, should be removed by compiler if hint is a constant */
9615 + if (!hint)
9616 + return __atomic_add_unless(v, 1, 0);
9617 +
9618 + do {
9619 + asm volatile("incl %0\n"
9620 +
9621 +#ifdef CONFIG_PAX_REFCOUNT
9622 + "jno 0f\n"
9623 + "decl %0\n"
9624 + "int $4\n0:\n"
9625 + _ASM_EXTABLE(0b, 0b)
9626 +#endif
9627 +
9628 + : "=r" (new)
9629 + : "0" (c));
9630 +
9631 + val = atomic_cmpxchg(v, c, new);
9632 + if (val == c)
9633 + return 1;
9634 + c = val;
9635 + } while (c);
9636 +
9637 + return 0;
9638 +}
9639
9640 /*
9641 * atomic_dec_if_positive - decrement by 1 if old value positive
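The atomic.h additions above follow one pattern throughout: every write to an atomic_t gains a signed-overflow check (jno skips the recovery path; on overflow the operation is rolled back and int $4 raises the overflow exception for the kernel to act on), and every operation gains an *_unchecked twin on atomic_unchecked_t for counters that are allowed to wrap. A compact standalone sketch of the checked increment, leaving out the exception-table entry the real patch emits alongside it:

typedef struct { int counter; } checked_count_t;

static inline void checked_count_inc(checked_count_t *v)
{
        asm volatile("lock incl %0\n"
                     "jno 0f\n"         /* OF clear: normal path              */
                     "lock decl %0\n"   /* overflow: roll the counter back    */
                     "int $4\n"         /* raise #OF for the overflow handler */
                     "0:\n"
                     : "+m" (v->counter)
                     :
                     : "memory", "cc");
        /* the patch additionally records an _ASM_EXTABLE(0b, 0b) entry here */
}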
9642 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9643 index fa13f0e..27c2e08 100644
9644 --- a/arch/x86/include/asm/atomic64_32.h
9645 +++ b/arch/x86/include/asm/atomic64_32.h
9646 @@ -12,6 +12,14 @@ typedef struct {
9647 u64 __aligned(8) counter;
9648 } atomic64_t;
9649
9650 +#ifdef CONFIG_PAX_REFCOUNT
9651 +typedef struct {
9652 + u64 __aligned(8) counter;
9653 +} atomic64_unchecked_t;
9654 +#else
9655 +typedef atomic64_t atomic64_unchecked_t;
9656 +#endif
9657 +
9658 #define ATOMIC64_INIT(val) { (val) }
9659
9660 #ifdef CONFIG_X86_CMPXCHG64
9661 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9662 }
9663
9664 /**
9665 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9666 + * @p: pointer to type atomic64_unchecked_t
9667 + * @o: expected value
9668 + * @n: new value
9669 + *
9670 + * Atomically sets @v to @n if it was equal to @o and returns
9671 + * the old value.
9672 + */
9673 +
9674 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9675 +{
9676 + return cmpxchg64(&v->counter, o, n);
9677 +}
9678 +
9679 +/**
9680 * atomic64_xchg - xchg atomic64 variable
9681 * @v: pointer to type atomic64_t
9682 * @n: value to assign
9683 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9684 }
9685
9686 /**
9687 + * atomic64_set_unchecked - set atomic64 variable
9688 + * @v: pointer to type atomic64_unchecked_t
9689 + * @n: value to assign
9690 + *
9691 + * Atomically sets the value of @v to @n.
9692 + */
9693 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9694 +{
9695 + unsigned high = (unsigned)(i >> 32);
9696 + unsigned low = (unsigned)i;
9697 + asm volatile(ATOMIC64_ALTERNATIVE(set)
9698 + : "+b" (low), "+c" (high)
9699 + : "S" (v)
9700 + : "eax", "edx", "memory"
9701 + );
9702 +}
9703 +
9704 +/**
9705 * atomic64_read - read atomic64 variable
9706 * @v: pointer to type atomic64_t
9707 *
9708 @@ -93,6 +134,22 @@ static inline long long atomic64_read(const atomic64_t *v)
9709 }
9710
9711 /**
9712 + * atomic64_read_unchecked - read atomic64 variable
9713 + * @v: pointer to type atomic64_unchecked_t
9714 + *
9715 + * Atomically reads the value of @v and returns it.
9716 + */
9717 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9718 +{
9719 + long long r;
9720 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
9721 + : "=A" (r), "+c" (v)
9722 + : : "memory"
9723 + );
9724 + return r;
9725 + }
9726 +
9727 +/**
9728 * atomic64_add_return - add and return
9729 * @i: integer value to add
9730 * @v: pointer to type atomic64_t
9731 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9732 return i;
9733 }
9734
9735 +/**
9736 + * atomic64_add_return_unchecked - add and return
9737 + * @i: integer value to add
9738 + * @v: pointer to type atomic64_unchecked_t
9739 + *
9740 + * Atomically adds @i to @v and returns @i + *@v
9741 + */
9742 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9743 +{
9744 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
9745 + : "+A" (i), "+c" (v)
9746 + : : "memory"
9747 + );
9748 + return i;
9749 +}
9750 +
9751 /*
9752 * Other variants with different arithmetic operators:
9753 */
9754 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9755 return a;
9756 }
9757
9758 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9759 +{
9760 + long long a;
9761 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
9762 + : "=A" (a)
9763 + : "S" (v)
9764 + : "memory", "ecx"
9765 + );
9766 + return a;
9767 +}
9768 +
9769 static inline long long atomic64_dec_return(atomic64_t *v)
9770 {
9771 long long a;
9772 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9773 }
9774
9775 /**
9776 + * atomic64_add_unchecked - add integer to atomic64 variable
9777 + * @i: integer value to add
9778 + * @v: pointer to type atomic64_unchecked_t
9779 + *
9780 + * Atomically adds @i to @v.
9781 + */
9782 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9783 +{
9784 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
9785 + : "+A" (i), "+c" (v)
9786 + : : "memory"
9787 + );
9788 + return i;
9789 +}
9790 +
9791 +/**
9792 * atomic64_sub - subtract the atomic64 variable
9793 * @i: integer value to subtract
9794 * @v: pointer to type atomic64_t
9795 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9796 index 0e1cbfc..5623683 100644
9797 --- a/arch/x86/include/asm/atomic64_64.h
9798 +++ b/arch/x86/include/asm/atomic64_64.h
9799 @@ -18,7 +18,19 @@
9800 */
9801 static inline long atomic64_read(const atomic64_t *v)
9802 {
9803 - return (*(volatile long *)&(v)->counter);
9804 + return (*(volatile const long *)&(v)->counter);
9805 +}
9806 +
9807 +/**
9808 + * atomic64_read_unchecked - read atomic64 variable
9809 + * @v: pointer of type atomic64_unchecked_t
9810 + *
9811 + * Atomically reads the value of @v.
9812 + * Doesn't imply a read memory barrier.
9813 + */
9814 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9815 +{
9816 + return (*(volatile const long *)&(v)->counter);
9817 }
9818
9819 /**
9820 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9821 }
9822
9823 /**
9824 + * atomic64_set_unchecked - set atomic64 variable
9825 + * @v: pointer to type atomic64_unchecked_t
9826 + * @i: required value
9827 + *
9828 + * Atomically sets the value of @v to @i.
9829 + */
9830 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9831 +{
9832 + v->counter = i;
9833 +}
9834 +
9835 +/**
9836 * atomic64_add - add integer to atomic64 variable
9837 * @i: integer value to add
9838 * @v: pointer to type atomic64_t
9839 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9840 */
9841 static inline void atomic64_add(long i, atomic64_t *v)
9842 {
9843 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9844 +
9845 +#ifdef CONFIG_PAX_REFCOUNT
9846 + "jno 0f\n"
9847 + LOCK_PREFIX "subq %1,%0\n"
9848 + "int $4\n0:\n"
9849 + _ASM_EXTABLE(0b, 0b)
9850 +#endif
9851 +
9852 + : "=m" (v->counter)
9853 + : "er" (i), "m" (v->counter));
9854 +}
9855 +
9856 +/**
9857 + * atomic64_add_unchecked - add integer to atomic64 variable
9858 + * @i: integer value to add
9859 + * @v: pointer to type atomic64_unchecked_t
9860 + *
9861 + * Atomically adds @i to @v.
9862 + */
9863 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9864 +{
9865 asm volatile(LOCK_PREFIX "addq %1,%0"
9866 : "=m" (v->counter)
9867 : "er" (i), "m" (v->counter));
9868 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9869 */
9870 static inline void atomic64_sub(long i, atomic64_t *v)
9871 {
9872 - asm volatile(LOCK_PREFIX "subq %1,%0"
9873 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9874 +
9875 +#ifdef CONFIG_PAX_REFCOUNT
9876 + "jno 0f\n"
9877 + LOCK_PREFIX "addq %1,%0\n"
9878 + "int $4\n0:\n"
9879 + _ASM_EXTABLE(0b, 0b)
9880 +#endif
9881 +
9882 + : "=m" (v->counter)
9883 + : "er" (i), "m" (v->counter));
9884 +}
9885 +
9886 +/**
9887 + * atomic64_sub_unchecked - subtract the atomic64 variable
9888 + * @i: integer value to subtract
9889 + * @v: pointer to type atomic64_unchecked_t
9890 + *
9891 + * Atomically subtracts @i from @v.
9892 + */
9893 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9894 +{
9895 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9896 : "=m" (v->counter)
9897 : "er" (i), "m" (v->counter));
9898 }
9899 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9900 {
9901 unsigned char c;
9902
9903 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9904 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9905 +
9906 +#ifdef CONFIG_PAX_REFCOUNT
9907 + "jno 0f\n"
9908 + LOCK_PREFIX "addq %2,%0\n"
9909 + "int $4\n0:\n"
9910 + _ASM_EXTABLE(0b, 0b)
9911 +#endif
9912 +
9913 + "sete %1\n"
9914 : "=m" (v->counter), "=qm" (c)
9915 : "er" (i), "m" (v->counter) : "memory");
9916 return c;
9917 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9918 */
9919 static inline void atomic64_inc(atomic64_t *v)
9920 {
9921 + asm volatile(LOCK_PREFIX "incq %0\n"
9922 +
9923 +#ifdef CONFIG_PAX_REFCOUNT
9924 + "jno 0f\n"
9925 + LOCK_PREFIX "decq %0\n"
9926 + "int $4\n0:\n"
9927 + _ASM_EXTABLE(0b, 0b)
9928 +#endif
9929 +
9930 + : "=m" (v->counter)
9931 + : "m" (v->counter));
9932 +}
9933 +
9934 +/**
9935 + * atomic64_inc_unchecked - increment atomic64 variable
9936 + * @v: pointer to type atomic64_unchecked_t
9937 + *
9938 + * Atomically increments @v by 1.
9939 + */
9940 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9941 +{
9942 asm volatile(LOCK_PREFIX "incq %0"
9943 : "=m" (v->counter)
9944 : "m" (v->counter));
9945 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9946 */
9947 static inline void atomic64_dec(atomic64_t *v)
9948 {
9949 - asm volatile(LOCK_PREFIX "decq %0"
9950 + asm volatile(LOCK_PREFIX "decq %0\n"
9951 +
9952 +#ifdef CONFIG_PAX_REFCOUNT
9953 + "jno 0f\n"
9954 + LOCK_PREFIX "incq %0\n"
9955 + "int $4\n0:\n"
9956 + _ASM_EXTABLE(0b, 0b)
9957 +#endif
9958 +
9959 + : "=m" (v->counter)
9960 + : "m" (v->counter));
9961 +}
9962 +
9963 +/**
9964 + * atomic64_dec_unchecked - decrement atomic64 variable
9965 + * @v: pointer to type atomic64_t
9966 + *
9967 + * Atomically decrements @v by 1.
9968 + */
9969 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9970 +{
9971 + asm volatile(LOCK_PREFIX "decq %0\n"
9972 : "=m" (v->counter)
9973 : "m" (v->counter));
9974 }
9975 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9976 {
9977 unsigned char c;
9978
9979 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9980 + asm volatile(LOCK_PREFIX "decq %0\n"
9981 +
9982 +#ifdef CONFIG_PAX_REFCOUNT
9983 + "jno 0f\n"
9984 + LOCK_PREFIX "incq %0\n"
9985 + "int $4\n0:\n"
9986 + _ASM_EXTABLE(0b, 0b)
9987 +#endif
9988 +
9989 + "sete %1\n"
9990 : "=m" (v->counter), "=qm" (c)
9991 : "m" (v->counter) : "memory");
9992 return c != 0;
9993 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9994 {
9995 unsigned char c;
9996
9997 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9998 + asm volatile(LOCK_PREFIX "incq %0\n"
9999 +
10000 +#ifdef CONFIG_PAX_REFCOUNT
10001 + "jno 0f\n"
10002 + LOCK_PREFIX "decq %0\n"
10003 + "int $4\n0:\n"
10004 + _ASM_EXTABLE(0b, 0b)
10005 +#endif
10006 +
10007 + "sete %1\n"
10008 : "=m" (v->counter), "=qm" (c)
10009 : "m" (v->counter) : "memory");
10010 return c != 0;
10011 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10012 {
10013 unsigned char c;
10014
10015 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
10016 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
10017 +
10018 +#ifdef CONFIG_PAX_REFCOUNT
10019 + "jno 0f\n"
10020 + LOCK_PREFIX "subq %2,%0\n"
10021 + "int $4\n0:\n"
10022 + _ASM_EXTABLE(0b, 0b)
10023 +#endif
10024 +
10025 + "sets %1\n"
10026 : "=m" (v->counter), "=qm" (c)
10027 : "er" (i), "m" (v->counter) : "memory");
10028 return c;
10029 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10030 */
10031 static inline long atomic64_add_return(long i, atomic64_t *v)
10032 {
10033 + return i + xadd_check_overflow(&v->counter, i);
10034 +}
10035 +
10036 +/**
10037 + * atomic64_add_return_unchecked - add and return
10038 + * @i: integer value to add
10039 + * @v: pointer to type atomic64_unchecked_t
10040 + *
10041 + * Atomically adds @i to @v and returns @i + @v
10042 + */
10043 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10044 +{
10045 return i + xadd(&v->counter, i);
10046 }
10047
10048 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10049 }
10050
10051 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10052 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10053 +{
10054 + return atomic64_add_return_unchecked(1, v);
10055 +}
10056 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10057
10058 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10059 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10060 return cmpxchg(&v->counter, old, new);
10061 }
10062
10063 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10064 +{
10065 + return cmpxchg(&v->counter, old, new);
10066 +}
10067 +
10068 static inline long atomic64_xchg(atomic64_t *v, long new)
10069 {
10070 return xchg(&v->counter, new);
10071 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
10072 */
10073 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10074 {
10075 - long c, old;
10076 + long c, old, new;
10077 c = atomic64_read(v);
10078 for (;;) {
10079 - if (unlikely(c == (u)))
10080 + if (unlikely(c == u))
10081 break;
10082 - old = atomic64_cmpxchg((v), c, c + (a));
10083 +
10084 + asm volatile("add %2,%0\n"
10085 +
10086 +#ifdef CONFIG_PAX_REFCOUNT
10087 + "jno 0f\n"
10088 + "sub %2,%0\n"
10089 + "int $4\n0:\n"
10090 + _ASM_EXTABLE(0b, 0b)
10091 +#endif
10092 +
10093 + : "=r" (new)
10094 + : "0" (c), "ir" (a));
10095 +
10096 + old = atomic64_cmpxchg(v, c, new);
10097 if (likely(old == c))
10098 break;
10099 c = old;
10100 }
10101 - return c != (u);
10102 + return c != u;
10103 }
10104
10105 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
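The PAX_REFCOUNT instrumentation above follows one pattern throughout: do the locked arithmetic, test the overflow flag with jno, and on overflow undo the operation and execute int $4 so the kernel's overflow handler can log the event (the _ASM_EXTABLE entry lets execution resume right after the trap). A minimal user-space sketch of the same idea, assuming GCC/Clang inline asm on x86 and substituting abort() for the int $4 trap; the function name is illustrative, not from the patch:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static void refcount_inc_checked(int *counter)
{
        unsigned char overflowed;

        /* "lock incl" sets OF when the signed counter wraps past INT_MAX;
         * seto captures the flag that the kernel code tests with jno. */
        asm volatile("lock incl %0\n\t"
                     "seto %1"
                     : "+m" (*counter), "=q" (overflowed)
                     : : "memory", "cc");

        if (overflowed) {
                /* undo the wrap, like the "jno 0f; lock decl; int $4" sequence */
                asm volatile("lock decl %0" : "+m" (*counter) : : "memory", "cc");
                fprintf(stderr, "refcount overflow caught\n");
                abort();
        }
}

int main(void)
{
        int refs = INT_MAX;

        refcount_inc_checked(&refs);    /* wraps, gets rolled back, then aborts */
        return 0;
}

The *_unchecked variants added above exist precisely so that counters which may legitimately wrap (statistics and the like) can opt out of this trap while ordinary reference counts keep it.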
10106 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10107 index b97596e..9bd48b06 100644
10108 --- a/arch/x86/include/asm/bitops.h
10109 +++ b/arch/x86/include/asm/bitops.h
10110 @@ -38,7 +38,7 @@
10111 * a mask operation on a byte.
10112 */
10113 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10114 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10115 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10116 #define CONST_MASK(nr) (1 << ((nr) & 7))
10117
10118 /**
10119 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10120 index 5e1a2ee..c9f9533 100644
10121 --- a/arch/x86/include/asm/boot.h
10122 +++ b/arch/x86/include/asm/boot.h
10123 @@ -11,10 +11,15 @@
10124 #include <asm/pgtable_types.h>
10125
10126 /* Physical address where kernel should be loaded. */
10127 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10128 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10129 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10130 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10131
10132 +#ifndef __ASSEMBLY__
10133 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
10134 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10135 +#endif
10136 +
10137 /* Minimum kernel alignment, as a power of two */
10138 #ifdef CONFIG_X86_64
10139 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10140 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10141 index 48f99f1..d78ebf9 100644
10142 --- a/arch/x86/include/asm/cache.h
10143 +++ b/arch/x86/include/asm/cache.h
10144 @@ -5,12 +5,13 @@
10145
10146 /* L1 cache line size */
10147 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10148 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10149 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10150
10151 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10152 +#define __read_only __attribute__((__section__(".data..read_only")))
10153
10154 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10155 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10156 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10157
10158 #ifdef CONFIG_X86_VSMP
10159 #ifdef CONFIG_SMP
10160 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10161 index 4e12668..501d239 100644
10162 --- a/arch/x86/include/asm/cacheflush.h
10163 +++ b/arch/x86/include/asm/cacheflush.h
10164 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10165 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10166
10167 if (pg_flags == _PGMT_DEFAULT)
10168 - return -1;
10169 + return ~0UL;
10170 else if (pg_flags == _PGMT_WC)
10171 return _PAGE_CACHE_WC;
10172 else if (pg_flags == _PGMT_UC_MINUS)
10173 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10174 index 46fc474..b02b0f9 100644
10175 --- a/arch/x86/include/asm/checksum_32.h
10176 +++ b/arch/x86/include/asm/checksum_32.h
10177 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10178 int len, __wsum sum,
10179 int *src_err_ptr, int *dst_err_ptr);
10180
10181 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10182 + int len, __wsum sum,
10183 + int *src_err_ptr, int *dst_err_ptr);
10184 +
10185 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10186 + int len, __wsum sum,
10187 + int *src_err_ptr, int *dst_err_ptr);
10188 +
10189 /*
10190 * Note: when you get a NULL pointer exception here this means someone
10191 * passed in an incorrect kernel address to one of these functions.
10192 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10193 int *err_ptr)
10194 {
10195 might_sleep();
10196 - return csum_partial_copy_generic((__force void *)src, dst,
10197 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
10198 len, sum, err_ptr, NULL);
10199 }
10200
10201 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10202 {
10203 might_sleep();
10204 if (access_ok(VERIFY_WRITE, dst, len))
10205 - return csum_partial_copy_generic(src, (__force void *)dst,
10206 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10207 len, sum, NULL, err_ptr);
10208
10209 if (len)
10210 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10211 index 99480e5..d81165b 100644
10212 --- a/arch/x86/include/asm/cmpxchg.h
10213 +++ b/arch/x86/include/asm/cmpxchg.h
10214 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10215 __compiletime_error("Bad argument size for cmpxchg");
10216 extern void __xadd_wrong_size(void)
10217 __compiletime_error("Bad argument size for xadd");
10218 +extern void __xadd_check_overflow_wrong_size(void)
10219 + __compiletime_error("Bad argument size for xadd_check_overflow");
10220 extern void __add_wrong_size(void)
10221 __compiletime_error("Bad argument size for add");
10222 +extern void __add_check_overflow_wrong_size(void)
10223 + __compiletime_error("Bad argument size for add_check_overflow");
10224
10225 /*
10226 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10227 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10228 __ret; \
10229 })
10230
10231 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10232 + ({ \
10233 + __typeof__ (*(ptr)) __ret = (arg); \
10234 + switch (sizeof(*(ptr))) { \
10235 + case __X86_CASE_L: \
10236 + asm volatile (lock #op "l %0, %1\n" \
10237 + "jno 0f\n" \
10238 + "mov %0,%1\n" \
10239 + "int $4\n0:\n" \
10240 + _ASM_EXTABLE(0b, 0b) \
10241 + : "+r" (__ret), "+m" (*(ptr)) \
10242 + : : "memory", "cc"); \
10243 + break; \
10244 + case __X86_CASE_Q: \
10245 + asm volatile (lock #op "q %q0, %1\n" \
10246 + "jno 0f\n" \
10247 + "mov %0,%1\n" \
10248 + "int $4\n0:\n" \
10249 + _ASM_EXTABLE(0b, 0b) \
10250 + : "+r" (__ret), "+m" (*(ptr)) \
10251 + : : "memory", "cc"); \
10252 + break; \
10253 + default: \
10254 + __ ## op ## _check_overflow_wrong_size(); \
10255 + } \
10256 + __ret; \
10257 + })
10258 +
10259 /*
10260 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10261 * Since this is generally used to protect other memory information, we
10262 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10263 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10264 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10265
10266 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10267 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10268 +
10269 #define __add(ptr, inc, lock) \
10270 ({ \
10271 __typeof__ (*(ptr)) __ret = (inc); \
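xadd_check_overflow() is what the checked atomic64_add_return() in the previous file is built on: an xadd that, on signed overflow, writes the old value back and raises int $4. A rough portable equivalent, assuming GCC/Clang builtins; unlike the in-asm version it has a small window between the add and the rollback, and the function name is only illustrative:

#include <stdlib.h>

static long xadd_check_overflow_sketch(long *ptr, long inc)
{
        /* xadd returns the old value; the caller adds inc back on top */
        long old = __atomic_fetch_add(ptr, inc, __ATOMIC_SEQ_CST);
        long sum;

        if (__builtin_add_overflow(old, inc, &sum)) {
                /* mirrors "jno 0f; mov %0,%1; int $4": restore, then trap */
                __atomic_store_n(ptr, old, __ATOMIC_SEQ_CST);
                abort();
        }
        return old;
}

atomic64_add_return() then simply returns i + xadd_check_overflow(&v->counter, i), just as the unchecked variant returns i + xadd(&v->counter, i).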
10272 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10273 index 8d67d42..183d0eb 100644
10274 --- a/arch/x86/include/asm/cpufeature.h
10275 +++ b/arch/x86/include/asm/cpufeature.h
10276 @@ -367,7 +367,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10277 ".section .discard,\"aw\",@progbits\n"
10278 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10279 ".previous\n"
10280 - ".section .altinstr_replacement,\"ax\"\n"
10281 + ".section .altinstr_replacement,\"a\"\n"
10282 "3: movb $1,%0\n"
10283 "4:\n"
10284 ".previous\n"
10285 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10286 index e95822d..a90010e 100644
10287 --- a/arch/x86/include/asm/desc.h
10288 +++ b/arch/x86/include/asm/desc.h
10289 @@ -4,6 +4,7 @@
10290 #include <asm/desc_defs.h>
10291 #include <asm/ldt.h>
10292 #include <asm/mmu.h>
10293 +#include <asm/pgtable.h>
10294
10295 #include <linux/smp.h>
10296
10297 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10298
10299 desc->type = (info->read_exec_only ^ 1) << 1;
10300 desc->type |= info->contents << 2;
10301 + desc->type |= info->seg_not_present ^ 1;
10302
10303 desc->s = 1;
10304 desc->dpl = 0x3;
10305 @@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10306 }
10307
10308 extern struct desc_ptr idt_descr;
10309 -extern gate_desc idt_table[];
10310 extern struct desc_ptr nmi_idt_descr;
10311 -extern gate_desc nmi_idt_table[];
10312 -
10313 -struct gdt_page {
10314 - struct desc_struct gdt[GDT_ENTRIES];
10315 -} __attribute__((aligned(PAGE_SIZE)));
10316 -
10317 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10318 +extern gate_desc idt_table[256];
10319 +extern gate_desc nmi_idt_table[256];
10320
10321 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10322 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10323 {
10324 - return per_cpu(gdt_page, cpu).gdt;
10325 + return cpu_gdt_table[cpu];
10326 }
10327
10328 #ifdef CONFIG_X86_64
10329 @@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10330 unsigned long base, unsigned dpl, unsigned flags,
10331 unsigned short seg)
10332 {
10333 - gate->a = (seg << 16) | (base & 0xffff);
10334 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10335 + gate->gate.offset_low = base;
10336 + gate->gate.seg = seg;
10337 + gate->gate.reserved = 0;
10338 + gate->gate.type = type;
10339 + gate->gate.s = 0;
10340 + gate->gate.dpl = dpl;
10341 + gate->gate.p = 1;
10342 + gate->gate.offset_high = base >> 16;
10343 }
10344
10345 #endif
10346 @@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10347
10348 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10349 {
10350 + pax_open_kernel();
10351 memcpy(&idt[entry], gate, sizeof(*gate));
10352 + pax_close_kernel();
10353 }
10354
10355 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10356 {
10357 + pax_open_kernel();
10358 memcpy(&ldt[entry], desc, 8);
10359 + pax_close_kernel();
10360 }
10361
10362 static inline void
10363 @@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10364 default: size = sizeof(*gdt); break;
10365 }
10366
10367 + pax_open_kernel();
10368 memcpy(&gdt[entry], desc, size);
10369 + pax_close_kernel();
10370 }
10371
10372 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10373 @@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10374
10375 static inline void native_load_tr_desc(void)
10376 {
10377 + pax_open_kernel();
10378 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10379 + pax_close_kernel();
10380 }
10381
10382 static inline void native_load_gdt(const struct desc_ptr *dtr)
10383 @@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10384 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10385 unsigned int i;
10386
10387 + pax_open_kernel();
10388 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10389 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10390 + pax_close_kernel();
10391 }
10392
10393 #define _LDT_empty(info) \
10394 @@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10395 }
10396
10397 #ifdef CONFIG_X86_64
10398 -static inline void set_nmi_gate(int gate, void *addr)
10399 +static inline void set_nmi_gate(int gate, const void *addr)
10400 {
10401 gate_desc s;
10402
10403 @@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10404 }
10405 #endif
10406
10407 -static inline void _set_gate(int gate, unsigned type, void *addr,
10408 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10409 unsigned dpl, unsigned ist, unsigned seg)
10410 {
10411 gate_desc s;
10412 @@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10413 * Pentium F0 0F bugfix can have resulted in the mapped
10414 * IDT being write-protected.
10415 */
10416 -static inline void set_intr_gate(unsigned int n, void *addr)
10417 +static inline void set_intr_gate(unsigned int n, const void *addr)
10418 {
10419 BUG_ON((unsigned)n > 0xFF);
10420 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10421 @@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10422 /*
10423 * This routine sets up an interrupt gate at directory privilege level 3.
10424 */
10425 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10426 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10427 {
10428 BUG_ON((unsigned)n > 0xFF);
10429 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10430 }
10431
10432 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10433 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10434 {
10435 BUG_ON((unsigned)n > 0xFF);
10436 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10437 }
10438
10439 -static inline void set_trap_gate(unsigned int n, void *addr)
10440 +static inline void set_trap_gate(unsigned int n, const void *addr)
10441 {
10442 BUG_ON((unsigned)n > 0xFF);
10443 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10444 @@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10445 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10446 {
10447 BUG_ON((unsigned)n > 0xFF);
10448 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10449 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10450 }
10451
10452 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10453 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10454 {
10455 BUG_ON((unsigned)n > 0xFF);
10456 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10457 }
10458
10459 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10460 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10461 {
10462 BUG_ON((unsigned)n > 0xFF);
10463 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10464 }
10465
10466 +#ifdef CONFIG_X86_32
10467 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10468 +{
10469 + struct desc_struct d;
10470 +
10471 + if (likely(limit))
10472 + limit = (limit - 1UL) >> PAGE_SHIFT;
10473 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10474 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10475 +}
10476 +#endif
10477 +
10478 #endif /* _ASM_X86_DESC_H */
10479 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10480 index 278441f..b95a174 100644
10481 --- a/arch/x86/include/asm/desc_defs.h
10482 +++ b/arch/x86/include/asm/desc_defs.h
10483 @@ -31,6 +31,12 @@ struct desc_struct {
10484 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10485 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10486 };
10487 + struct {
10488 + u16 offset_low;
10489 + u16 seg;
10490 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10491 + unsigned offset_high: 16;
10492 + } gate;
10493 };
10494 } __attribute__((packed));
10495
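The new gate bitfield view above is what lets pack_gate() in desc.h fill the descriptor field by field instead of hand-assembling two 32-bit words. A quick user-space check that the two encodings agree, assuming a little-endian x86 target and GATE_INTERRUPT = 0xE; the base, seg and dpl values below are arbitrary:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gate32 {
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
        unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
        uint32_t base = 0xc0101234, seg = 0x60, type = 0xe, dpl = 0;

        /* old encoding, as removed from pack_gate() */
        uint32_t a = (seg << 16) | (base & 0xffff);
        uint32_t b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

        /* new encoding via the bitfield view */
        struct gate32 g = {
                .offset_low = base & 0xffff, .seg = seg, .reserved = 0,
                .type = type, .s = 0, .dpl = dpl, .p = 1,
                .offset_high = base >> 16,
        };
        uint32_t w[2];
        memcpy(w, &g, sizeof(w));

        printf("old: %08" PRIx32 " %08" PRIx32 "\n", a, b);
        printf("new: %08" PRIx32 " %08" PRIx32 "\n", w[0], w[1]);
        return 0;
}

The bitfield form also makes explicit what the old arithmetic buried in the 0x80 | type term: the present bit (p = 1) and the system-segment bit (s = 0).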
10496 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10497 index 3778256..c5d4fce 100644
10498 --- a/arch/x86/include/asm/e820.h
10499 +++ b/arch/x86/include/asm/e820.h
10500 @@ -69,7 +69,7 @@ struct e820map {
10501 #define ISA_START_ADDRESS 0xa0000
10502 #define ISA_END_ADDRESS 0x100000
10503
10504 -#define BIOS_BEGIN 0x000a0000
10505 +#define BIOS_BEGIN 0x000c0000
10506 #define BIOS_END 0x00100000
10507
10508 #define BIOS_ROM_BASE 0xffe00000
10509 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10510 index 5f962df..7289f09 100644
10511 --- a/arch/x86/include/asm/elf.h
10512 +++ b/arch/x86/include/asm/elf.h
10513 @@ -238,7 +238,25 @@ extern int force_personality32;
10514 the loader. We need to make sure that it is out of the way of the program
10515 that it will "exec", and that there is sufficient room for the brk. */
10516
10517 +#ifdef CONFIG_PAX_SEGMEXEC
10518 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10519 +#else
10520 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10521 +#endif
10522 +
10523 +#ifdef CONFIG_PAX_ASLR
10524 +#ifdef CONFIG_X86_32
10525 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10526 +
10527 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10528 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10529 +#else
10530 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10531 +
10532 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10533 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10534 +#endif
10535 +#endif
10536
10537 /* This yields a mask that user programs can use to figure out what
10538 instruction set this CPU supports. This could be done in user space,
10539 @@ -291,9 +309,7 @@ do { \
10540
10541 #define ARCH_DLINFO \
10542 do { \
10543 - if (vdso_enabled) \
10544 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10545 - (unsigned long)current->mm->context.vdso); \
10546 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10547 } while (0)
10548
10549 #define AT_SYSINFO 32
10550 @@ -304,7 +320,7 @@ do { \
10551
10552 #endif /* !CONFIG_X86_32 */
10553
10554 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10555 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10556
10557 #define VDSO_ENTRY \
10558 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10559 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10560 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10561 #define compat_arch_setup_additional_pages syscall32_setup_pages
10562
10563 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10564 -#define arch_randomize_brk arch_randomize_brk
10565 -
10566 /*
10567 * True on X86_32 or when emulating IA32 on X86_64
10568 */
10569 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10570 index cc70c1c..d96d011 100644
10571 --- a/arch/x86/include/asm/emergency-restart.h
10572 +++ b/arch/x86/include/asm/emergency-restart.h
10573 @@ -15,6 +15,6 @@ enum reboot_type {
10574
10575 extern enum reboot_type reboot_type;
10576
10577 -extern void machine_emergency_restart(void);
10578 +extern void machine_emergency_restart(void) __noreturn;
10579
10580 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10581 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10582 index d09bb03..4ea4194 100644
10583 --- a/arch/x86/include/asm/futex.h
10584 +++ b/arch/x86/include/asm/futex.h
10585 @@ -12,16 +12,18 @@
10586 #include <asm/system.h>
10587
10588 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10589 + typecheck(u32 __user *, uaddr); \
10590 asm volatile("1:\t" insn "\n" \
10591 "2:\t.section .fixup,\"ax\"\n" \
10592 "3:\tmov\t%3, %1\n" \
10593 "\tjmp\t2b\n" \
10594 "\t.previous\n" \
10595 _ASM_EXTABLE(1b, 3b) \
10596 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10597 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10598 : "i" (-EFAULT), "0" (oparg), "1" (0))
10599
10600 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10601 + typecheck(u32 __user *, uaddr); \
10602 asm volatile("1:\tmovl %2, %0\n" \
10603 "\tmovl\t%0, %3\n" \
10604 "\t" insn "\n" \
10605 @@ -34,7 +36,7 @@
10606 _ASM_EXTABLE(1b, 4b) \
10607 _ASM_EXTABLE(2b, 4b) \
10608 : "=&a" (oldval), "=&r" (ret), \
10609 - "+m" (*uaddr), "=&r" (tem) \
10610 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10611 : "r" (oparg), "i" (-EFAULT), "1" (0))
10612
10613 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10614 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10615
10616 switch (op) {
10617 case FUTEX_OP_SET:
10618 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10619 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10620 break;
10621 case FUTEX_OP_ADD:
10622 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10623 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10624 uaddr, oparg);
10625 break;
10626 case FUTEX_OP_OR:
10627 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10628 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10629 return -EFAULT;
10630
10631 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10632 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10633 "2:\t.section .fixup, \"ax\"\n"
10634 "3:\tmov %3, %0\n"
10635 "\tjmp 2b\n"
10636 "\t.previous\n"
10637 _ASM_EXTABLE(1b, 3b)
10638 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10639 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10640 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10641 : "memory"
10642 );
10643 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10644 index eb92a6e..b98b2f4 100644
10645 --- a/arch/x86/include/asm/hw_irq.h
10646 +++ b/arch/x86/include/asm/hw_irq.h
10647 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10648 extern void enable_IO_APIC(void);
10649
10650 /* Statistics */
10651 -extern atomic_t irq_err_count;
10652 -extern atomic_t irq_mis_count;
10653 +extern atomic_unchecked_t irq_err_count;
10654 +extern atomic_unchecked_t irq_mis_count;
10655
10656 /* EISA */
10657 extern void eisa_set_level_irq(unsigned int irq);
10658 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10659 index 2479049..3fb9795 100644
10660 --- a/arch/x86/include/asm/i387.h
10661 +++ b/arch/x86/include/asm/i387.h
10662 @@ -93,6 +93,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10663 {
10664 int err;
10665
10666 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10667 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10668 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10669 +#endif
10670 +
10671 /* See comment in fxsave() below. */
10672 #ifdef CONFIG_AS_FXSAVEQ
10673 asm volatile("1: fxrstorq %[fx]\n\t"
10674 @@ -122,6 +127,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10675 {
10676 int err;
10677
10678 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10679 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10680 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10681 +#endif
10682 +
10683 /*
10684 * Clear the bytes not touched by the fxsave and reserved
10685 * for the SW usage.
10686 @@ -278,7 +288,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10687 "emms\n\t" /* clear stack tags */
10688 "fildl %P[addr]", /* set F?P to defined value */
10689 X86_FEATURE_FXSAVE_LEAK,
10690 - [addr] "m" (tsk->thread.fpu.has_fpu));
10691 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10692
10693 return fpu_restore_checking(&tsk->thread.fpu);
10694 }
10695 @@ -445,7 +455,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
10696 static inline bool interrupted_user_mode(void)
10697 {
10698 struct pt_regs *regs = get_irq_regs();
10699 - return regs && user_mode_vm(regs);
10700 + return regs && user_mode(regs);
10701 }
10702
10703 /*
10704 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10705 index d8e8eef..99f81ae 100644
10706 --- a/arch/x86/include/asm/io.h
10707 +++ b/arch/x86/include/asm/io.h
10708 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10709
10710 #include <linux/vmalloc.h>
10711
10712 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10713 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10714 +{
10715 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10716 +}
10717 +
10718 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10719 +{
10720 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10721 +}
10722 +
10723 /*
10724 * Convert a virtual cached pointer to an uncached pointer
10725 */
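The two helpers added above cap physical-range accesses (e.g. through /dev/mem) at the CPU's reported physical address width. A user-space sketch of the arithmetic, assuming 4 KiB pages and passing boot_cpu_data.x86_phys_bits as a plain parameter (36 here, i.e. a 64 GiB ceiling):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long long addr, unsigned long long count,
                                 unsigned phys_bits)
{
        return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
               (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
        /* local APIC page: well below the 36-bit limit */
        printf("%d\n", valid_phys_addr_range(0xfee00000ULL, 0x1000, 36));
        /* a range whose last page crosses the limit is rejected */
        printf("%d\n", valid_phys_addr_range((1ULL << 36) - PAGE_SIZE, 2 * PAGE_SIZE, 36));
        return 0;
}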
10726 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10727 index bba3cf8..06bc8da 100644
10728 --- a/arch/x86/include/asm/irqflags.h
10729 +++ b/arch/x86/include/asm/irqflags.h
10730 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10731 sti; \
10732 sysexit
10733
10734 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10735 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10736 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10737 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10738 +
10739 #else
10740 #define INTERRUPT_RETURN iret
10741 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10742 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10743 index 5478825..839e88c 100644
10744 --- a/arch/x86/include/asm/kprobes.h
10745 +++ b/arch/x86/include/asm/kprobes.h
10746 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10747 #define RELATIVEJUMP_SIZE 5
10748 #define RELATIVECALL_OPCODE 0xe8
10749 #define RELATIVE_ADDR_SIZE 4
10750 -#define MAX_STACK_SIZE 64
10751 -#define MIN_STACK_SIZE(ADDR) \
10752 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10753 - THREAD_SIZE - (unsigned long)(ADDR))) \
10754 - ? (MAX_STACK_SIZE) \
10755 - : (((unsigned long)current_thread_info()) + \
10756 - THREAD_SIZE - (unsigned long)(ADDR)))
10757 +#define MAX_STACK_SIZE 64UL
10758 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10759
10760 #define flush_insn_slot(p) do { } while (0)
10761
10762 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10763 index 52d6640..a013b87 100644
10764 --- a/arch/x86/include/asm/kvm_host.h
10765 +++ b/arch/x86/include/asm/kvm_host.h
10766 @@ -663,7 +663,7 @@ struct kvm_x86_ops {
10767 int (*check_intercept)(struct kvm_vcpu *vcpu,
10768 struct x86_instruction_info *info,
10769 enum x86_intercept_stage stage);
10770 -};
10771 +} __do_const;
10772
10773 struct kvm_arch_async_pf {
10774 u32 token;
10775 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10776 index 9cdae5d..300d20f 100644
10777 --- a/arch/x86/include/asm/local.h
10778 +++ b/arch/x86/include/asm/local.h
10779 @@ -18,26 +18,58 @@ typedef struct {
10780
10781 static inline void local_inc(local_t *l)
10782 {
10783 - asm volatile(_ASM_INC "%0"
10784 + asm volatile(_ASM_INC "%0\n"
10785 +
10786 +#ifdef CONFIG_PAX_REFCOUNT
10787 + "jno 0f\n"
10788 + _ASM_DEC "%0\n"
10789 + "int $4\n0:\n"
10790 + _ASM_EXTABLE(0b, 0b)
10791 +#endif
10792 +
10793 : "+m" (l->a.counter));
10794 }
10795
10796 static inline void local_dec(local_t *l)
10797 {
10798 - asm volatile(_ASM_DEC "%0"
10799 + asm volatile(_ASM_DEC "%0\n"
10800 +
10801 +#ifdef CONFIG_PAX_REFCOUNT
10802 + "jno 0f\n"
10803 + _ASM_INC "%0\n"
10804 + "int $4\n0:\n"
10805 + _ASM_EXTABLE(0b, 0b)
10806 +#endif
10807 +
10808 : "+m" (l->a.counter));
10809 }
10810
10811 static inline void local_add(long i, local_t *l)
10812 {
10813 - asm volatile(_ASM_ADD "%1,%0"
10814 + asm volatile(_ASM_ADD "%1,%0\n"
10815 +
10816 +#ifdef CONFIG_PAX_REFCOUNT
10817 + "jno 0f\n"
10818 + _ASM_SUB "%1,%0\n"
10819 + "int $4\n0:\n"
10820 + _ASM_EXTABLE(0b, 0b)
10821 +#endif
10822 +
10823 : "+m" (l->a.counter)
10824 : "ir" (i));
10825 }
10826
10827 static inline void local_sub(long i, local_t *l)
10828 {
10829 - asm volatile(_ASM_SUB "%1,%0"
10830 + asm volatile(_ASM_SUB "%1,%0\n"
10831 +
10832 +#ifdef CONFIG_PAX_REFCOUNT
10833 + "jno 0f\n"
10834 + _ASM_ADD "%1,%0\n"
10835 + "int $4\n0:\n"
10836 + _ASM_EXTABLE(0b, 0b)
10837 +#endif
10838 +
10839 : "+m" (l->a.counter)
10840 : "ir" (i));
10841 }
10842 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10843 {
10844 unsigned char c;
10845
10846 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10847 + asm volatile(_ASM_SUB "%2,%0\n"
10848 +
10849 +#ifdef CONFIG_PAX_REFCOUNT
10850 + "jno 0f\n"
10851 + _ASM_ADD "%2,%0\n"
10852 + "int $4\n0:\n"
10853 + _ASM_EXTABLE(0b, 0b)
10854 +#endif
10855 +
10856 + "sete %1\n"
10857 : "+m" (l->a.counter), "=qm" (c)
10858 : "ir" (i) : "memory");
10859 return c;
10860 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10861 {
10862 unsigned char c;
10863
10864 - asm volatile(_ASM_DEC "%0; sete %1"
10865 + asm volatile(_ASM_DEC "%0\n"
10866 +
10867 +#ifdef CONFIG_PAX_REFCOUNT
10868 + "jno 0f\n"
10869 + _ASM_INC "%0\n"
10870 + "int $4\n0:\n"
10871 + _ASM_EXTABLE(0b, 0b)
10872 +#endif
10873 +
10874 + "sete %1\n"
10875 : "+m" (l->a.counter), "=qm" (c)
10876 : : "memory");
10877 return c != 0;
10878 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10879 {
10880 unsigned char c;
10881
10882 - asm volatile(_ASM_INC "%0; sete %1"
10883 + asm volatile(_ASM_INC "%0\n"
10884 +
10885 +#ifdef CONFIG_PAX_REFCOUNT
10886 + "jno 0f\n"
10887 + _ASM_DEC "%0\n"
10888 + "int $4\n0:\n"
10889 + _ASM_EXTABLE(0b, 0b)
10890 +#endif
10891 +
10892 + "sete %1\n"
10893 : "+m" (l->a.counter), "=qm" (c)
10894 : : "memory");
10895 return c != 0;
10896 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10897 {
10898 unsigned char c;
10899
10900 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10901 + asm volatile(_ASM_ADD "%2,%0\n"
10902 +
10903 +#ifdef CONFIG_PAX_REFCOUNT
10904 + "jno 0f\n"
10905 + _ASM_SUB "%2,%0\n"
10906 + "int $4\n0:\n"
10907 + _ASM_EXTABLE(0b, 0b)
10908 +#endif
10909 +
10910 + "sets %1\n"
10911 : "+m" (l->a.counter), "=qm" (c)
10912 : "ir" (i) : "memory");
10913 return c;
10914 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10915 #endif
10916 /* Modern 486+ processor */
10917 __i = i;
10918 - asm volatile(_ASM_XADD "%0, %1;"
10919 + asm volatile(_ASM_XADD "%0, %1\n"
10920 +
10921 +#ifdef CONFIG_PAX_REFCOUNT
10922 + "jno 0f\n"
10923 + _ASM_MOV "%0,%1\n"
10924 + "int $4\n0:\n"
10925 + _ASM_EXTABLE(0b, 0b)
10926 +#endif
10927 +
10928 : "+r" (i), "+m" (l->a.counter)
10929 : : "memory");
10930 return i + __i;
10931 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10932 index 593e51d..fa69c9a 100644
10933 --- a/arch/x86/include/asm/mman.h
10934 +++ b/arch/x86/include/asm/mman.h
10935 @@ -5,4 +5,14 @@
10936
10937 #include <asm-generic/mman.h>
10938
10939 +#ifdef __KERNEL__
10940 +#ifndef __ASSEMBLY__
10941 +#ifdef CONFIG_X86_32
10942 +#define arch_mmap_check i386_mmap_check
10943 +int i386_mmap_check(unsigned long addr, unsigned long len,
10944 + unsigned long flags);
10945 +#endif
10946 +#endif
10947 +#endif
10948 +
10949 #endif /* _ASM_X86_MMAN_H */
10950 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10951 index 5f55e69..e20bfb1 100644
10952 --- a/arch/x86/include/asm/mmu.h
10953 +++ b/arch/x86/include/asm/mmu.h
10954 @@ -9,7 +9,7 @@
10955 * we put the segment information here.
10956 */
10957 typedef struct {
10958 - void *ldt;
10959 + struct desc_struct *ldt;
10960 int size;
10961
10962 #ifdef CONFIG_X86_64
10963 @@ -18,7 +18,19 @@ typedef struct {
10964 #endif
10965
10966 struct mutex lock;
10967 - void *vdso;
10968 + unsigned long vdso;
10969 +
10970 +#ifdef CONFIG_X86_32
10971 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10972 + unsigned long user_cs_base;
10973 + unsigned long user_cs_limit;
10974 +
10975 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10976 + cpumask_t cpu_user_cs_mask;
10977 +#endif
10978 +
10979 +#endif
10980 +#endif
10981 } mm_context_t;
10982
10983 #ifdef CONFIG_SMP
10984 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10985 index 6902152..399f3a2 100644
10986 --- a/arch/x86/include/asm/mmu_context.h
10987 +++ b/arch/x86/include/asm/mmu_context.h
10988 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10989
10990 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10991 {
10992 +
10993 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10994 + unsigned int i;
10995 + pgd_t *pgd;
10996 +
10997 + pax_open_kernel();
10998 + pgd = get_cpu_pgd(smp_processor_id());
10999 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
11000 + set_pgd_batched(pgd+i, native_make_pgd(0));
11001 + pax_close_kernel();
11002 +#endif
11003 +
11004 #ifdef CONFIG_SMP
11005 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
11006 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
11007 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11008 struct task_struct *tsk)
11009 {
11010 unsigned cpu = smp_processor_id();
11011 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11012 + int tlbstate = TLBSTATE_OK;
11013 +#endif
11014
11015 if (likely(prev != next)) {
11016 #ifdef CONFIG_SMP
11017 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11018 + tlbstate = percpu_read(cpu_tlbstate.state);
11019 +#endif
11020 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11021 percpu_write(cpu_tlbstate.active_mm, next);
11022 #endif
11023 cpumask_set_cpu(cpu, mm_cpumask(next));
11024
11025 /* Re-load page tables */
11026 +#ifdef CONFIG_PAX_PER_CPU_PGD
11027 + pax_open_kernel();
11028 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11029 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11030 + pax_close_kernel();
11031 + load_cr3(get_cpu_pgd(cpu));
11032 +#else
11033 load_cr3(next->pgd);
11034 +#endif
11035
11036 /* stop flush ipis for the previous mm */
11037 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11038 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11039 */
11040 if (unlikely(prev->context.ldt != next->context.ldt))
11041 load_LDT_nolock(&next->context);
11042 - }
11043 +
11044 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11045 + if (!(__supported_pte_mask & _PAGE_NX)) {
11046 + smp_mb__before_clear_bit();
11047 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11048 + smp_mb__after_clear_bit();
11049 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11050 + }
11051 +#endif
11052 +
11053 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11054 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11055 + prev->context.user_cs_limit != next->context.user_cs_limit))
11056 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11057 #ifdef CONFIG_SMP
11058 + else if (unlikely(tlbstate != TLBSTATE_OK))
11059 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11060 +#endif
11061 +#endif
11062 +
11063 + }
11064 else {
11065 +
11066 +#ifdef CONFIG_PAX_PER_CPU_PGD
11067 + pax_open_kernel();
11068 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11069 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11070 + pax_close_kernel();
11071 + load_cr3(get_cpu_pgd(cpu));
11072 +#endif
11073 +
11074 +#ifdef CONFIG_SMP
11075 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11076 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11077
11078 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11079 * tlb flush IPI delivery. We must reload CR3
11080 * to make sure to use no freed page tables.
11081 */
11082 +
11083 +#ifndef CONFIG_PAX_PER_CPU_PGD
11084 load_cr3(next->pgd);
11085 +#endif
11086 +
11087 load_LDT_nolock(&next->context);
11088 +
11089 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11090 + if (!(__supported_pte_mask & _PAGE_NX))
11091 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11092 +#endif
11093 +
11094 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11095 +#ifdef CONFIG_PAX_PAGEEXEC
11096 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
11097 +#endif
11098 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11099 +#endif
11100 +
11101 }
11102 +#endif
11103 }
11104 -#endif
11105 }
11106
11107 #define activate_mm(prev, next) \
11108 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11109 index 9eae775..c914fea 100644
11110 --- a/arch/x86/include/asm/module.h
11111 +++ b/arch/x86/include/asm/module.h
11112 @@ -5,6 +5,7 @@
11113
11114 #ifdef CONFIG_X86_64
11115 /* X86_64 does not define MODULE_PROC_FAMILY */
11116 +#define MODULE_PROC_FAMILY ""
11117 #elif defined CONFIG_M386
11118 #define MODULE_PROC_FAMILY "386 "
11119 #elif defined CONFIG_M486
11120 @@ -59,8 +60,20 @@
11121 #error unknown processor family
11122 #endif
11123
11124 -#ifdef CONFIG_X86_32
11125 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
11126 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11127 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11128 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11129 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11130 +#else
11131 +#define MODULE_PAX_KERNEXEC ""
11132 #endif
11133
11134 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11135 +#define MODULE_PAX_UDEREF "UDEREF "
11136 +#else
11137 +#define MODULE_PAX_UDEREF ""
11138 +#endif
11139 +
11140 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11141 +
11142 #endif /* _ASM_X86_MODULE_H */
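The net effect of the MODULE_PAX_* fragments above is that the PaX configuration becomes part of the module vermagic, so a module built without matching KERNEXEC/UDEREF settings fails the vermagic check at load time. A tiny illustration of the string that gets composed, assuming a 64-bit kernel (empty MODULE_PROC_FAMILY) with the BTS KERNEXEC method and UDEREF enabled:

#include <stdio.h>

#define MODULE_PROC_FAMILY ""
#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "     /* CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS */
#define MODULE_PAX_UDEREF "UDEREF "             /* CONFIG_PAX_MEMORY_UDEREF */
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
        printf("arch vermagic fragment: \"%s\"\n", MODULE_ARCH_VERMAGIC);
        return 0;
}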
11143 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11144 index 7639dbf..e08a58c 100644
11145 --- a/arch/x86/include/asm/page_64_types.h
11146 +++ b/arch/x86/include/asm/page_64_types.h
11147 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11148
11149 /* duplicated to the one in bootmem.h */
11150 extern unsigned long max_pfn;
11151 -extern unsigned long phys_base;
11152 +extern const unsigned long phys_base;
11153
11154 extern unsigned long __phys_addr(unsigned long);
11155 #define __phys_reloc_hide(x) (x)
11156 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11157 index a7d2db9..edb023e 100644
11158 --- a/arch/x86/include/asm/paravirt.h
11159 +++ b/arch/x86/include/asm/paravirt.h
11160 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11161 val);
11162 }
11163
11164 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11165 +{
11166 + pgdval_t val = native_pgd_val(pgd);
11167 +
11168 + if (sizeof(pgdval_t) > sizeof(long))
11169 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11170 + val, (u64)val >> 32);
11171 + else
11172 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11173 + val);
11174 +}
11175 +
11176 static inline void pgd_clear(pgd_t *pgdp)
11177 {
11178 set_pgd(pgdp, __pgd(0));
11179 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11180 pv_mmu_ops.set_fixmap(idx, phys, flags);
11181 }
11182
11183 +#ifdef CONFIG_PAX_KERNEXEC
11184 +static inline unsigned long pax_open_kernel(void)
11185 +{
11186 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11187 +}
11188 +
11189 +static inline unsigned long pax_close_kernel(void)
11190 +{
11191 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11192 +}
11193 +#else
11194 +static inline unsigned long pax_open_kernel(void) { return 0; }
11195 +static inline unsigned long pax_close_kernel(void) { return 0; }
11196 +#endif
11197 +
11198 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11199
11200 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11201 @@ -964,7 +991,7 @@ extern void default_banner(void);
11202
11203 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11204 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11205 -#define PARA_INDIRECT(addr) *%cs:addr
11206 +#define PARA_INDIRECT(addr) *%ss:addr
11207 #endif
11208
11209 #define INTERRUPT_RETURN \
11210 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
11211 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11212 CLBR_NONE, \
11213 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11214 +
11215 +#define GET_CR0_INTO_RDI \
11216 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11217 + mov %rax,%rdi
11218 +
11219 +#define SET_RDI_INTO_CR0 \
11220 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11221 +
11222 +#define GET_CR3_INTO_RDI \
11223 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11224 + mov %rax,%rdi
11225 +
11226 +#define SET_RDI_INTO_CR3 \
11227 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11228 +
11229 #endif /* CONFIG_X86_32 */
11230
11231 #endif /* __ASSEMBLY__ */
11232 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11233 index 8e8b9a4..f07d725 100644
11234 --- a/arch/x86/include/asm/paravirt_types.h
11235 +++ b/arch/x86/include/asm/paravirt_types.h
11236 @@ -84,20 +84,20 @@ struct pv_init_ops {
11237 */
11238 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11239 unsigned long addr, unsigned len);
11240 -};
11241 +} __no_const;
11242
11243
11244 struct pv_lazy_ops {
11245 /* Set deferred update mode, used for batching operations. */
11246 void (*enter)(void);
11247 void (*leave)(void);
11248 -};
11249 +} __no_const;
11250
11251 struct pv_time_ops {
11252 unsigned long long (*sched_clock)(void);
11253 unsigned long long (*steal_clock)(int cpu);
11254 unsigned long (*get_tsc_khz)(void);
11255 -};
11256 +} __no_const;
11257
11258 struct pv_cpu_ops {
11259 /* hooks for various privileged instructions */
11260 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
11261
11262 void (*start_context_switch)(struct task_struct *prev);
11263 void (*end_context_switch)(struct task_struct *next);
11264 -};
11265 +} __no_const;
11266
11267 struct pv_irq_ops {
11268 /*
11269 @@ -224,7 +224,7 @@ struct pv_apic_ops {
11270 unsigned long start_eip,
11271 unsigned long start_esp);
11272 #endif
11273 -};
11274 +} __no_const;
11275
11276 struct pv_mmu_ops {
11277 unsigned long (*read_cr2)(void);
11278 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
11279 struct paravirt_callee_save make_pud;
11280
11281 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11282 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11283 #endif /* PAGETABLE_LEVELS == 4 */
11284 #endif /* PAGETABLE_LEVELS >= 3 */
11285
11286 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
11287 an mfn. We can tell which is which from the index. */
11288 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11289 phys_addr_t phys, pgprot_t flags);
11290 +
11291 +#ifdef CONFIG_PAX_KERNEXEC
11292 + unsigned long (*pax_open_kernel)(void);
11293 + unsigned long (*pax_close_kernel)(void);
11294 +#endif
11295 +
11296 };
11297
11298 struct arch_spinlock;
11299 @@ -334,7 +341,7 @@ struct pv_lock_ops {
11300 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11301 int (*spin_trylock)(struct arch_spinlock *lock);
11302 void (*spin_unlock)(struct arch_spinlock *lock);
11303 -};
11304 +} __no_const;
11305
11306 /* This contains all the paravirt structures: we get a convenient
11307 * number for each function using the offset which we use to indicate
11308 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11309 index b4389a4..7024269 100644
11310 --- a/arch/x86/include/asm/pgalloc.h
11311 +++ b/arch/x86/include/asm/pgalloc.h
11312 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11313 pmd_t *pmd, pte_t *pte)
11314 {
11315 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11316 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11317 +}
11318 +
11319 +static inline void pmd_populate_user(struct mm_struct *mm,
11320 + pmd_t *pmd, pte_t *pte)
11321 +{
11322 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11323 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11324 }
11325
11326 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11327
11328 #ifdef CONFIG_X86_PAE
11329 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11330 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11331 +{
11332 + pud_populate(mm, pudp, pmd);
11333 +}
11334 #else /* !CONFIG_X86_PAE */
11335 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11336 {
11337 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11338 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11339 }
11340 +
11341 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11342 +{
11343 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11344 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11345 +}
11346 #endif /* CONFIG_X86_PAE */
11347
11348 #if PAGETABLE_LEVELS > 3
11349 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11350 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11351 }
11352
11353 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11354 +{
11355 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11356 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11357 +}
11358 +
11359 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11360 {
11361 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11362 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11363 index 98391db..8f6984e 100644
11364 --- a/arch/x86/include/asm/pgtable-2level.h
11365 +++ b/arch/x86/include/asm/pgtable-2level.h
11366 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11367
11368 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11369 {
11370 + pax_open_kernel();
11371 *pmdp = pmd;
11372 + pax_close_kernel();
11373 }
11374
11375 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11376 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11377 index effff47..bbb8295 100644
11378 --- a/arch/x86/include/asm/pgtable-3level.h
11379 +++ b/arch/x86/include/asm/pgtable-3level.h
11380 @@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
11381 ptep->pte_low = pte.pte_low;
11382 }
11383
11384 +#define __HAVE_ARCH_READ_PMD_ATOMIC
11385 +/*
11386 + * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
11387 + * a "*pmdp" dereference done by gcc. Problem is, in certain places
11388 + * where pte_offset_map_lock is called, concurrent page faults are
11389 + * allowed, if the mmap_sem is hold for reading. An example is mincore
11390 + * vs page faults vs MADV_DONTNEED. On the page fault side
11391 + * pmd_populate rightfully does a set_64bit, but if we're reading the
11392 + * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
11393 + * because gcc will not read the 64bit of the pmd atomically. To fix
11394 + * this all places running pmd_offset_map_lock() while holding the
11395 + * mmap_sem in read mode, shall read the pmdp pointer using this
11396 + * function to know if the pmd is null nor not, and in turn to know if
11397 + * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
11398 + * operations.
11399 + *
11400 + * Without THP if the mmap_sem is hold for reading, the
11401 + * pmd can only transition from null to not null while read_pmd_atomic runs.
11402 + * So there's no need of literally reading it atomically.
11403 + *
11404 + * With THP if the mmap_sem is hold for reading, the pmd can become
11405 + * THP or null or point to a pte (and in turn become "stable") at any
11406 + * time under read_pmd_atomic, so it's mandatory to read it atomically
11407 + * with cmpxchg8b.
11408 + */
11409 +#ifndef CONFIG_TRANSPARENT_HUGEPAGE
11410 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11411 +{
11412 + pmdval_t ret;
11413 + u32 *tmp = (u32 *)pmdp;
11414 +
11415 + ret = (pmdval_t) (*tmp);
11416 + if (ret) {
11417 + /*
11418 + * If the low part is null, we must not read the high part
11419 + * or we can end up with a partial pmd.
11420 + */
11421 + smp_rmb();
11422 + ret |= ((pmdval_t)*(tmp + 1)) << 32;
11423 + }
11424 +
11425 + return __pmd(ret);
11426 +}
11427 +#else /* CONFIG_TRANSPARENT_HUGEPAGE */
11428 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
11429 +{
11430 + return __pmd(atomic64_read((atomic64_t *)pmdp));
11431 +}
11432 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
11433 +
11434 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11435 {
11436 set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
11437 @@ -38,12 +88,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11438
11439 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11440 {
11441 + pax_open_kernel();
11442 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11443 + pax_close_kernel();
11444 }
11445
11446 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11447 {
11448 + pax_open_kernel();
11449 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11450 + pax_close_kernel();
11451 }
11452
11453 /*
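For reference, here are the two read strategies the comment and helpers above describe, mirrored in user space on any 64-bit value that another thread may publish low-half-first. This is a sketch assuming GCC/Clang __atomic builtins; in the kernel the THP path uses cmpxchg8b via atomic64_read, and the function names below are illustrative:

#include <stdint.h>

/* non-THP case: low half first, then high half, with an acquire fence
 * standing in for smp_rmb() */
static uint64_t read_lowhigh(const uint64_t *p)
{
        const uint32_t *tmp = (const uint32_t *)p;
        uint64_t ret = __atomic_load_n(&tmp[0], __ATOMIC_RELAXED);

        if (ret) {
                __atomic_thread_fence(__ATOMIC_ACQUIRE);
                ret |= (uint64_t)__atomic_load_n(&tmp[1], __ATOMIC_RELAXED) << 32;
        }
        return ret;
}

/* THP case: the whole 64 bits must be read in one shot */
static uint64_t read_whole(const uint64_t *p)
{
        return __atomic_load_n(p, __ATOMIC_RELAXED);
}

The kernel version additionally leans on the invariant spelled out in the comment: without THP the pmd can only go from none to populated while the mmap_sem is held for read, so a non-null low half is enough to make the high half stable.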
11454 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11455 index 49afb3f..ed14d07 100644
11456 --- a/arch/x86/include/asm/pgtable.h
11457 +++ b/arch/x86/include/asm/pgtable.h
11458 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11459
11460 #ifndef __PAGETABLE_PUD_FOLDED
11461 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11462 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11463 #define pgd_clear(pgd) native_pgd_clear(pgd)
11464 #endif
11465
11466 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11467
11468 #define arch_end_context_switch(prev) do {} while(0)
11469
11470 +#define pax_open_kernel() native_pax_open_kernel()
11471 +#define pax_close_kernel() native_pax_close_kernel()
11472 #endif /* CONFIG_PARAVIRT */
11473
11474 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11475 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11476 +
11477 +#ifdef CONFIG_PAX_KERNEXEC
11478 +static inline unsigned long native_pax_open_kernel(void)
11479 +{
11480 + unsigned long cr0;
11481 +
11482 + preempt_disable();
11483 + barrier();
11484 + cr0 = read_cr0() ^ X86_CR0_WP;
11485 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11486 + write_cr0(cr0);
11487 + return cr0 ^ X86_CR0_WP;
11488 +}
11489 +
11490 +static inline unsigned long native_pax_close_kernel(void)
11491 +{
11492 + unsigned long cr0;
11493 +
11494 + cr0 = read_cr0() ^ X86_CR0_WP;
11495 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11496 + write_cr0(cr0);
11497 + barrier();
11498 + preempt_enable_no_resched();
11499 + return cr0 ^ X86_CR0_WP;
11500 +}
11501 +#else
11502 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11503 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11504 +#endif
11505 +
11506 /*
11507 * The following only work if pte_present() is true.
11508 * Undefined behaviour if not..
11509 */
11510 +static inline int pte_user(pte_t pte)
11511 +{
11512 + return pte_val(pte) & _PAGE_USER;
11513 +}
11514 +
11515 static inline int pte_dirty(pte_t pte)
11516 {
11517 return pte_flags(pte) & _PAGE_DIRTY;
11518 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11519 return pte_clear_flags(pte, _PAGE_RW);
11520 }
11521
11522 +static inline pte_t pte_mkread(pte_t pte)
11523 +{
11524 + return __pte(pte_val(pte) | _PAGE_USER);
11525 +}
11526 +
11527 static inline pte_t pte_mkexec(pte_t pte)
11528 {
11529 - return pte_clear_flags(pte, _PAGE_NX);
11530 +#ifdef CONFIG_X86_PAE
11531 + if (__supported_pte_mask & _PAGE_NX)
11532 + return pte_clear_flags(pte, _PAGE_NX);
11533 + else
11534 +#endif
11535 + return pte_set_flags(pte, _PAGE_USER);
11536 +}
11537 +
11538 +static inline pte_t pte_exprotect(pte_t pte)
11539 +{
11540 +#ifdef CONFIG_X86_PAE
11541 + if (__supported_pte_mask & _PAGE_NX)
11542 + return pte_set_flags(pte, _PAGE_NX);
11543 + else
11544 +#endif
11545 + return pte_clear_flags(pte, _PAGE_USER);
11546 }
11547
11548 static inline pte_t pte_mkdirty(pte_t pte)
11549 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11550 #endif
11551
11552 #ifndef __ASSEMBLY__
11553 +
11554 +#ifdef CONFIG_PAX_PER_CPU_PGD
11555 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11556 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11557 +{
11558 + return cpu_pgd[cpu];
11559 +}
11560 +#endif
11561 +
11562 #include <linux/mm_types.h>
11563
11564 static inline int pte_none(pte_t pte)
11565 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11566
11567 static inline int pgd_bad(pgd_t pgd)
11568 {
11569 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11570 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11571 }
11572
11573 static inline int pgd_none(pgd_t pgd)
11574 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11575 * pgd_offset() returns a (pgd_t *)
11576 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11577 */
11578 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11579 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11580 +
11581 +#ifdef CONFIG_PAX_PER_CPU_PGD
11582 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11583 +#endif
11584 +
11585 /*
11586 * a shortcut which implies the use of the kernel's pgd, instead
11587 * of a process's
11588 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11589 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11590 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11591
11592 +#ifdef CONFIG_X86_32
11593 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11594 +#else
11595 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11596 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11597 +
11598 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11599 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11600 +#else
11601 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11602 +#endif
11603 +
11604 +#endif
11605 +
11606 #ifndef __ASSEMBLY__
11607
11608 extern int direct_gbpages;
11609 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11610 * dst and src can be on the same page, but the range must not overlap,
11611 * and must not cross a page boundary.
11612 */
11613 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11614 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11615 {
11616 - memcpy(dst, src, count * sizeof(pgd_t));
11617 + pax_open_kernel();
11618 + while (count--)
11619 + *dst++ = *src++;
11620 + pax_close_kernel();
11621 }
11622
11623 +#ifdef CONFIG_PAX_PER_CPU_PGD
11624 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11625 +#endif
11626 +
11627 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11628 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11629 +#else
11630 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11631 +#endif
11632
11633 #include <asm-generic/pgtable.h>
11634 #endif /* __ASSEMBLY__ */
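
The pgtable.h hunks above add native_pax_open_kernel()/native_pax_close_kernel() for CONFIG_PAX_KERNEXEC: they XOR CR0.WP off so the kernel can write to otherwise read-only page tables, then XOR it back on when done, with preemption disabled and compiler barriers around the window. Below is a minimal standalone C model of that open/close pairing; the CR0 register and read_cr0()/write_cr0() are simulated, and preempt_disable()/barrier() are omitted, so this illustrates only the bit manipulation, not kernel code.

    #include <assert.h>
    #include <stdio.h>

    #define X86_CR0_WP (1UL << 16)              /* write-protect bit */

    static unsigned long fake_cr0 = X86_CR0_WP; /* simulated CR0, WP set */

    static unsigned long read_cr0(void) { return fake_cr0; }
    static void write_cr0(unsigned long v) { fake_cr0 = v; }

    /* mirrors native_pax_open_kernel(): clear WP, return the previous image */
    static unsigned long pax_open_kernel(void)
    {
        unsigned long cr0 = read_cr0() ^ X86_CR0_WP;
        assert(!(cr0 & X86_CR0_WP));            /* BUG_ON() in the patch */
        write_cr0(cr0);
        return cr0 ^ X86_CR0_WP;
    }

    /* mirrors native_pax_close_kernel(): set WP again */
    static unsigned long pax_close_kernel(void)
    {
        unsigned long cr0 = read_cr0() ^ X86_CR0_WP;
        assert(cr0 & X86_CR0_WP);
        write_cr0(cr0);
        return cr0 ^ X86_CR0_WP;
    }

    int main(void)
    {
        pax_open_kernel();                      /* writable window begins */
        printf("WP while open:  %lu\n", fake_cr0 & X86_CR0_WP);
        pax_close_kernel();                     /* window ends */
        printf("WP after close: %lu\n", fake_cr0 & X86_CR0_WP);
        return 0;
    }
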
11635 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11636 index 0c92113..34a77c6 100644
11637 --- a/arch/x86/include/asm/pgtable_32.h
11638 +++ b/arch/x86/include/asm/pgtable_32.h
11639 @@ -25,9 +25,6 @@
11640 struct mm_struct;
11641 struct vm_area_struct;
11642
11643 -extern pgd_t swapper_pg_dir[1024];
11644 -extern pgd_t initial_page_table[1024];
11645 -
11646 static inline void pgtable_cache_init(void) { }
11647 static inline void check_pgt_cache(void) { }
11648 void paging_init(void);
11649 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11650 # include <asm/pgtable-2level.h>
11651 #endif
11652
11653 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11654 +extern pgd_t initial_page_table[PTRS_PER_PGD];
11655 +#ifdef CONFIG_X86_PAE
11656 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11657 +#endif
11658 +
11659 #if defined(CONFIG_HIGHPTE)
11660 #define pte_offset_map(dir, address) \
11661 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11662 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11663 /* Clear a kernel PTE and flush it from the TLB */
11664 #define kpte_clear_flush(ptep, vaddr) \
11665 do { \
11666 + pax_open_kernel(); \
11667 pte_clear(&init_mm, (vaddr), (ptep)); \
11668 + pax_close_kernel(); \
11669 __flush_tlb_one((vaddr)); \
11670 } while (0)
11671
11672 @@ -74,6 +79,9 @@ do { \
11673
11674 #endif /* !__ASSEMBLY__ */
11675
11676 +#define HAVE_ARCH_UNMAPPED_AREA
11677 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11678 +
11679 /*
11680 * kern_addr_valid() is (1) for FLATMEM and (0) for
11681 * SPARSEMEM and DISCONTIGMEM
11682 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11683 index ed5903b..c7fe163 100644
11684 --- a/arch/x86/include/asm/pgtable_32_types.h
11685 +++ b/arch/x86/include/asm/pgtable_32_types.h
11686 @@ -8,7 +8,7 @@
11687 */
11688 #ifdef CONFIG_X86_PAE
11689 # include <asm/pgtable-3level_types.h>
11690 -# define PMD_SIZE (1UL << PMD_SHIFT)
11691 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11692 # define PMD_MASK (~(PMD_SIZE - 1))
11693 #else
11694 # include <asm/pgtable-2level_types.h>
11695 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11696 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11697 #endif
11698
11699 +#ifdef CONFIG_PAX_KERNEXEC
11700 +#ifndef __ASSEMBLY__
11701 +extern unsigned char MODULES_EXEC_VADDR[];
11702 +extern unsigned char MODULES_EXEC_END[];
11703 +#endif
11704 +#include <asm/boot.h>
11705 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11706 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11707 +#else
11708 +#define ktla_ktva(addr) (addr)
11709 +#define ktva_ktla(addr) (addr)
11710 +#endif
11711 +
11712 #define MODULES_VADDR VMALLOC_START
11713 #define MODULES_END VMALLOC_END
11714 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
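
Under KERNEXEC the 32-bit hunk above introduces ktla_ktva()/ktva_ktla() to translate between the kernel text's linear and virtual addresses by adding or subtracting LOAD_PHYSICAL_ADDR + PAGE_OFFSET. A small sketch of the round trip, using typical 32-bit defaults for the two constants (the real values come from the kernel configuration):

    #include <assert.h>
    #include <stdio.h>

    /* illustrative defaults; the real values come from the kernel config */
    #define PAGE_OFFSET        0xc0000000UL
    #define LOAD_PHYSICAL_ADDR 0x01000000UL

    /* kernel text linear address -> virtual address, as in the KERNEXEC macro */
    static unsigned long ktla_ktva(unsigned long addr)
    {
        return addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET;
    }

    /* and the inverse translation */
    static unsigned long ktva_ktla(unsigned long addr)
    {
        return addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET;
    }

    int main(void)
    {
        unsigned long addr = 0x00100000UL;

        assert(ktva_ktla(ktla_ktva(addr)) == addr); /* the two are inverses */
        printf("%#lx -> %#lx\n", addr, ktla_ktva(addr));
        return 0;
    }
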
11715 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11716 index 975f709..9f779c9 100644
11717 --- a/arch/x86/include/asm/pgtable_64.h
11718 +++ b/arch/x86/include/asm/pgtable_64.h
11719 @@ -16,10 +16,14 @@
11720
11721 extern pud_t level3_kernel_pgt[512];
11722 extern pud_t level3_ident_pgt[512];
11723 +extern pud_t level3_vmalloc_start_pgt[512];
11724 +extern pud_t level3_vmalloc_end_pgt[512];
11725 +extern pud_t level3_vmemmap_pgt[512];
11726 +extern pud_t level2_vmemmap_pgt[512];
11727 extern pmd_t level2_kernel_pgt[512];
11728 extern pmd_t level2_fixmap_pgt[512];
11729 -extern pmd_t level2_ident_pgt[512];
11730 -extern pgd_t init_level4_pgt[];
11731 +extern pmd_t level2_ident_pgt[512*2];
11732 +extern pgd_t init_level4_pgt[512];
11733
11734 #define swapper_pg_dir init_level4_pgt
11735
11736 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11737
11738 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11739 {
11740 + pax_open_kernel();
11741 *pmdp = pmd;
11742 + pax_close_kernel();
11743 }
11744
11745 static inline void native_pmd_clear(pmd_t *pmd)
11746 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11747
11748 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11749 {
11750 + pax_open_kernel();
11751 *pudp = pud;
11752 + pax_close_kernel();
11753 }
11754
11755 static inline void native_pud_clear(pud_t *pud)
11756 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11757
11758 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11759 {
11760 + pax_open_kernel();
11761 + *pgdp = pgd;
11762 + pax_close_kernel();
11763 +}
11764 +
11765 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11766 +{
11767 *pgdp = pgd;
11768 }
11769
11770 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11771 index 766ea16..5b96cb3 100644
11772 --- a/arch/x86/include/asm/pgtable_64_types.h
11773 +++ b/arch/x86/include/asm/pgtable_64_types.h
11774 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11775 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11776 #define MODULES_END _AC(0xffffffffff000000, UL)
11777 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11778 +#define MODULES_EXEC_VADDR MODULES_VADDR
11779 +#define MODULES_EXEC_END MODULES_END
11780 +
11781 +#define ktla_ktva(addr) (addr)
11782 +#define ktva_ktla(addr) (addr)
11783
11784 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11785 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11786 index 013286a..8b42f4f 100644
11787 --- a/arch/x86/include/asm/pgtable_types.h
11788 +++ b/arch/x86/include/asm/pgtable_types.h
11789 @@ -16,13 +16,12 @@
11790 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11791 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11792 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11793 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11794 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11795 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11796 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11797 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11798 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11799 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11800 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11801 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11802 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11803 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11804
11805 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11806 @@ -40,7 +39,6 @@
11807 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11808 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11809 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11810 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11811 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11812 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11813 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11814 @@ -57,8 +55,10 @@
11815
11816 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11817 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11818 -#else
11819 +#elif defined(CONFIG_KMEMCHECK)
11820 #define _PAGE_NX (_AT(pteval_t, 0))
11821 +#else
11822 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11823 #endif
11824
11825 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11826 @@ -96,6 +96,9 @@
11827 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11828 _PAGE_ACCESSED)
11829
11830 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11831 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11832 +
11833 #define __PAGE_KERNEL_EXEC \
11834 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11835 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11836 @@ -106,7 +109,7 @@
11837 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11838 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11839 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11840 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11841 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11842 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11843 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11844 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11845 @@ -168,8 +171,8 @@
11846 * bits are combined, this will alow user to access the high address mapped
11847 * VDSO in the presence of CONFIG_COMPAT_VDSO
11848 */
11849 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11850 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11851 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11852 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11853 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11854 #endif
11855
11856 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11857 {
11858 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11859 }
11860 +#endif
11861
11862 +#if PAGETABLE_LEVELS == 3
11863 +#include <asm-generic/pgtable-nopud.h>
11864 +#endif
11865 +
11866 +#if PAGETABLE_LEVELS == 2
11867 +#include <asm-generic/pgtable-nopmd.h>
11868 +#endif
11869 +
11870 +#ifndef __ASSEMBLY__
11871 #if PAGETABLE_LEVELS > 3
11872 typedef struct { pudval_t pud; } pud_t;
11873
11874 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11875 return pud.pud;
11876 }
11877 #else
11878 -#include <asm-generic/pgtable-nopud.h>
11879 -
11880 static inline pudval_t native_pud_val(pud_t pud)
11881 {
11882 return native_pgd_val(pud.pgd);
11883 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11884 return pmd.pmd;
11885 }
11886 #else
11887 -#include <asm-generic/pgtable-nopmd.h>
11888 -
11889 static inline pmdval_t native_pmd_val(pmd_t pmd)
11890 {
11891 return native_pgd_val(pmd.pud.pgd);
11892 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11893
11894 extern pteval_t __supported_pte_mask;
11895 extern void set_nx(void);
11896 -extern int nx_enabled;
11897
11898 #define pgprot_writecombine pgprot_writecombine
11899 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11900 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11901 index 58545c9..fe6fc38e 100644
11902 --- a/arch/x86/include/asm/processor.h
11903 +++ b/arch/x86/include/asm/processor.h
11904 @@ -266,7 +266,7 @@ struct tss_struct {
11905
11906 } ____cacheline_aligned;
11907
11908 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11909 +extern struct tss_struct init_tss[NR_CPUS];
11910
11911 /*
11912 * Save the original ist values for checking stack pointers during debugging
11913 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
11914 */
11915 #define TASK_SIZE PAGE_OFFSET
11916 #define TASK_SIZE_MAX TASK_SIZE
11917 +
11918 +#ifdef CONFIG_PAX_SEGMEXEC
11919 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11920 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11921 +#else
11922 #define STACK_TOP TASK_SIZE
11923 -#define STACK_TOP_MAX STACK_TOP
11924 +#endif
11925 +
11926 +#define STACK_TOP_MAX TASK_SIZE
11927
11928 #define INIT_THREAD { \
11929 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11930 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11931 .vm86_info = NULL, \
11932 .sysenter_cs = __KERNEL_CS, \
11933 .io_bitmap_ptr = NULL, \
11934 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
11935 */
11936 #define INIT_TSS { \
11937 .x86_tss = { \
11938 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11939 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11940 .ss0 = __KERNEL_DS, \
11941 .ss1 = __KERNEL_CS, \
11942 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11943 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
11944 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11945
11946 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11947 -#define KSTK_TOP(info) \
11948 -({ \
11949 - unsigned long *__ptr = (unsigned long *)(info); \
11950 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11951 -})
11952 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11953
11954 /*
11955 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11956 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11957 #define task_pt_regs(task) \
11958 ({ \
11959 struct pt_regs *__regs__; \
11960 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11961 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11962 __regs__ - 1; \
11963 })
11964
11965 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11966 /*
11967 * User space process size. 47bits minus one guard page.
11968 */
11969 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11970 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11971
11972 /* This decides where the kernel will search for a free chunk of vm
11973 * space during mmap's.
11974 */
11975 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11976 - 0xc0000000 : 0xFFFFe000)
11977 + 0xc0000000 : 0xFFFFf000)
11978
11979 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11980 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11981 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11982 #define STACK_TOP_MAX TASK_SIZE_MAX
11983
11984 #define INIT_THREAD { \
11985 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11986 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11987 }
11988
11989 #define INIT_TSS { \
11990 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11991 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11992 }
11993
11994 /*
11995 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11996 */
11997 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11998
11999 +#ifdef CONFIG_PAX_SEGMEXEC
12000 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
12001 +#endif
12002 +
12003 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
12004
12005 /* Get/set a process' ability to use the timestamp counter instruction */
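
In the processor.h hunk above, task_pt_regs() is re-derived from thread.sp0: the saved user-mode register frame sits directly below sp0 on the kernel stack, so the macro casts sp0 to a struct pt_regs pointer and steps back one element (the -8/-16 adjustments to sp0 in INIT_THREAD/INIT_TSS reserve the slack the old KSTK_TOP() arithmetic accounted for). A sketch of just the pointer arithmetic, with a placeholder pt_regs whose size merely stands in for the real layout:

    #include <stdio.h>

    /* placeholder layout; only the size matters for the arithmetic */
    struct pt_regs { unsigned long reg[16]; };

    /* mirrors the patched task_pt_regs(): the saved user registers live
     * directly below thread.sp0, so step one pt_regs back from it */
    static struct pt_regs *task_pt_regs(unsigned long sp0)
    {
        return (struct pt_regs *)sp0 - 1;
    }

    int main(void)
    {
        unsigned long sp0 = 0x100000;

        printf("sp0 %#lx -> pt_regs at %p\n", sp0, (void *)task_pt_regs(sp0));
        return 0;
    }
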
12006 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
12007 index 3566454..4bdfb8c 100644
12008 --- a/arch/x86/include/asm/ptrace.h
12009 +++ b/arch/x86/include/asm/ptrace.h
12010 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
12011 }
12012
12013 /*
12014 - * user_mode_vm(regs) determines whether a register set came from user mode.
12015 + * user_mode(regs) determines whether a register set came from user mode.
12016 * This is true if V8086 mode was enabled OR if the register set was from
12017 * protected mode with RPL-3 CS value. This tricky test checks that with
12018 * one comparison. Many places in the kernel can bypass this full check
12019 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
12020 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
12021 + * be used.
12022 */
12023 -static inline int user_mode(struct pt_regs *regs)
12024 +static inline int user_mode_novm(struct pt_regs *regs)
12025 {
12026 #ifdef CONFIG_X86_32
12027 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
12028 #else
12029 - return !!(regs->cs & 3);
12030 + return !!(regs->cs & SEGMENT_RPL_MASK);
12031 #endif
12032 }
12033
12034 -static inline int user_mode_vm(struct pt_regs *regs)
12035 +static inline int user_mode(struct pt_regs *regs)
12036 {
12037 #ifdef CONFIG_X86_32
12038 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
12039 USER_RPL;
12040 #else
12041 - return user_mode(regs);
12042 + return user_mode_novm(regs);
12043 #endif
12044 }
12045
12046 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
12047 #ifdef CONFIG_X86_64
12048 static inline bool user_64bit_mode(struct pt_regs *regs)
12049 {
12050 + unsigned long cs = regs->cs & 0xffff;
12051 #ifndef CONFIG_PARAVIRT
12052 /*
12053 * On non-paravirt systems, this is the only long mode CPL 3
12054 * selector. We do not allow long mode selectors in the LDT.
12055 */
12056 - return regs->cs == __USER_CS;
12057 + return cs == __USER_CS;
12058 #else
12059 /* Headers are too twisted for this to go in paravirt.h. */
12060 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
12061 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
12062 #endif
12063 }
12064 #endif
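
The ptrace.h change renames the full check to user_mode() and the RPL-only check to user_mode_novm(); the 32-bit test itself is unchanged and folds the V8086 case and the RPL-3 case into a single comparison. A userspace model of that test (the selector values in main are only examples):

    #include <stdbool.h>
    #include <stdio.h>

    #define SEGMENT_RPL_MASK 0x3UL
    #define USER_RPL         0x3UL
    #define X86_VM_MASK      0x00020000UL   /* EFLAGS.VM */

    /* one comparison covers both cases: an RPL-3 code segment, or V8086 mode,
     * where the VM flag pushes the OR-ed value above USER_RPL regardless of CS */
    static bool user_mode(unsigned long cs, unsigned long flags)
    {
        return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
    }

    int main(void)
    {
        printf("%d\n", user_mode(0x73, 0));           /* RPL-3 selector  -> 1 */
        printf("%d\n", user_mode(0x10, 0));           /* RPL-0 selector  -> 0 */
        printf("%d\n", user_mode(0x10, X86_VM_MASK)); /* vm86 mode       -> 1 */
        return 0;
    }
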
12065 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12066 index 92f29706..a79cbbb 100644
12067 --- a/arch/x86/include/asm/reboot.h
12068 +++ b/arch/x86/include/asm/reboot.h
12069 @@ -6,19 +6,19 @@
12070 struct pt_regs;
12071
12072 struct machine_ops {
12073 - void (*restart)(char *cmd);
12074 - void (*halt)(void);
12075 - void (*power_off)(void);
12076 + void (* __noreturn restart)(char *cmd);
12077 + void (* __noreturn halt)(void);
12078 + void (* __noreturn power_off)(void);
12079 void (*shutdown)(void);
12080 void (*crash_shutdown)(struct pt_regs *);
12081 - void (*emergency_restart)(void);
12082 -};
12083 + void (* __noreturn emergency_restart)(void);
12084 +} __no_const;
12085
12086 extern struct machine_ops machine_ops;
12087
12088 void native_machine_crash_shutdown(struct pt_regs *regs);
12089 void native_machine_shutdown(void);
12090 -void machine_real_restart(unsigned int type);
12091 +void machine_real_restart(unsigned int type) __noreturn;
12092 /* These must match dispatch_table in reboot_32.S */
12093 #define MRR_BIOS 0
12094 #define MRR_APM 1
12095 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12096 index 2dbe4a7..ce1db00 100644
12097 --- a/arch/x86/include/asm/rwsem.h
12098 +++ b/arch/x86/include/asm/rwsem.h
12099 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12100 {
12101 asm volatile("# beginning down_read\n\t"
12102 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12103 +
12104 +#ifdef CONFIG_PAX_REFCOUNT
12105 + "jno 0f\n"
12106 + LOCK_PREFIX _ASM_DEC "(%1)\n"
12107 + "int $4\n0:\n"
12108 + _ASM_EXTABLE(0b, 0b)
12109 +#endif
12110 +
12111 /* adds 0x00000001 */
12112 " jns 1f\n"
12113 " call call_rwsem_down_read_failed\n"
12114 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12115 "1:\n\t"
12116 " mov %1,%2\n\t"
12117 " add %3,%2\n\t"
12118 +
12119 +#ifdef CONFIG_PAX_REFCOUNT
12120 + "jno 0f\n"
12121 + "sub %3,%2\n"
12122 + "int $4\n0:\n"
12123 + _ASM_EXTABLE(0b, 0b)
12124 +#endif
12125 +
12126 " jle 2f\n\t"
12127 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12128 " jnz 1b\n\t"
12129 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12130 long tmp;
12131 asm volatile("# beginning down_write\n\t"
12132 LOCK_PREFIX " xadd %1,(%2)\n\t"
12133 +
12134 +#ifdef CONFIG_PAX_REFCOUNT
12135 + "jno 0f\n"
12136 + "mov %1,(%2)\n"
12137 + "int $4\n0:\n"
12138 + _ASM_EXTABLE(0b, 0b)
12139 +#endif
12140 +
12141 /* adds 0xffff0001, returns the old value */
12142 " test %1,%1\n\t"
12143 /* was the count 0 before? */
12144 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12145 long tmp;
12146 asm volatile("# beginning __up_read\n\t"
12147 LOCK_PREFIX " xadd %1,(%2)\n\t"
12148 +
12149 +#ifdef CONFIG_PAX_REFCOUNT
12150 + "jno 0f\n"
12151 + "mov %1,(%2)\n"
12152 + "int $4\n0:\n"
12153 + _ASM_EXTABLE(0b, 0b)
12154 +#endif
12155 +
12156 /* subtracts 1, returns the old value */
12157 " jns 1f\n\t"
12158 " call call_rwsem_wake\n" /* expects old value in %edx */
12159 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12160 long tmp;
12161 asm volatile("# beginning __up_write\n\t"
12162 LOCK_PREFIX " xadd %1,(%2)\n\t"
12163 +
12164 +#ifdef CONFIG_PAX_REFCOUNT
12165 + "jno 0f\n"
12166 + "mov %1,(%2)\n"
12167 + "int $4\n0:\n"
12168 + _ASM_EXTABLE(0b, 0b)
12169 +#endif
12170 +
12171 /* subtracts 0xffff0001, returns the old value */
12172 " jns 1f\n\t"
12173 " call call_rwsem_wake\n" /* expects old value in %edx */
12174 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12175 {
12176 asm volatile("# beginning __downgrade_write\n\t"
12177 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12178 +
12179 +#ifdef CONFIG_PAX_REFCOUNT
12180 + "jno 0f\n"
12181 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12182 + "int $4\n0:\n"
12183 + _ASM_EXTABLE(0b, 0b)
12184 +#endif
12185 +
12186 /*
12187 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12188 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12189 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12190 */
12191 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12192 {
12193 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12194 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12195 +
12196 +#ifdef CONFIG_PAX_REFCOUNT
12197 + "jno 0f\n"
12198 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
12199 + "int $4\n0:\n"
12200 + _ASM_EXTABLE(0b, 0b)
12201 +#endif
12202 +
12203 : "+m" (sem->count)
12204 : "er" (delta));
12205 }
12206 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12207 */
12208 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12209 {
12210 - return delta + xadd(&sem->count, delta);
12211 + return delta + xadd_check_overflow(&sem->count, delta);
12212 }
12213
12214 #endif /* __KERNEL__ */
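
The CONFIG_PAX_REFCOUNT additions to rwsem.h (and, further down, to spinlock.h) all follow one pattern: perform the atomic operation, then "jno" over a recovery path; if the signed result overflowed, undo the operation and raise int $4 (#OF) so the overflow is trapped instead of silently wrapping. The following portable C sketch models that behaviour with a compiler builtin rather than inline assembly; it checks before updating, whereas the assembly adds first and then undoes the add, so treat it as an illustration of the pattern only.

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* model of "add; jno 0f; undo; int $4": if the signed add would overflow,
     * leave the counter untouched and trap instead of letting it wrap */
    static long checked_add(long *counter, long delta)
    {
        long result;

        if (__builtin_add_overflow(*counter, delta, &result)) {
            fprintf(stderr, "refcount overflow caught\n");
            abort();                    /* stands in for the #OF trap */
        }
        *counter = result;
        return result;
    }

    int main(void)
    {
        long count = LONG_MAX - 1;

        checked_add(&count, 1);         /* fine */
        checked_add(&count, 1);         /* overflows and aborts */
        return 0;
    }
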
12215 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12216 index 5e64171..f58957e 100644
12217 --- a/arch/x86/include/asm/segment.h
12218 +++ b/arch/x86/include/asm/segment.h
12219 @@ -64,10 +64,15 @@
12220 * 26 - ESPFIX small SS
12221 * 27 - per-cpu [ offset to per-cpu data area ]
12222 * 28 - stack_canary-20 [ for stack protector ]
12223 - * 29 - unused
12224 - * 30 - unused
12225 + * 29 - PCI BIOS CS
12226 + * 30 - PCI BIOS DS
12227 * 31 - TSS for double fault handler
12228 */
12229 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12230 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12231 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12232 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12233 +
12234 #define GDT_ENTRY_TLS_MIN 6
12235 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12236
12237 @@ -79,6 +84,8 @@
12238
12239 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12240
12241 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12242 +
12243 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12244
12245 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12246 @@ -104,6 +111,12 @@
12247 #define __KERNEL_STACK_CANARY 0
12248 #endif
12249
12250 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12251 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12252 +
12253 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12254 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12255 +
12256 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12257
12258 /*
12259 @@ -141,7 +154,7 @@
12260 */
12261
12262 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12263 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12264 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12265
12266
12267 #else
12268 @@ -165,6 +178,8 @@
12269 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12270 #define __USER32_DS __USER_DS
12271
12272 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12273 +
12274 #define GDT_ENTRY_TSS 8 /* needs two entries */
12275 #define GDT_ENTRY_LDT 10 /* needs two entries */
12276 #define GDT_ENTRY_TLS_MIN 12
12277 @@ -185,6 +200,7 @@
12278 #endif
12279
12280 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12281 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12282 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12283 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12284 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12285 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12286 index 0434c40..1714bf0 100644
12287 --- a/arch/x86/include/asm/smp.h
12288 +++ b/arch/x86/include/asm/smp.h
12289 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12290 /* cpus sharing the last level cache: */
12291 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12292 DECLARE_PER_CPU(u16, cpu_llc_id);
12293 -DECLARE_PER_CPU(int, cpu_number);
12294 +DECLARE_PER_CPU(unsigned int, cpu_number);
12295
12296 static inline struct cpumask *cpu_sibling_mask(int cpu)
12297 {
12298 @@ -77,7 +77,7 @@ struct smp_ops {
12299
12300 void (*send_call_func_ipi)(const struct cpumask *mask);
12301 void (*send_call_func_single_ipi)(int cpu);
12302 -};
12303 +} __no_const;
12304
12305 /* Globals due to paravirt */
12306 extern void set_cpu_sibling_map(int cpu);
12307 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12308 extern int safe_smp_processor_id(void);
12309
12310 #elif defined(CONFIG_X86_64_SMP)
12311 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12312 -
12313 -#define stack_smp_processor_id() \
12314 -({ \
12315 - struct thread_info *ti; \
12316 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12317 - ti->cpu; \
12318 -})
12319 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12320 +#define stack_smp_processor_id() raw_smp_processor_id()
12321 #define safe_smp_processor_id() smp_processor_id()
12322
12323 #endif
12324 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12325 index a82c2bf..2198f61 100644
12326 --- a/arch/x86/include/asm/spinlock.h
12327 +++ b/arch/x86/include/asm/spinlock.h
12328 @@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12329 static inline void arch_read_lock(arch_rwlock_t *rw)
12330 {
12331 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12332 +
12333 +#ifdef CONFIG_PAX_REFCOUNT
12334 + "jno 0f\n"
12335 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12336 + "int $4\n0:\n"
12337 + _ASM_EXTABLE(0b, 0b)
12338 +#endif
12339 +
12340 "jns 1f\n"
12341 "call __read_lock_failed\n\t"
12342 "1:\n"
12343 @@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12344 static inline void arch_write_lock(arch_rwlock_t *rw)
12345 {
12346 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12347 +
12348 +#ifdef CONFIG_PAX_REFCOUNT
12349 + "jno 0f\n"
12350 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12351 + "int $4\n0:\n"
12352 + _ASM_EXTABLE(0b, 0b)
12353 +#endif
12354 +
12355 "jz 1f\n"
12356 "call __write_lock_failed\n\t"
12357 "1:\n"
12358 @@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12359
12360 static inline void arch_read_unlock(arch_rwlock_t *rw)
12361 {
12362 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12363 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12364 +
12365 +#ifdef CONFIG_PAX_REFCOUNT
12366 + "jno 0f\n"
12367 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12368 + "int $4\n0:\n"
12369 + _ASM_EXTABLE(0b, 0b)
12370 +#endif
12371 +
12372 :"+m" (rw->lock) : : "memory");
12373 }
12374
12375 static inline void arch_write_unlock(arch_rwlock_t *rw)
12376 {
12377 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12378 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12379 +
12380 +#ifdef CONFIG_PAX_REFCOUNT
12381 + "jno 0f\n"
12382 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12383 + "int $4\n0:\n"
12384 + _ASM_EXTABLE(0b, 0b)
12385 +#endif
12386 +
12387 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12388 }
12389
12390 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12391 index 1575177..cb23f52 100644
12392 --- a/arch/x86/include/asm/stackprotector.h
12393 +++ b/arch/x86/include/asm/stackprotector.h
12394 @@ -48,7 +48,7 @@
12395 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12396 */
12397 #define GDT_STACK_CANARY_INIT \
12398 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12399 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12400
12401 /*
12402 * Initialize the stackprotector canary value.
12403 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
12404
12405 static inline void load_stack_canary_segment(void)
12406 {
12407 -#ifdef CONFIG_X86_32
12408 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12409 asm volatile ("mov %0, %%gs" : : "r" (0));
12410 #endif
12411 }
12412 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12413 index 70bbe39..4ae2bd4 100644
12414 --- a/arch/x86/include/asm/stacktrace.h
12415 +++ b/arch/x86/include/asm/stacktrace.h
12416 @@ -11,28 +11,20 @@
12417
12418 extern int kstack_depth_to_print;
12419
12420 -struct thread_info;
12421 +struct task_struct;
12422 struct stacktrace_ops;
12423
12424 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12425 - unsigned long *stack,
12426 - unsigned long bp,
12427 - const struct stacktrace_ops *ops,
12428 - void *data,
12429 - unsigned long *end,
12430 - int *graph);
12431 +typedef unsigned long walk_stack_t(struct task_struct *task,
12432 + void *stack_start,
12433 + unsigned long *stack,
12434 + unsigned long bp,
12435 + const struct stacktrace_ops *ops,
12436 + void *data,
12437 + unsigned long *end,
12438 + int *graph);
12439
12440 -extern unsigned long
12441 -print_context_stack(struct thread_info *tinfo,
12442 - unsigned long *stack, unsigned long bp,
12443 - const struct stacktrace_ops *ops, void *data,
12444 - unsigned long *end, int *graph);
12445 -
12446 -extern unsigned long
12447 -print_context_stack_bp(struct thread_info *tinfo,
12448 - unsigned long *stack, unsigned long bp,
12449 - const struct stacktrace_ops *ops, void *data,
12450 - unsigned long *end, int *graph);
12451 +extern walk_stack_t print_context_stack;
12452 +extern walk_stack_t print_context_stack_bp;
12453
12454 /* Generic stack tracer with callbacks */
12455
12456 @@ -40,7 +32,7 @@ struct stacktrace_ops {
12457 void (*address)(void *data, unsigned long address, int reliable);
12458 /* On negative return stop dumping */
12459 int (*stack)(void *data, char *name);
12460 - walk_stack_t walk_stack;
12461 + walk_stack_t *walk_stack;
12462 };
12463
12464 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12465 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12466 index cb23852..2dde194 100644
12467 --- a/arch/x86/include/asm/sys_ia32.h
12468 +++ b/arch/x86/include/asm/sys_ia32.h
12469 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
12470 compat_sigset_t __user *, unsigned int);
12471 asmlinkage long sys32_alarm(unsigned int);
12472
12473 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12474 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12475 asmlinkage long sys32_sysfs(int, u32, u32);
12476
12477 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12478 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
12479 index 2d2f01c..f985723 100644
12480 --- a/arch/x86/include/asm/system.h
12481 +++ b/arch/x86/include/asm/system.h
12482 @@ -129,7 +129,7 @@ do { \
12483 "call __switch_to\n\t" \
12484 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12485 __switch_canary \
12486 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12487 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12488 "movq %%rax,%%rdi\n\t" \
12489 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12490 "jnz ret_from_fork\n\t" \
12491 @@ -140,7 +140,7 @@ do { \
12492 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12493 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12494 [_tif_fork] "i" (_TIF_FORK), \
12495 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12496 + [thread_info] "m" (current_tinfo), \
12497 [current_task] "m" (current_task) \
12498 __switch_canary_iparam \
12499 : "memory", "cc" __EXTRA_CLOBBER)
12500 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
12501 {
12502 unsigned long __limit;
12503 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12504 - return __limit + 1;
12505 + return __limit;
12506 }
12507
12508 static inline void native_clts(void)
12509 @@ -397,13 +397,13 @@ void enable_hlt(void);
12510
12511 void cpu_idle_wait(void);
12512
12513 -extern unsigned long arch_align_stack(unsigned long sp);
12514 +#define arch_align_stack(x) ((x) & ~0xfUL)
12515 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12516
12517 void default_idle(void);
12518 bool set_pm_idle_to_default(void);
12519
12520 -void stop_this_cpu(void *dummy);
12521 +void stop_this_cpu(void *dummy) __noreturn;
12522
12523 /*
12524 * Force strict CPU ordering.
12525 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12526 index cfd8144..664ac89 100644
12527 --- a/arch/x86/include/asm/thread_info.h
12528 +++ b/arch/x86/include/asm/thread_info.h
12529 @@ -10,6 +10,7 @@
12530 #include <linux/compiler.h>
12531 #include <asm/page.h>
12532 #include <asm/types.h>
12533 +#include <asm/percpu.h>
12534
12535 /*
12536 * low level task data that entry.S needs immediate access to
12537 @@ -24,7 +25,6 @@ struct exec_domain;
12538 #include <linux/atomic.h>
12539
12540 struct thread_info {
12541 - struct task_struct *task; /* main task structure */
12542 struct exec_domain *exec_domain; /* execution domain */
12543 __u32 flags; /* low level flags */
12544 __u32 status; /* thread synchronous flags */
12545 @@ -34,19 +34,13 @@ struct thread_info {
12546 mm_segment_t addr_limit;
12547 struct restart_block restart_block;
12548 void __user *sysenter_return;
12549 -#ifdef CONFIG_X86_32
12550 - unsigned long previous_esp; /* ESP of the previous stack in
12551 - case of nested (IRQ) stacks
12552 - */
12553 - __u8 supervisor_stack[0];
12554 -#endif
12555 + unsigned long lowest_stack;
12556 unsigned int sig_on_uaccess_error:1;
12557 unsigned int uaccess_err:1; /* uaccess failed */
12558 };
12559
12560 -#define INIT_THREAD_INFO(tsk) \
12561 +#define INIT_THREAD_INFO \
12562 { \
12563 - .task = &tsk, \
12564 .exec_domain = &default_exec_domain, \
12565 .flags = 0, \
12566 .cpu = 0, \
12567 @@ -57,7 +51,7 @@ struct thread_info {
12568 }, \
12569 }
12570
12571 -#define init_thread_info (init_thread_union.thread_info)
12572 +#define init_thread_info (init_thread_union.stack)
12573 #define init_stack (init_thread_union.stack)
12574
12575 #else /* !__ASSEMBLY__ */
12576 @@ -95,6 +89,7 @@ struct thread_info {
12577 #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
12578 #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
12579 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12580 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
12581
12582 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12583 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12584 @@ -116,16 +111,17 @@ struct thread_info {
12585 #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
12586 #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
12587 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12588 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12589
12590 /* work to do in syscall_trace_enter() */
12591 #define _TIF_WORK_SYSCALL_ENTRY \
12592 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12593 - _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12594 + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12595
12596 /* work to do in syscall_trace_leave() */
12597 #define _TIF_WORK_SYSCALL_EXIT \
12598 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12599 - _TIF_SYSCALL_TRACEPOINT)
12600 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12601
12602 /* work to do on interrupt/exception return */
12603 #define _TIF_WORK_MASK \
12604 @@ -135,7 +131,8 @@ struct thread_info {
12605
12606 /* work to do on any return to user space */
12607 #define _TIF_ALLWORK_MASK \
12608 - ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12609 + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12610 + _TIF_GRSEC_SETXID)
12611
12612 /* Only used for 64 bit */
12613 #define _TIF_DO_NOTIFY_MASK \
12614 @@ -169,45 +166,40 @@ struct thread_info {
12615 ret; \
12616 })
12617
12618 -#ifdef CONFIG_X86_32
12619 -
12620 -#define STACK_WARN (THREAD_SIZE/8)
12621 -/*
12622 - * macros/functions for gaining access to the thread information structure
12623 - *
12624 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12625 - */
12626 -#ifndef __ASSEMBLY__
12627 -
12628 -
12629 -/* how to get the current stack pointer from C */
12630 -register unsigned long current_stack_pointer asm("esp") __used;
12631 -
12632 -/* how to get the thread information struct from C */
12633 -static inline struct thread_info *current_thread_info(void)
12634 -{
12635 - return (struct thread_info *)
12636 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12637 -}
12638 -
12639 -#else /* !__ASSEMBLY__ */
12640 -
12641 +#ifdef __ASSEMBLY__
12642 /* how to get the thread information struct from ASM */
12643 #define GET_THREAD_INFO(reg) \
12644 - movl $-THREAD_SIZE, reg; \
12645 - andl %esp, reg
12646 + mov PER_CPU_VAR(current_tinfo), reg
12647
12648 /* use this one if reg already contains %esp */
12649 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12650 - andl $-THREAD_SIZE, reg
12651 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12652 +#else
12653 +/* how to get the thread information struct from C */
12654 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12655 +
12656 +static __always_inline struct thread_info *current_thread_info(void)
12657 +{
12658 + return percpu_read_stable(current_tinfo);
12659 +}
12660 +#endif
12661 +
12662 +#ifdef CONFIG_X86_32
12663 +
12664 +#define STACK_WARN (THREAD_SIZE/8)
12665 +/*
12666 + * macros/functions for gaining access to the thread information structure
12667 + *
12668 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12669 + */
12670 +#ifndef __ASSEMBLY__
12671 +
12672 +/* how to get the current stack pointer from C */
12673 +register unsigned long current_stack_pointer asm("esp") __used;
12674
12675 #endif
12676
12677 #else /* X86_32 */
12678
12679 -#include <asm/percpu.h>
12680 -#define KERNEL_STACK_OFFSET (5*8)
12681 -
12682 /*
12683 * macros/functions for gaining access to the thread information structure
12684 * preempt_count needs to be 1 initially, until the scheduler is functional.
12685 @@ -215,27 +207,8 @@ static inline struct thread_info *current_thread_info(void)
12686 #ifndef __ASSEMBLY__
12687 DECLARE_PER_CPU(unsigned long, kernel_stack);
12688
12689 -static inline struct thread_info *current_thread_info(void)
12690 -{
12691 - struct thread_info *ti;
12692 - ti = (void *)(percpu_read_stable(kernel_stack) +
12693 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12694 - return ti;
12695 -}
12696 -
12697 -#else /* !__ASSEMBLY__ */
12698 -
12699 -/* how to get the thread information struct from ASM */
12700 -#define GET_THREAD_INFO(reg) \
12701 - movq PER_CPU_VAR(kernel_stack),reg ; \
12702 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12703 -
12704 -/*
12705 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12706 - * a certain register (to be used in assembler memory operands).
12707 - */
12708 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12709 -
12710 +/* how to get the current stack pointer from C */
12711 +register unsigned long current_stack_pointer asm("rsp") __used;
12712 #endif
12713
12714 #endif /* !X86_32 */
12715 @@ -269,5 +242,16 @@ extern void arch_task_cache_init(void);
12716 extern void free_thread_info(struct thread_info *ti);
12717 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12718 #define arch_task_cache_init arch_task_cache_init
12719 +
12720 +#define __HAVE_THREAD_FUNCTIONS
12721 +#define task_thread_info(task) (&(task)->tinfo)
12722 +#define task_stack_page(task) ((task)->stack)
12723 +#define setup_thread_stack(p, org) do {} while (0)
12724 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12725 +
12726 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12727 +extern struct task_struct *alloc_task_struct_node(int node);
12728 +extern void free_task_struct(struct task_struct *);
12729 +
12730 #endif
12731 #endif /* _ASM_X86_THREAD_INFO_H */
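
The thread_info.h rework above stops deriving the thread_info pointer from the stack pointer (the removed "(current_stack_pointer & ~(THREAD_SIZE - 1))" C helper and the "andl %esp, reg" assembly) and instead reads a per-CPU current_tinfo pointer. For reference, a tiny model of the masking arithmetic that is being retired (THREAD_SIZE here is only an illustrative value):

    #include <inttypes.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192ULL     /* illustrative; the real value is per-arch */

    /* the retired scheme: thread_info sat at the base of the kernel stack,
     * so it was found by masking the stack pointer down to THREAD_SIZE */
    static uint64_t thread_info_from_sp(uint64_t sp)
    {
        return sp & ~(THREAD_SIZE - 1);
    }

    int main(void)
    {
        uint64_t sp = 0xffff880000123abcULL;

        printf("sp %#" PRIx64 " -> thread_info %#" PRIx64 "\n",
               sp, thread_info_from_sp(sp));
        return 0;
    }
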
12732 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12733 index 8be5f54..7ae826d 100644
12734 --- a/arch/x86/include/asm/uaccess.h
12735 +++ b/arch/x86/include/asm/uaccess.h
12736 @@ -7,12 +7,15 @@
12737 #include <linux/compiler.h>
12738 #include <linux/thread_info.h>
12739 #include <linux/string.h>
12740 +#include <linux/sched.h>
12741 #include <asm/asm.h>
12742 #include <asm/page.h>
12743
12744 #define VERIFY_READ 0
12745 #define VERIFY_WRITE 1
12746
12747 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12748 +
12749 /*
12750 * The fs value determines whether argument validity checking should be
12751 * performed or not. If get_fs() == USER_DS, checking is performed, with
12752 @@ -28,7 +31,12 @@
12753
12754 #define get_ds() (KERNEL_DS)
12755 #define get_fs() (current_thread_info()->addr_limit)
12756 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12757 +void __set_fs(mm_segment_t x);
12758 +void set_fs(mm_segment_t x);
12759 +#else
12760 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12761 +#endif
12762
12763 #define segment_eq(a, b) ((a).seg == (b).seg)
12764
12765 @@ -76,7 +84,33 @@
12766 * checks that the pointer is in the user space range - after calling
12767 * this function, memory access functions may still return -EFAULT.
12768 */
12769 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12770 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12771 +#define access_ok(type, addr, size) \
12772 +({ \
12773 + long __size = size; \
12774 + unsigned long __addr = (unsigned long)addr; \
12775 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12776 + unsigned long __end_ao = __addr + __size - 1; \
12777 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12778 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12779 + while(__addr_ao <= __end_ao) { \
12780 + char __c_ao; \
12781 + __addr_ao += PAGE_SIZE; \
12782 + if (__size > PAGE_SIZE) \
12783 + cond_resched(); \
12784 + if (__get_user(__c_ao, (char __user *)__addr)) \
12785 + break; \
12786 + if (type != VERIFY_WRITE) { \
12787 + __addr = __addr_ao; \
12788 + continue; \
12789 + } \
12790 + if (__put_user(__c_ao, (char __user *)__addr)) \
12791 + break; \
12792 + __addr = __addr_ao; \
12793 + } \
12794 + } \
12795 + __ret_ao; \
12796 +})
12797
12798 /*
12799 * The exception table consists of pairs of addresses: the first is the
12800 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12801 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12802 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12803
12804 -
12805 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12806 +#define __copyuser_seg "gs;"
12807 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12808 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12809 +#else
12810 +#define __copyuser_seg
12811 +#define __COPYUSER_SET_ES
12812 +#define __COPYUSER_RESTORE_ES
12813 +#endif
12814
12815 #ifdef CONFIG_X86_32
12816 #define __put_user_asm_u64(x, addr, err, errret) \
12817 - asm volatile("1: movl %%eax,0(%2)\n" \
12818 - "2: movl %%edx,4(%2)\n" \
12819 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12820 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12821 "3:\n" \
12822 ".section .fixup,\"ax\"\n" \
12823 "4: movl %3,%0\n" \
12824 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12825 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12826
12827 #define __put_user_asm_ex_u64(x, addr) \
12828 - asm volatile("1: movl %%eax,0(%1)\n" \
12829 - "2: movl %%edx,4(%1)\n" \
12830 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12831 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12832 "3:\n" \
12833 _ASM_EXTABLE(1b, 2b - 1b) \
12834 _ASM_EXTABLE(2b, 3b - 2b) \
12835 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12836 __typeof__(*(ptr)) __pu_val; \
12837 __chk_user_ptr(ptr); \
12838 might_fault(); \
12839 - __pu_val = x; \
12840 + __pu_val = (x); \
12841 switch (sizeof(*(ptr))) { \
12842 case 1: \
12843 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12844 @@ -373,7 +415,7 @@ do { \
12845 } while (0)
12846
12847 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12848 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12849 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12850 "2:\n" \
12851 ".section .fixup,\"ax\"\n" \
12852 "3: mov %3,%0\n" \
12853 @@ -381,7 +423,7 @@ do { \
12854 " jmp 2b\n" \
12855 ".previous\n" \
12856 _ASM_EXTABLE(1b, 3b) \
12857 - : "=r" (err), ltype(x) \
12858 + : "=r" (err), ltype (x) \
12859 : "m" (__m(addr)), "i" (errret), "0" (err))
12860
12861 #define __get_user_size_ex(x, ptr, size) \
12862 @@ -406,7 +448,7 @@ do { \
12863 } while (0)
12864
12865 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12866 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12867 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12868 "2:\n" \
12869 _ASM_EXTABLE(1b, 2b - 1b) \
12870 : ltype(x) : "m" (__m(addr)))
12871 @@ -423,13 +465,24 @@ do { \
12872 int __gu_err; \
12873 unsigned long __gu_val; \
12874 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12875 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12876 + (x) = (__typeof__(*(ptr)))__gu_val; \
12877 __gu_err; \
12878 })
12879
12880 /* FIXME: this hack is definitely wrong -AK */
12881 struct __large_struct { unsigned long buf[100]; };
12882 -#define __m(x) (*(struct __large_struct __user *)(x))
12883 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12884 +#define ____m(x) \
12885 +({ \
12886 + unsigned long ____x = (unsigned long)(x); \
12887 + if (____x < PAX_USER_SHADOW_BASE) \
12888 + ____x += PAX_USER_SHADOW_BASE; \
12889 + (void __user *)____x; \
12890 +})
12891 +#else
12892 +#define ____m(x) (x)
12893 +#endif
12894 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12895
12896 /*
12897 * Tell gcc we read from memory instead of writing: this is because
12898 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12899 * aliasing issues.
12900 */
12901 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12902 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12903 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12904 "2:\n" \
12905 ".section .fixup,\"ax\"\n" \
12906 "3: mov %3,%0\n" \
12907 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12908 ".previous\n" \
12909 _ASM_EXTABLE(1b, 3b) \
12910 : "=r"(err) \
12911 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12912 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12913
12914 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12915 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12916 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12917 "2:\n" \
12918 _ASM_EXTABLE(1b, 2b - 1b) \
12919 : : ltype(x), "m" (__m(addr)))
12920 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12921 * On error, the variable @x is set to zero.
12922 */
12923
12924 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12925 +#define __get_user(x, ptr) get_user((x), (ptr))
12926 +#else
12927 #define __get_user(x, ptr) \
12928 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12929 +#endif
12930
12931 /**
12932 * __put_user: - Write a simple value into user space, with less checking.
12933 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12934 * Returns zero on success, or -EFAULT on error.
12935 */
12936
12937 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12938 +#define __put_user(x, ptr) put_user((x), (ptr))
12939 +#else
12940 #define __put_user(x, ptr) \
12941 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12942 +#endif
12943
12944 #define __get_user_unaligned __get_user
12945 #define __put_user_unaligned __put_user
12946 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12947 #define get_user_ex(x, ptr) do { \
12948 unsigned long __gue_val; \
12949 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12950 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12951 + (x) = (__typeof__(*(ptr)))__gue_val; \
12952 } while (0)
12953
12954 #ifdef CONFIG_X86_WP_WORKS_OK
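
The rewritten access_ok() above does more than the old range check: when a range that passes the check spans more than one page, it probes one byte per page with __get_user() (writing it back with __put_user() for VERIFY_WRITE) so that faults are taken up front, and calls cond_resched() for large ranges. A rough userspace model of the page-stepping loop, with probe_byte() standing in for the __get_user()/__put_user() probes and the initial range check and cond_resched() omitted:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* stand-in for the __get_user()/__put_user() probes; 0 means success */
    static int probe_byte(unsigned long addr, bool write)
    {
        (void)addr; (void)write;
        return 0;
    }

    /* mirrors the loop in the patched access_ok(): probe one byte of every
     * page the [addr, addr + size) range touches, a page at a time */
    static bool prefault_range(unsigned long addr, unsigned long size, bool write)
    {
        unsigned long page = addr & PAGE_MASK;  /* __addr_ao in the patch */
        unsigned long end  = addr + size - 1;   /* __end_ao               */

        while (page <= end) {
            page += PAGE_SIZE;
            if (probe_byte(addr, write))
                return false;
            addr = page;                        /* next probe at a page start */
        }
        return true;
    }

    int main(void)
    {
        /* a 32-byte range straddling a page boundary probes two pages */
        printf("%d\n", prefault_range(0x2000 - 8, 32, false));
        return 0;
    }
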
12955 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12956 index 566e803..1230707 100644
12957 --- a/arch/x86/include/asm/uaccess_32.h
12958 +++ b/arch/x86/include/asm/uaccess_32.h
12959 @@ -11,15 +11,15 @@
12960 #include <asm/page.h>
12961
12962 unsigned long __must_check __copy_to_user_ll
12963 - (void __user *to, const void *from, unsigned long n);
12964 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12965 unsigned long __must_check __copy_from_user_ll
12966 - (void *to, const void __user *from, unsigned long n);
12967 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12968 unsigned long __must_check __copy_from_user_ll_nozero
12969 - (void *to, const void __user *from, unsigned long n);
12970 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12971 unsigned long __must_check __copy_from_user_ll_nocache
12972 - (void *to, const void __user *from, unsigned long n);
12973 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12974 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12975 - (void *to, const void __user *from, unsigned long n);
12976 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12977
12978 /**
12979 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12980 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12981 static __always_inline unsigned long __must_check
12982 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12983 {
12984 + if ((long)n < 0)
12985 + return n;
12986 +
12987 if (__builtin_constant_p(n)) {
12988 unsigned long ret;
12989
12990 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12991 return ret;
12992 }
12993 }
12994 + if (!__builtin_constant_p(n))
12995 + check_object_size(from, n, true);
12996 return __copy_to_user_ll(to, from, n);
12997 }
12998
12999 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
13000 __copy_to_user(void __user *to, const void *from, unsigned long n)
13001 {
13002 might_fault();
13003 +
13004 return __copy_to_user_inatomic(to, from, n);
13005 }
13006
13007 static __always_inline unsigned long
13008 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13009 {
13010 + if ((long)n < 0)
13011 + return n;
13012 +
13013 /* Avoid zeroing the tail if the copy fails..
13014 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
13015 * but as the zeroing behaviour is only significant when n is not
13016 @@ -137,6 +146,10 @@ static __always_inline unsigned long
13017 __copy_from_user(void *to, const void __user *from, unsigned long n)
13018 {
13019 might_fault();
13020 +
13021 + if ((long)n < 0)
13022 + return n;
13023 +
13024 if (__builtin_constant_p(n)) {
13025 unsigned long ret;
13026
13027 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
13028 return ret;
13029 }
13030 }
13031 + if (!__builtin_constant_p(n))
13032 + check_object_size(to, n, false);
13033 return __copy_from_user_ll(to, from, n);
13034 }
13035
13036 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
13037 const void __user *from, unsigned long n)
13038 {
13039 might_fault();
13040 +
13041 + if ((long)n < 0)
13042 + return n;
13043 +
13044 if (__builtin_constant_p(n)) {
13045 unsigned long ret;
13046
13047 @@ -181,15 +200,19 @@ static __always_inline unsigned long
13048 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
13049 unsigned long n)
13050 {
13051 - return __copy_from_user_ll_nocache_nozero(to, from, n);
13052 + if ((long)n < 0)
13053 + return n;
13054 +
13055 + return __copy_from_user_ll_nocache_nozero(to, from, n);
13056 }
13057
13058 -unsigned long __must_check copy_to_user(void __user *to,
13059 - const void *from, unsigned long n);
13060 -unsigned long __must_check _copy_from_user(void *to,
13061 - const void __user *from,
13062 - unsigned long n);
13063 -
13064 +extern void copy_to_user_overflow(void)
13065 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13066 + __compiletime_error("copy_to_user() buffer size is not provably correct")
13067 +#else
13068 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
13069 +#endif
13070 +;
13071
13072 extern void copy_from_user_overflow(void)
13073 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13074 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
13075 #endif
13076 ;
13077
13078 -static inline unsigned long __must_check copy_from_user(void *to,
13079 - const void __user *from,
13080 - unsigned long n)
13081 +/**
13082 + * copy_to_user: - Copy a block of data into user space.
13083 + * @to: Destination address, in user space.
13084 + * @from: Source address, in kernel space.
13085 + * @n: Number of bytes to copy.
13086 + *
13087 + * Context: User context only. This function may sleep.
13088 + *
13089 + * Copy data from kernel space to user space.
13090 + *
13091 + * Returns number of bytes that could not be copied.
13092 + * On success, this will be zero.
13093 + */
13094 +static inline unsigned long __must_check
13095 +copy_to_user(void __user *to, const void *from, unsigned long n)
13096 +{
13097 + int sz = __compiletime_object_size(from);
13098 +
13099 + if (unlikely(sz != -1 && sz < n))
13100 + copy_to_user_overflow();
13101 + else if (access_ok(VERIFY_WRITE, to, n))
13102 + n = __copy_to_user(to, from, n);
13103 + return n;
13104 +}
13105 +
13106 +/**
13107 + * copy_from_user: - Copy a block of data from user space.
13108 + * @to: Destination address, in kernel space.
13109 + * @from: Source address, in user space.
13110 + * @n: Number of bytes to copy.
13111 + *
13112 + * Context: User context only. This function may sleep.
13113 + *
13114 + * Copy data from user space to kernel space.
13115 + *
13116 + * Returns number of bytes that could not be copied.
13117 + * On success, this will be zero.
13118 + *
13119 + * If some data could not be copied, this function will pad the copied
13120 + * data to the requested size using zero bytes.
13121 + */
13122 +static inline unsigned long __must_check
13123 +copy_from_user(void *to, const void __user *from, unsigned long n)
13124 {
13125 int sz = __compiletime_object_size(to);
13126
13127 - if (likely(sz == -1 || sz >= n))
13128 - n = _copy_from_user(to, from, n);
13129 - else
13130 + if (unlikely(sz != -1 && sz < n))
13131 copy_from_user_overflow();
13132 -
13133 + else if (access_ok(VERIFY_READ, from, n))
13134 + n = __copy_from_user(to, from, n);
13135 + else if ((long)n > 0) {
13136 + if (!__builtin_constant_p(n))
13137 + check_object_size(to, n, false);
13138 + memset(to, 0, n);
13139 + }
13140 return n;
13141 }
13142
13143 @@ -235,7 +302,7 @@ long __must_check __strncpy_from_user(char *dst,
13144 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13145
13146 long strnlen_user(const char __user *str, long n);
13147 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13148 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13149 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13150 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13151
13152 #endif /* _ASM_X86_UACCESS_32_H */
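
The (long)n < 0 guards added throughout uaccess_32.h above reject any length whose signed interpretation is negative (i.e. anything above LONG_MAX) before a single byte is copied; such lengths typically come from size-calculation underflows in callers. A minimal user-space sketch of the same idea, plain C and not kernel code, with checked_copy() as an invented stand-in for the patched helpers:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the patched __copy_*_user() helpers: refuse a
 * length that is negative when viewed as a signed long and report it as
 * "n bytes not copied", which is the kernel's return convention. */
static unsigned long checked_copy(void *dst, const void *src, unsigned long n)
{
        if ((long)n < 0)
                return n;
        memcpy(dst, src, n);
        return 0;
}

int main(void)
{
        char dst[16], src[16] = "hello";
        unsigned long want = 4, have = 8;

        printf("sane length:        %lu bytes not copied\n",
               checked_copy(dst, src, want));
        printf("underflowed length: %lu bytes not copied\n",
               checked_copy(dst, src, want - have));  /* wraps to a huge value */
        return 0;
}
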
13153 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13154 index 1c66d30..bf1a2cc 100644
13155 --- a/arch/x86/include/asm/uaccess_64.h
13156 +++ b/arch/x86/include/asm/uaccess_64.h
13157 @@ -10,6 +10,9 @@
13158 #include <asm/alternative.h>
13159 #include <asm/cpufeature.h>
13160 #include <asm/page.h>
13161 +#include <asm/pgtable.h>
13162 +
13163 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
13164
13165 /*
13166 * Copy To/From Userspace
13167 @@ -17,12 +20,14 @@
13168
13169 /* Handles exceptions in both to and from, but doesn't do access_ok */
13170 __must_check unsigned long
13171 -copy_user_generic_string(void *to, const void *from, unsigned len);
13172 +copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13173 __must_check unsigned long
13174 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13175 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13176
13177 static __always_inline __must_check unsigned long
13178 -copy_user_generic(void *to, const void *from, unsigned len)
13179 +copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13180 +static __always_inline __must_check unsigned long
13181 +copy_user_generic(void *to, const void *from, unsigned long len)
13182 {
13183 unsigned ret;
13184
13185 @@ -32,142 +37,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
13186 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13187 "=d" (len)),
13188 "1" (to), "2" (from), "3" (len)
13189 - : "memory", "rcx", "r8", "r9", "r10", "r11");
13190 + : "memory", "rcx", "r8", "r9", "r11");
13191 return ret;
13192 }
13193
13194 +static __always_inline __must_check unsigned long
13195 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13196 +static __always_inline __must_check unsigned long
13197 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13198 __must_check unsigned long
13199 -_copy_to_user(void __user *to, const void *from, unsigned len);
13200 -__must_check unsigned long
13201 -_copy_from_user(void *to, const void __user *from, unsigned len);
13202 -__must_check unsigned long
13203 -copy_in_user(void __user *to, const void __user *from, unsigned len);
13204 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13205
13206 static inline unsigned long __must_check copy_from_user(void *to,
13207 const void __user *from,
13208 unsigned long n)
13209 {
13210 - int sz = __compiletime_object_size(to);
13211 -
13212 might_fault();
13213 - if (likely(sz == -1 || sz >= n))
13214 - n = _copy_from_user(to, from, n);
13215 -#ifdef CONFIG_DEBUG_VM
13216 - else
13217 - WARN(1, "Buffer overflow detected!\n");
13218 -#endif
13219 +
13220 + if (access_ok(VERIFY_READ, from, n))
13221 + n = __copy_from_user(to, from, n);
13222 + else if (n < INT_MAX) {
13223 + if (!__builtin_constant_p(n))
13224 + check_object_size(to, n, false);
13225 + memset(to, 0, n);
13226 + }
13227 return n;
13228 }
13229
13230 static __always_inline __must_check
13231 -int copy_to_user(void __user *dst, const void *src, unsigned size)
13232 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
13233 {
13234 might_fault();
13235
13236 - return _copy_to_user(dst, src, size);
13237 + if (access_ok(VERIFY_WRITE, dst, size))
13238 + size = __copy_to_user(dst, src, size);
13239 + return size;
13240 }
13241
13242 static __always_inline __must_check
13243 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
13244 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13245 {
13246 - int ret = 0;
13247 + int sz = __compiletime_object_size(dst);
13248 + unsigned ret = 0;
13249
13250 might_fault();
13251 - if (!__builtin_constant_p(size))
13252 - return copy_user_generic(dst, (__force void *)src, size);
13253 +
13254 + if (size > INT_MAX)
13255 + return size;
13256 +
13257 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13258 + if (!__access_ok(VERIFY_READ, src, size))
13259 + return size;
13260 +#endif
13261 +
13262 + if (unlikely(sz != -1 && sz < size)) {
13263 +#ifdef CONFIG_DEBUG_VM
13264 + WARN(1, "Buffer overflow detected!\n");
13265 +#endif
13266 + return size;
13267 + }
13268 +
13269 + if (!__builtin_constant_p(size)) {
13270 + check_object_size(dst, size, false);
13271 +
13272 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13273 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13274 + src += PAX_USER_SHADOW_BASE;
13275 +#endif
13276 +
13277 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13278 + }
13279 switch (size) {
13280 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13281 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13282 ret, "b", "b", "=q", 1);
13283 return ret;
13284 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13285 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13286 ret, "w", "w", "=r", 2);
13287 return ret;
13288 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13289 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13290 ret, "l", "k", "=r", 4);
13291 return ret;
13292 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13293 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13294 ret, "q", "", "=r", 8);
13295 return ret;
13296 case 10:
13297 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13298 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13299 ret, "q", "", "=r", 10);
13300 if (unlikely(ret))
13301 return ret;
13302 __get_user_asm(*(u16 *)(8 + (char *)dst),
13303 - (u16 __user *)(8 + (char __user *)src),
13304 + (const u16 __user *)(8 + (const char __user *)src),
13305 ret, "w", "w", "=r", 2);
13306 return ret;
13307 case 16:
13308 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13309 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13310 ret, "q", "", "=r", 16);
13311 if (unlikely(ret))
13312 return ret;
13313 __get_user_asm(*(u64 *)(8 + (char *)dst),
13314 - (u64 __user *)(8 + (char __user *)src),
13315 + (const u64 __user *)(8 + (const char __user *)src),
13316 ret, "q", "", "=r", 8);
13317 return ret;
13318 default:
13319 - return copy_user_generic(dst, (__force void *)src, size);
13320 +
13321 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13322 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13323 + src += PAX_USER_SHADOW_BASE;
13324 +#endif
13325 +
13326 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13327 }
13328 }
13329
13330 static __always_inline __must_check
13331 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
13332 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13333 {
13334 - int ret = 0;
13335 + int sz = __compiletime_object_size(src);
13336 + unsigned ret = 0;
13337
13338 might_fault();
13339 - if (!__builtin_constant_p(size))
13340 - return copy_user_generic((__force void *)dst, src, size);
13341 +
13342 + if (size > INT_MAX)
13343 + return size;
13344 +
13345 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13346 + if (!__access_ok(VERIFY_WRITE, dst, size))
13347 + return size;
13348 +#endif
13349 +
13350 + if (unlikely(sz != -1 && sz < size)) {
13351 +#ifdef CONFIG_DEBUG_VM
13352 + WARN(1, "Buffer overflow detected!\n");
13353 +#endif
13354 + return size;
13355 + }
13356 +
13357 + if (!__builtin_constant_p(size)) {
13358 + check_object_size(src, size, true);
13359 +
13360 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13361 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13362 + dst += PAX_USER_SHADOW_BASE;
13363 +#endif
13364 +
13365 + return copy_user_generic((__force_kernel void *)dst, src, size);
13366 + }
13367 switch (size) {
13368 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13369 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13370 ret, "b", "b", "iq", 1);
13371 return ret;
13372 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13373 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13374 ret, "w", "w", "ir", 2);
13375 return ret;
13376 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13377 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13378 ret, "l", "k", "ir", 4);
13379 return ret;
13380 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13381 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13382 ret, "q", "", "er", 8);
13383 return ret;
13384 case 10:
13385 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13386 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13387 ret, "q", "", "er", 10);
13388 if (unlikely(ret))
13389 return ret;
13390 asm("":::"memory");
13391 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13392 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13393 ret, "w", "w", "ir", 2);
13394 return ret;
13395 case 16:
13396 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13397 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13398 ret, "q", "", "er", 16);
13399 if (unlikely(ret))
13400 return ret;
13401 asm("":::"memory");
13402 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13403 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13404 ret, "q", "", "er", 8);
13405 return ret;
13406 default:
13407 - return copy_user_generic((__force void *)dst, src, size);
13408 +
13409 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13410 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13411 + dst += PAX_USER_SHADOW_BASE;
13412 +#endif
13413 +
13414 + return copy_user_generic((__force_kernel void *)dst, src, size);
13415 }
13416 }
13417
13418 static __always_inline __must_check
13419 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13420 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13421 {
13422 - int ret = 0;
13423 + unsigned ret = 0;
13424
13425 might_fault();
13426 - if (!__builtin_constant_p(size))
13427 - return copy_user_generic((__force void *)dst,
13428 - (__force void *)src, size);
13429 +
13430 + if (size > INT_MAX)
13431 + return size;
13432 +
13433 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13434 + if (!__access_ok(VERIFY_READ, src, size))
13435 + return size;
13436 + if (!__access_ok(VERIFY_WRITE, dst, size))
13437 + return size;
13438 +#endif
13439 +
13440 + if (!__builtin_constant_p(size)) {
13441 +
13442 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13443 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13444 + src += PAX_USER_SHADOW_BASE;
13445 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13446 + dst += PAX_USER_SHADOW_BASE;
13447 +#endif
13448 +
13449 + return copy_user_generic((__force_kernel void *)dst,
13450 + (__force_kernel const void *)src, size);
13451 + }
13452 switch (size) {
13453 case 1: {
13454 u8 tmp;
13455 - __get_user_asm(tmp, (u8 __user *)src,
13456 + __get_user_asm(tmp, (const u8 __user *)src,
13457 ret, "b", "b", "=q", 1);
13458 if (likely(!ret))
13459 __put_user_asm(tmp, (u8 __user *)dst,
13460 @@ -176,7 +265,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13461 }
13462 case 2: {
13463 u16 tmp;
13464 - __get_user_asm(tmp, (u16 __user *)src,
13465 + __get_user_asm(tmp, (const u16 __user *)src,
13466 ret, "w", "w", "=r", 2);
13467 if (likely(!ret))
13468 __put_user_asm(tmp, (u16 __user *)dst,
13469 @@ -186,7 +275,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13470
13471 case 4: {
13472 u32 tmp;
13473 - __get_user_asm(tmp, (u32 __user *)src,
13474 + __get_user_asm(tmp, (const u32 __user *)src,
13475 ret, "l", "k", "=r", 4);
13476 if (likely(!ret))
13477 __put_user_asm(tmp, (u32 __user *)dst,
13478 @@ -195,7 +284,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13479 }
13480 case 8: {
13481 u64 tmp;
13482 - __get_user_asm(tmp, (u64 __user *)src,
13483 + __get_user_asm(tmp, (const u64 __user *)src,
13484 ret, "q", "", "=r", 8);
13485 if (likely(!ret))
13486 __put_user_asm(tmp, (u64 __user *)dst,
13487 @@ -203,8 +292,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13488 return ret;
13489 }
13490 default:
13491 - return copy_user_generic((__force void *)dst,
13492 - (__force void *)src, size);
13493 +
13494 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13495 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13496 + src += PAX_USER_SHADOW_BASE;
13497 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13498 + dst += PAX_USER_SHADOW_BASE;
13499 +#endif
13500 +
13501 + return copy_user_generic((__force_kernel void *)dst,
13502 + (__force_kernel const void *)src, size);
13503 }
13504 }
13505
13506 @@ -215,39 +312,76 @@ __strncpy_from_user(char *dst, const char __user *src, long count);
13507 __must_check long strnlen_user(const char __user *str, long n);
13508 __must_check long __strnlen_user(const char __user *str, long n);
13509 __must_check long strlen_user(const char __user *str);
13510 -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13511 -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13512 +__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13513 +__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13514
13515 static __must_check __always_inline int
13516 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13517 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13518 {
13519 - return copy_user_generic(dst, (__force const void *)src, size);
13520 + if (size > INT_MAX)
13521 + return size;
13522 +
13523 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13524 + if (!__access_ok(VERIFY_READ, src, size))
13525 + return size;
13526 +
13527 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13528 + src += PAX_USER_SHADOW_BASE;
13529 +#endif
13530 +
13531 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13532 }
13533
13534 -static __must_check __always_inline int
13535 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13536 +static __must_check __always_inline unsigned long
13537 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13538 {
13539 - return copy_user_generic((__force void *)dst, src, size);
13540 + if (size > INT_MAX)
13541 + return size;
13542 +
13543 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13544 + if (!__access_ok(VERIFY_WRITE, dst, size))
13545 + return size;
13546 +
13547 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13548 + dst += PAX_USER_SHADOW_BASE;
13549 +#endif
13550 +
13551 + return copy_user_generic((__force_kernel void *)dst, src, size);
13552 }
13553
13554 -extern long __copy_user_nocache(void *dst, const void __user *src,
13555 - unsigned size, int zerorest);
13556 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13557 + unsigned long size, int zerorest) __size_overflow(3);
13558
13559 -static inline int
13560 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13561 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13562 {
13563 might_sleep();
13564 +
13565 + if (size > INT_MAX)
13566 + return size;
13567 +
13568 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13569 + if (!__access_ok(VERIFY_READ, src, size))
13570 + return size;
13571 +#endif
13572 +
13573 return __copy_user_nocache(dst, src, size, 1);
13574 }
13575
13576 -static inline int
13577 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13578 - unsigned size)
13579 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13580 + unsigned long size)
13581 {
13582 + if (size > INT_MAX)
13583 + return size;
13584 +
13585 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13586 + if (!__access_ok(VERIFY_READ, src, size))
13587 + return size;
13588 +#endif
13589 +
13590 return __copy_user_nocache(dst, src, size, 0);
13591 }
13592
13593 -unsigned long
13594 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13595 +extern unsigned long
13596 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13597
13598 #endif /* _ASM_X86_UACCESS_64_H */
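
Both uaccess headers now compare the requested copy length against __compiletime_object_size() and report provably oversized copies via the *_overflow() stubs or WARN(). A rough user-space analogue of that pattern, built on GCC/Clang's __builtin_object_size(), which returns (size_t)-1 when the object size is unknown; check_overflow() and checked_memcpy() are invented names used only for illustration:

#include <stdio.h>
#include <string.h>

static void check_overflow(size_t sz, size_t n)
{
        if (sz != (size_t)-1 && sz < n)
                fprintf(stderr, "refusing copy of %zu bytes into a %zu-byte object\n",
                        n, sz);
}

/* Note: n is evaluated more than once; fine for this demo. */
#define checked_memcpy(dst, src, n) do {                        \
        size_t __sz = __builtin_object_size(dst, 0);            \
        check_overflow(__sz, (n));                              \
        if (__sz == (size_t)-1 || __sz >= (n))                  \
                memcpy(dst, src, (n));                          \
} while (0)

int main(void)
{
        char small[8];
        char big[64] = "some source data that is longer than small[]";

        checked_memcpy(small, big, sizeof(small)); /* fits: copied          */
        printf("copied: %.8s\n", small);
        checked_memcpy(small, big, 32);            /* too big: reported and skipped */
        return 0;
}
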
13599 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13600 index bb05228..d763d5b 100644
13601 --- a/arch/x86/include/asm/vdso.h
13602 +++ b/arch/x86/include/asm/vdso.h
13603 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13604 #define VDSO32_SYMBOL(base, name) \
13605 ({ \
13606 extern const char VDSO32_##name[]; \
13607 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13608 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13609 })
13610 #endif
13611
13612 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13613 index a609c39..7a68dc7 100644
13614 --- a/arch/x86/include/asm/x86_init.h
13615 +++ b/arch/x86/include/asm/x86_init.h
13616 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
13617 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13618 void (*find_smp_config)(void);
13619 void (*get_smp_config)(unsigned int early);
13620 -};
13621 +} __no_const;
13622
13623 /**
13624 * struct x86_init_resources - platform specific resource related ops
13625 @@ -43,7 +43,7 @@ struct x86_init_resources {
13626 void (*probe_roms)(void);
13627 void (*reserve_resources)(void);
13628 char *(*memory_setup)(void);
13629 -};
13630 +} __no_const;
13631
13632 /**
13633 * struct x86_init_irqs - platform specific interrupt setup
13634 @@ -56,7 +56,7 @@ struct x86_init_irqs {
13635 void (*pre_vector_init)(void);
13636 void (*intr_init)(void);
13637 void (*trap_init)(void);
13638 -};
13639 +} __no_const;
13640
13641 /**
13642 * struct x86_init_oem - oem platform specific customizing functions
13643 @@ -66,7 +66,7 @@ struct x86_init_irqs {
13644 struct x86_init_oem {
13645 void (*arch_setup)(void);
13646 void (*banner)(void);
13647 -};
13648 +} __no_const;
13649
13650 /**
13651 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13652 @@ -77,7 +77,7 @@ struct x86_init_oem {
13653 */
13654 struct x86_init_mapping {
13655 void (*pagetable_reserve)(u64 start, u64 end);
13656 -};
13657 +} __no_const;
13658
13659 /**
13660 * struct x86_init_paging - platform specific paging functions
13661 @@ -87,7 +87,7 @@ struct x86_init_mapping {
13662 struct x86_init_paging {
13663 void (*pagetable_setup_start)(pgd_t *base);
13664 void (*pagetable_setup_done)(pgd_t *base);
13665 -};
13666 +} __no_const;
13667
13668 /**
13669 * struct x86_init_timers - platform specific timer setup
13670 @@ -102,7 +102,7 @@ struct x86_init_timers {
13671 void (*tsc_pre_init)(void);
13672 void (*timer_init)(void);
13673 void (*wallclock_init)(void);
13674 -};
13675 +} __no_const;
13676
13677 /**
13678 * struct x86_init_iommu - platform specific iommu setup
13679 @@ -110,7 +110,7 @@ struct x86_init_timers {
13680 */
13681 struct x86_init_iommu {
13682 int (*iommu_init)(void);
13683 -};
13684 +} __no_const;
13685
13686 /**
13687 * struct x86_init_pci - platform specific pci init functions
13688 @@ -124,7 +124,7 @@ struct x86_init_pci {
13689 int (*init)(void);
13690 void (*init_irq)(void);
13691 void (*fixup_irqs)(void);
13692 -};
13693 +} __no_const;
13694
13695 /**
13696 * struct x86_init_ops - functions for platform specific setup
13697 @@ -140,7 +140,7 @@ struct x86_init_ops {
13698 struct x86_init_timers timers;
13699 struct x86_init_iommu iommu;
13700 struct x86_init_pci pci;
13701 -};
13702 +} __no_const;
13703
13704 /**
13705 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13706 @@ -149,7 +149,7 @@ struct x86_init_ops {
13707 struct x86_cpuinit_ops {
13708 void (*setup_percpu_clockev)(void);
13709 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13710 -};
13711 +} __no_const;
13712
13713 /**
13714 * struct x86_platform_ops - platform specific runtime functions
13715 @@ -171,7 +171,7 @@ struct x86_platform_ops {
13716 void (*nmi_init)(void);
13717 unsigned char (*get_nmi_reason)(void);
13718 int (*i8042_detect)(void);
13719 -};
13720 +} __no_const;
13721
13722 struct pci_dev;
13723
13724 @@ -180,7 +180,7 @@ struct x86_msi_ops {
13725 void (*teardown_msi_irq)(unsigned int irq);
13726 void (*teardown_msi_irqs)(struct pci_dev *dev);
13727 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13728 -};
13729 +} __no_const;
13730
13731 extern struct x86_init_ops x86_init;
13732 extern struct x86_cpuinit_ops x86_cpuinit;
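
The __no_const annotations above mark the x86 ops tables that must stay writable; the rest are constified so that function-pointer dispatch tables land in read-only memory and cannot be retargeted at run time. A small stand-alone illustration of why a const ops structure is harder to hijack; the names are invented, not the kernel's:

#include <stdio.h>

struct timer_ops {
        void (*setup)(void);
        void (*start)(void);
};

static void default_setup(void) { puts("setup"); }
static void default_start(void) { puts("start"); }

/* Lives in .rodata; a stray or malicious write through a cast would fault
 * on most platforms, and a direct assignment does not even compile. */
static const struct timer_ops timer_ops = {
        .setup = default_setup,
        .start = default_start,
};

int main(void)
{
        timer_ops.setup();
        timer_ops.start();
        /* timer_ops.start = other_fn;   error: assignment of read-only object */
        return 0;
}
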
13733 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13734 index c6ce245..ffbdab7 100644
13735 --- a/arch/x86/include/asm/xsave.h
13736 +++ b/arch/x86/include/asm/xsave.h
13737 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13738 {
13739 int err;
13740
13741 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13742 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13743 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13744 +#endif
13745 +
13746 /*
13747 * Clear the xsave header first, so that reserved fields are
13748 * initialized to zero.
13749 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13750 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13751 {
13752 int err;
13753 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13754 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13755 u32 lmask = mask;
13756 u32 hmask = mask >> 32;
13757
13758 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13759 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13760 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13761 +#endif
13762 +
13763 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13764 "2:\n"
13765 ".section .fixup,\"ax\"\n"
13766 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13767 index 6a564ac..9b1340c 100644
13768 --- a/arch/x86/kernel/acpi/realmode/Makefile
13769 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13770 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13771 $(call cc-option, -fno-stack-protector) \
13772 $(call cc-option, -mpreferred-stack-boundary=2)
13773 KBUILD_CFLAGS += $(call cc-option, -m32)
13774 +ifdef CONSTIFY_PLUGIN
13775 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13776 +endif
13777 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13778 GCOV_PROFILE := n
13779
13780 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13781 index b4fd836..4358fe3 100644
13782 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13783 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13784 @@ -108,6 +108,9 @@ wakeup_code:
13785 /* Do any other stuff... */
13786
13787 #ifndef CONFIG_64BIT
13788 + /* Recheck NX bit overrides (64bit path does this in trampoline) */

13789 + call verify_cpu
13790 +
13791 /* This could also be done in C code... */
13792 movl pmode_cr3, %eax
13793 movl %eax, %cr3
13794 @@ -131,6 +134,7 @@ wakeup_code:
13795 movl pmode_cr0, %eax
13796 movl %eax, %cr0
13797 jmp pmode_return
13798 +# include "../../verify_cpu.S"
13799 #else
13800 pushw $0
13801 pushw trampoline_segment
13802 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13803 index 103b6ab..2004d0a 100644
13804 --- a/arch/x86/kernel/acpi/sleep.c
13805 +++ b/arch/x86/kernel/acpi/sleep.c
13806 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
13807 header->trampoline_segment = trampoline_address() >> 4;
13808 #ifdef CONFIG_SMP
13809 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13810 +
13811 + pax_open_kernel();
13812 early_gdt_descr.address =
13813 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13814 + pax_close_kernel();
13815 +
13816 initial_gs = per_cpu_offset(smp_processor_id());
13817 #endif
13818 initial_code = (unsigned long)wakeup_long64;
13819 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13820 index 13ab720..95d5442 100644
13821 --- a/arch/x86/kernel/acpi/wakeup_32.S
13822 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13823 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13824 # and restore the stack ... but you need gdt for this to work
13825 movl saved_context_esp, %esp
13826
13827 - movl %cs:saved_magic, %eax
13828 - cmpl $0x12345678, %eax
13829 + cmpl $0x12345678, saved_magic
13830 jne bogus_magic
13831
13832 # jump to place where we left off
13833 - movl saved_eip, %eax
13834 - jmp *%eax
13835 + jmp *(saved_eip)
13836
13837 bogus_magic:
13838 jmp bogus_magic
13839 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13840 index 1f84794..e23f862 100644
13841 --- a/arch/x86/kernel/alternative.c
13842 +++ b/arch/x86/kernel/alternative.c
13843 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13844 */
13845 for (a = start; a < end; a++) {
13846 instr = (u8 *)&a->instr_offset + a->instr_offset;
13847 +
13848 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13849 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13850 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13851 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13852 +#endif
13853 +
13854 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13855 BUG_ON(a->replacementlen > a->instrlen);
13856 BUG_ON(a->instrlen > sizeof(insnbuf));
13857 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13858 for (poff = start; poff < end; poff++) {
13859 u8 *ptr = (u8 *)poff + *poff;
13860
13861 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13862 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13863 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13864 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13865 +#endif
13866 +
13867 if (!*poff || ptr < text || ptr >= text_end)
13868 continue;
13869 /* turn DS segment override prefix into lock prefix */
13870 - if (*ptr == 0x3e)
13871 + if (*ktla_ktva(ptr) == 0x3e)
13872 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13873 };
13874 mutex_unlock(&text_mutex);
13875 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13876 for (poff = start; poff < end; poff++) {
13877 u8 *ptr = (u8 *)poff + *poff;
13878
13879 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13880 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13881 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13882 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13883 +#endif
13884 +
13885 if (!*poff || ptr < text || ptr >= text_end)
13886 continue;
13887 /* turn lock prefix into DS segment override prefix */
13888 - if (*ptr == 0xf0)
13889 + if (*ktla_ktva(ptr) == 0xf0)
13890 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13891 };
13892 mutex_unlock(&text_mutex);
13893 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13894
13895 BUG_ON(p->len > MAX_PATCH_LEN);
13896 /* prep the buffer with the original instructions */
13897 - memcpy(insnbuf, p->instr, p->len);
13898 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13899 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13900 (unsigned long)p->instr, p->len);
13901
13902 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13903 if (smp_alt_once)
13904 free_init_pages("SMP alternatives",
13905 (unsigned long)__smp_locks,
13906 - (unsigned long)__smp_locks_end);
13907 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13908
13909 restart_nmi();
13910 }
13911 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13912 * instructions. And on the local CPU you need to be protected again NMI or MCE
13913 * handlers seeing an inconsistent instruction while you patch.
13914 */
13915 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13916 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13917 size_t len)
13918 {
13919 unsigned long flags;
13920 local_irq_save(flags);
13921 - memcpy(addr, opcode, len);
13922 +
13923 + pax_open_kernel();
13924 + memcpy(ktla_ktva(addr), opcode, len);
13925 sync_core();
13926 + pax_close_kernel();
13927 +
13928 local_irq_restore(flags);
13929 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13930 that causes hangs on some VIA CPUs. */
13931 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13932 */
13933 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13934 {
13935 - unsigned long flags;
13936 - char *vaddr;
13937 + unsigned char *vaddr = ktla_ktva(addr);
13938 struct page *pages[2];
13939 - int i;
13940 + size_t i;
13941
13942 if (!core_kernel_text((unsigned long)addr)) {
13943 - pages[0] = vmalloc_to_page(addr);
13944 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13945 + pages[0] = vmalloc_to_page(vaddr);
13946 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13947 } else {
13948 - pages[0] = virt_to_page(addr);
13949 + pages[0] = virt_to_page(vaddr);
13950 WARN_ON(!PageReserved(pages[0]));
13951 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13952 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13953 }
13954 BUG_ON(!pages[0]);
13955 - local_irq_save(flags);
13956 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13957 - if (pages[1])
13958 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13959 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13960 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13961 - clear_fixmap(FIX_TEXT_POKE0);
13962 - if (pages[1])
13963 - clear_fixmap(FIX_TEXT_POKE1);
13964 - local_flush_tlb();
13965 - sync_core();
13966 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13967 - that causes hangs on some VIA CPUs. */
13968 + text_poke_early(addr, opcode, len);
13969 for (i = 0; i < len; i++)
13970 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13971 - local_irq_restore(flags);
13972 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13973 return addr;
13974 }
13975
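
In the alternative.c hunks above, text_poke() is rerouted through text_poke_early(), which brackets the memcpy() with pax_open_kernel()/pax_close_kernel() and then verifies the patched bytes. Conceptually the open/close pair is a brief lift of write protection around a single permitted write; a user-space analogue with mprotect(), an illustration only and not how the kernel actually toggles CR0 or PTEs:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (page == MAP_FAILED)
                return 1;

        strcpy(page, "original");
        mprotect(page, pagesz, PROT_READ);                /* normally read-only   */

        mprotect(page, pagesz, PROT_READ | PROT_WRITE);   /* "open" the window    */
        memcpy(page, "patched", 8);                       /* the permitted write  */
        mprotect(page, pagesz, PROT_READ);                /* "close" it again     */

        if (memcmp(page, "patched", 8) != 0)              /* mirrors the byte check */
                return 1;
        printf("%s\n", page);
        munmap(page, pagesz);
        return 0;
}
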
13976 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13977 index 5b3f88e..61232b4 100644
13978 --- a/arch/x86/kernel/apic/apic.c
13979 +++ b/arch/x86/kernel/apic/apic.c
13980 @@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13981 /*
13982 * Debug level, exported for io_apic.c
13983 */
13984 -unsigned int apic_verbosity;
13985 +int apic_verbosity;
13986
13987 int pic_mode;
13988
13989 @@ -1912,7 +1912,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13990 apic_write(APIC_ESR, 0);
13991 v1 = apic_read(APIC_ESR);
13992 ack_APIC_irq();
13993 - atomic_inc(&irq_err_count);
13994 + atomic_inc_unchecked(&irq_err_count);
13995
13996 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13997 smp_processor_id(), v0 , v1);
13998 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13999 index fb07275..e06bb59 100644
14000 --- a/arch/x86/kernel/apic/io_apic.c
14001 +++ b/arch/x86/kernel/apic/io_apic.c
14002 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14003 }
14004 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14005
14006 -void lock_vector_lock(void)
14007 +void lock_vector_lock(void) __acquires(vector_lock)
14008 {
14009 /* Used to the online set of cpus does not change
14010 * during assign_irq_vector.
14011 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
14012 raw_spin_lock(&vector_lock);
14013 }
14014
14015 -void unlock_vector_lock(void)
14016 +void unlock_vector_lock(void) __releases(vector_lock)
14017 {
14018 raw_spin_unlock(&vector_lock);
14019 }
14020 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
14021 ack_APIC_irq();
14022 }
14023
14024 -atomic_t irq_mis_count;
14025 +atomic_unchecked_t irq_mis_count;
14026
14027 static void ack_apic_level(struct irq_data *data)
14028 {
14029 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
14030 * at the cpu.
14031 */
14032 if (!(v & (1 << (i & 0x1f)))) {
14033 - atomic_inc(&irq_mis_count);
14034 + atomic_inc_unchecked(&irq_mis_count);
14035
14036 eoi_ioapic_irq(irq, cfg);
14037 }
14038 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
14039 index f76623c..aab694f 100644
14040 --- a/arch/x86/kernel/apm_32.c
14041 +++ b/arch/x86/kernel/apm_32.c
14042 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
14043 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14044 * even though they are called in protected mode.
14045 */
14046 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14047 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14048 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14049
14050 static const char driver_version[] = "1.16ac"; /* no spaces */
14051 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
14052 BUG_ON(cpu != 0);
14053 gdt = get_cpu_gdt_table(cpu);
14054 save_desc_40 = gdt[0x40 / 8];
14055 +
14056 + pax_open_kernel();
14057 gdt[0x40 / 8] = bad_bios_desc;
14058 + pax_close_kernel();
14059
14060 apm_irq_save(flags);
14061 APM_DO_SAVE_SEGS;
14062 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
14063 &call->esi);
14064 APM_DO_RESTORE_SEGS;
14065 apm_irq_restore(flags);
14066 +
14067 + pax_open_kernel();
14068 gdt[0x40 / 8] = save_desc_40;
14069 + pax_close_kernel();
14070 +
14071 put_cpu();
14072
14073 return call->eax & 0xff;
14074 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
14075 BUG_ON(cpu != 0);
14076 gdt = get_cpu_gdt_table(cpu);
14077 save_desc_40 = gdt[0x40 / 8];
14078 +
14079 + pax_open_kernel();
14080 gdt[0x40 / 8] = bad_bios_desc;
14081 + pax_close_kernel();
14082
14083 apm_irq_save(flags);
14084 APM_DO_SAVE_SEGS;
14085 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
14086 &call->eax);
14087 APM_DO_RESTORE_SEGS;
14088 apm_irq_restore(flags);
14089 +
14090 + pax_open_kernel();
14091 gdt[0x40 / 8] = save_desc_40;
14092 + pax_close_kernel();
14093 +
14094 put_cpu();
14095 return error;
14096 }
14097 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
14098 * code to that CPU.
14099 */
14100 gdt = get_cpu_gdt_table(0);
14101 +
14102 + pax_open_kernel();
14103 set_desc_base(&gdt[APM_CS >> 3],
14104 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14105 set_desc_base(&gdt[APM_CS_16 >> 3],
14106 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14107 set_desc_base(&gdt[APM_DS >> 3],
14108 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14109 + pax_close_kernel();
14110
14111 proc_create("apm", 0, NULL, &apm_file_ops);
14112
14113 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
14114 index 68de2dc..1f3c720 100644
14115 --- a/arch/x86/kernel/asm-offsets.c
14116 +++ b/arch/x86/kernel/asm-offsets.c
14117 @@ -33,6 +33,8 @@ void common(void) {
14118 OFFSET(TI_status, thread_info, status);
14119 OFFSET(TI_addr_limit, thread_info, addr_limit);
14120 OFFSET(TI_preempt_count, thread_info, preempt_count);
14121 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14122 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14123
14124 BLANK();
14125 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
14126 @@ -53,8 +55,26 @@ void common(void) {
14127 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14128 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14129 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14130 +
14131 +#ifdef CONFIG_PAX_KERNEXEC
14132 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14133 #endif
14134
14135 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14136 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14137 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14138 +#ifdef CONFIG_X86_64
14139 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14140 +#endif
14141 +#endif
14142 +
14143 +#endif
14144 +
14145 + BLANK();
14146 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14147 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14148 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14149 +
14150 #ifdef CONFIG_XEN
14151 BLANK();
14152 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14153 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14154 index 834e897..dacddc8 100644
14155 --- a/arch/x86/kernel/asm-offsets_64.c
14156 +++ b/arch/x86/kernel/asm-offsets_64.c
14157 @@ -70,6 +70,7 @@ int main(void)
14158 BLANK();
14159 #undef ENTRY
14160
14161 + DEFINE(TSS_size, sizeof(struct tss_struct));
14162 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14163 BLANK();
14164
14165 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14166 index 25f24dc..4094a7f 100644
14167 --- a/arch/x86/kernel/cpu/Makefile
14168 +++ b/arch/x86/kernel/cpu/Makefile
14169 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14170 CFLAGS_REMOVE_perf_event.o = -pg
14171 endif
14172
14173 -# Make sure load_percpu_segment has no stackprotector
14174 -nostackp := $(call cc-option, -fno-stack-protector)
14175 -CFLAGS_common.o := $(nostackp)
14176 -
14177 obj-y := intel_cacheinfo.o scattered.o topology.o
14178 obj-y += proc.o capflags.o powerflags.o common.o
14179 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14180 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14181 index 80ab83d..0a7b34e 100644
14182 --- a/arch/x86/kernel/cpu/amd.c
14183 +++ b/arch/x86/kernel/cpu/amd.c
14184 @@ -670,7 +670,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14185 unsigned int size)
14186 {
14187 /* AMD errata T13 (order #21922) */
14188 - if ((c->x86 == 6)) {
14189 + if (c->x86 == 6) {
14190 /* Duron Rev A0 */
14191 if (c->x86_model == 3 && c->x86_mask == 0)
14192 size = 64;
14193 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14194 index 1a810e4..9fa8201 100644
14195 --- a/arch/x86/kernel/cpu/common.c
14196 +++ b/arch/x86/kernel/cpu/common.c
14197 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14198
14199 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14200
14201 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14202 -#ifdef CONFIG_X86_64
14203 - /*
14204 - * We need valid kernel segments for data and code in long mode too
14205 - * IRET will check the segment types kkeil 2000/10/28
14206 - * Also sysret mandates a special GDT layout
14207 - *
14208 - * TLS descriptors are currently at a different place compared to i386.
14209 - * Hopefully nobody expects them at a fixed place (Wine?)
14210 - */
14211 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14212 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14213 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14214 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14215 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14216 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14217 -#else
14218 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14219 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14220 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14221 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14222 - /*
14223 - * Segments used for calling PnP BIOS have byte granularity.
14224 - * They code segments and data segments have fixed 64k limits,
14225 - * the transfer segment sizes are set at run time.
14226 - */
14227 - /* 32-bit code */
14228 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14229 - /* 16-bit code */
14230 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14231 - /* 16-bit data */
14232 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14233 - /* 16-bit data */
14234 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14235 - /* 16-bit data */
14236 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14237 - /*
14238 - * The APM segments have byte granularity and their bases
14239 - * are set at run time. All have 64k limits.
14240 - */
14241 - /* 32-bit code */
14242 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14243 - /* 16-bit code */
14244 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14245 - /* data */
14246 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14247 -
14248 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14249 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14250 - GDT_STACK_CANARY_INIT
14251 -#endif
14252 -} };
14253 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14254 -
14255 static int __init x86_xsave_setup(char *s)
14256 {
14257 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14258 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
14259 {
14260 struct desc_ptr gdt_descr;
14261
14262 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14263 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14264 gdt_descr.size = GDT_SIZE - 1;
14265 load_gdt(&gdt_descr);
14266 /* Reload the per-cpu base */
14267 @@ -839,6 +785,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14268 /* Filter out anything that depends on CPUID levels we don't have */
14269 filter_cpuid_features(c, true);
14270
14271 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14272 + setup_clear_cpu_cap(X86_FEATURE_SEP);
14273 +#endif
14274 +
14275 /* If the model name is still unset, do table lookup. */
14276 if (!c->x86_model_id[0]) {
14277 const char *p;
14278 @@ -1019,10 +969,12 @@ static __init int setup_disablecpuid(char *arg)
14279 }
14280 __setup("clearcpuid=", setup_disablecpuid);
14281
14282 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14283 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
14284 +
14285 #ifdef CONFIG_X86_64
14286 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14287 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14288 - (unsigned long) nmi_idt_table };
14289 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14290
14291 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14292 irq_stack_union) __aligned(PAGE_SIZE);
14293 @@ -1036,7 +988,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14294 EXPORT_PER_CPU_SYMBOL(current_task);
14295
14296 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14297 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14298 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14299 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14300
14301 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14302 @@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14303 {
14304 memset(regs, 0, sizeof(struct pt_regs));
14305 regs->fs = __KERNEL_PERCPU;
14306 - regs->gs = __KERNEL_STACK_CANARY;
14307 + savesegment(gs, regs->gs);
14308
14309 return regs;
14310 }
14311 @@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14312 int i;
14313
14314 cpu = stack_smp_processor_id();
14315 - t = &per_cpu(init_tss, cpu);
14316 + t = init_tss + cpu;
14317 oist = &per_cpu(orig_ist, cpu);
14318
14319 #ifdef CONFIG_NUMA
14320 @@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14321 switch_to_new_gdt(cpu);
14322 loadsegment(fs, 0);
14323
14324 - load_idt((const struct desc_ptr *)&idt_descr);
14325 + load_idt(&idt_descr);
14326
14327 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14328 syscall_init();
14329 @@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14330 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14331 barrier();
14332
14333 - x86_configure_nx();
14334 if (cpu != 0)
14335 enable_x2apic();
14336
14337 @@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14338 {
14339 int cpu = smp_processor_id();
14340 struct task_struct *curr = current;
14341 - struct tss_struct *t = &per_cpu(init_tss, cpu);
14342 + struct tss_struct *t = init_tss + cpu;
14343 struct thread_struct *thread = &curr->thread;
14344
14345 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14346 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14347 index 3e6ff6c..54b4992 100644
14348 --- a/arch/x86/kernel/cpu/intel.c
14349 +++ b/arch/x86/kernel/cpu/intel.c
14350 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14351 * Update the IDT descriptor and reload the IDT so that
14352 * it uses the read-only mapped virtual address.
14353 */
14354 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14355 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14356 load_idt(&idt_descr);
14357 }
14358 #endif
14359 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14360 index 5a11ae2..a1a1c8a 100644
14361 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14362 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14363 @@ -42,6 +42,7 @@
14364 #include <asm/processor.h>
14365 #include <asm/mce.h>
14366 #include <asm/msr.h>
14367 +#include <asm/local.h>
14368
14369 #include "mce-internal.h"
14370
14371 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14372 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14373 m->cs, m->ip);
14374
14375 - if (m->cs == __KERNEL_CS)
14376 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14377 print_symbol("{%s}", m->ip);
14378 pr_cont("\n");
14379 }
14380 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14381
14382 #define PANIC_TIMEOUT 5 /* 5 seconds */
14383
14384 -static atomic_t mce_paniced;
14385 +static atomic_unchecked_t mce_paniced;
14386
14387 static int fake_panic;
14388 -static atomic_t mce_fake_paniced;
14389 +static atomic_unchecked_t mce_fake_paniced;
14390
14391 /* Panic in progress. Enable interrupts and wait for final IPI */
14392 static void wait_for_panic(void)
14393 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14394 /*
14395 * Make sure only one CPU runs in machine check panic
14396 */
14397 - if (atomic_inc_return(&mce_paniced) > 1)
14398 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14399 wait_for_panic();
14400 barrier();
14401
14402 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14403 console_verbose();
14404 } else {
14405 /* Don't log too much for fake panic */
14406 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14407 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14408 return;
14409 }
14410 /* First print corrected ones that are still unlogged */
14411 @@ -658,7 +659,7 @@ static int mce_timed_out(u64 *t)
14412 * might have been modified by someone else.
14413 */
14414 rmb();
14415 - if (atomic_read(&mce_paniced))
14416 + if (atomic_read_unchecked(&mce_paniced))
14417 wait_for_panic();
14418 if (!monarch_timeout)
14419 goto out;
14420 @@ -1446,7 +1447,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14421 }
14422
14423 /* Call the installed machine check handler for this CPU setup. */
14424 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14425 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14426 unexpected_machine_check;
14427
14428 /*
14429 @@ -1469,7 +1470,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14430 return;
14431 }
14432
14433 + pax_open_kernel();
14434 machine_check_vector = do_machine_check;
14435 + pax_close_kernel();
14436
14437 __mcheck_cpu_init_generic();
14438 __mcheck_cpu_init_vendor(c);
14439 @@ -1483,7 +1486,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14440 */
14441
14442 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14443 -static int mce_chrdev_open_count; /* #times opened */
14444 +static local_t mce_chrdev_open_count; /* #times opened */
14445 static int mce_chrdev_open_exclu; /* already open exclusive? */
14446
14447 static int mce_chrdev_open(struct inode *inode, struct file *file)
14448 @@ -1491,7 +1494,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14449 spin_lock(&mce_chrdev_state_lock);
14450
14451 if (mce_chrdev_open_exclu ||
14452 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14453 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14454 spin_unlock(&mce_chrdev_state_lock);
14455
14456 return -EBUSY;
14457 @@ -1499,7 +1502,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14458
14459 if (file->f_flags & O_EXCL)
14460 mce_chrdev_open_exclu = 1;
14461 - mce_chrdev_open_count++;
14462 + local_inc(&mce_chrdev_open_count);
14463
14464 spin_unlock(&mce_chrdev_state_lock);
14465
14466 @@ -1510,7 +1513,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14467 {
14468 spin_lock(&mce_chrdev_state_lock);
14469
14470 - mce_chrdev_open_count--;
14471 + local_dec(&mce_chrdev_open_count);
14472 mce_chrdev_open_exclu = 0;
14473
14474 spin_unlock(&mce_chrdev_state_lock);
14475 @@ -2229,7 +2232,7 @@ struct dentry *mce_get_debugfs_dir(void)
14476 static void mce_reset(void)
14477 {
14478 cpu_missing = 0;
14479 - atomic_set(&mce_fake_paniced, 0);
14480 + atomic_set_unchecked(&mce_fake_paniced, 0);
14481 atomic_set(&mce_executing, 0);
14482 atomic_set(&mce_callin, 0);
14483 atomic_set(&global_nwo, 0);
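
The atomic_t to atomic_unchecked_t conversions above opt these counters out of REFCOUNT-style overflow checking, which is reserved for counters (typically reference counts) whose wrap-around would be exploitable; pure statistics such as mce_paniced or irq_err_count may wrap harmlessly. A hedged C11 sketch of what the checked flavour amounts to, saturating instead of wrapping; inc_checked() is an invented name, not the PaX implementation:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Saturating increment: refuses to move past INT_MAX instead of wrapping. */
static int inc_checked(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != INT_MAX) {
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return 0;        /* incremented */
        }
        return -1;                       /* refused: would overflow */
}

int main(void)
{
        atomic_int refs = INT_MAX - 1;
        int r;

        r = inc_checked(&refs);
        printf("first inc:  rc=%d count=%d\n", r, atomic_load(&refs));
        r = inc_checked(&refs);
        printf("second inc: rc=%d count=%d\n", r, atomic_load(&refs));
        return 0;
}
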
14484 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14485 index 5c0e653..0882b0a 100644
14486 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14487 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14488 @@ -12,6 +12,7 @@
14489 #include <asm/system.h>
14490 #include <asm/mce.h>
14491 #include <asm/msr.h>
14492 +#include <asm/pgtable.h>
14493
14494 /* By default disabled */
14495 int mce_p5_enabled __read_mostly;
14496 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14497 if (!cpu_has(c, X86_FEATURE_MCE))
14498 return;
14499
14500 + pax_open_kernel();
14501 machine_check_vector = pentium_machine_check;
14502 + pax_close_kernel();
14503 /* Make sure the vector pointer is visible before we enable MCEs: */
14504 wmb();
14505
14506 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14507 index 54060f5..c1a7577 100644
14508 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14509 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14510 @@ -11,6 +11,7 @@
14511 #include <asm/system.h>
14512 #include <asm/mce.h>
14513 #include <asm/msr.h>
14514 +#include <asm/pgtable.h>
14515
14516 /* Machine check handler for WinChip C6: */
14517 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14518 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14519 {
14520 u32 lo, hi;
14521
14522 + pax_open_kernel();
14523 machine_check_vector = winchip_machine_check;
14524 + pax_close_kernel();
14525 /* Make sure the vector pointer is visible before we enable MCEs: */
14526 wmb();
14527
14528 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14529 index 6b96110..0da73eb 100644
14530 --- a/arch/x86/kernel/cpu/mtrr/main.c
14531 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14532 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14533 u64 size_or_mask, size_and_mask;
14534 static bool mtrr_aps_delayed_init;
14535
14536 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14537 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14538
14539 const struct mtrr_ops *mtrr_if;
14540
14541 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14542 index df5e41f..816c719 100644
14543 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14544 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14545 @@ -25,7 +25,7 @@ struct mtrr_ops {
14546 int (*validate_add_page)(unsigned long base, unsigned long size,
14547 unsigned int type);
14548 int (*have_wrcomb)(void);
14549 -};
14550 +} __do_const;
14551
14552 extern int generic_get_free_region(unsigned long base, unsigned long size,
14553 int replace_reg);
14554 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14555 index 5adce10..99284ec 100644
14556 --- a/arch/x86/kernel/cpu/perf_event.c
14557 +++ b/arch/x86/kernel/cpu/perf_event.c
14558 @@ -1665,7 +1665,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14559 break;
14560
14561 perf_callchain_store(entry, frame.return_address);
14562 - fp = frame.next_frame;
14563 + fp = (const void __force_user *)frame.next_frame;
14564 }
14565 }
14566
14567 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14568 index 13ad899..f642b9a 100644
14569 --- a/arch/x86/kernel/crash.c
14570 +++ b/arch/x86/kernel/crash.c
14571 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14572 {
14573 #ifdef CONFIG_X86_32
14574 struct pt_regs fixed_regs;
14575 -#endif
14576
14577 -#ifdef CONFIG_X86_32
14578 - if (!user_mode_vm(regs)) {
14579 + if (!user_mode(regs)) {
14580 crash_fixup_ss_esp(&fixed_regs, regs);
14581 regs = &fixed_regs;
14582 }
14583 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14584 index 37250fe..bf2ec74 100644
14585 --- a/arch/x86/kernel/doublefault_32.c
14586 +++ b/arch/x86/kernel/doublefault_32.c
14587 @@ -11,7 +11,7 @@
14588
14589 #define DOUBLEFAULT_STACKSIZE (1024)
14590 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14591 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14592 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14593
14594 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14595
14596 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14597 unsigned long gdt, tss;
14598
14599 store_gdt(&gdt_desc);
14600 - gdt = gdt_desc.address;
14601 + gdt = (unsigned long)gdt_desc.address;
14602
14603 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14604
14605 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14606 /* 0x2 bit is always set */
14607 .flags = X86_EFLAGS_SF | 0x2,
14608 .sp = STACK_START,
14609 - .es = __USER_DS,
14610 + .es = __KERNEL_DS,
14611 .cs = __KERNEL_CS,
14612 .ss = __KERNEL_DS,
14613 - .ds = __USER_DS,
14614 + .ds = __KERNEL_DS,
14615 .fs = __KERNEL_PERCPU,
14616
14617 .__cr3 = __pa_nodebug(swapper_pg_dir),
14618 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14619 index 4025fe4..d8451c6 100644
14620 --- a/arch/x86/kernel/dumpstack.c
14621 +++ b/arch/x86/kernel/dumpstack.c
14622 @@ -2,6 +2,9 @@
14623 * Copyright (C) 1991, 1992 Linus Torvalds
14624 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14625 */
14626 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14627 +#define __INCLUDED_BY_HIDESYM 1
14628 +#endif
14629 #include <linux/kallsyms.h>
14630 #include <linux/kprobes.h>
14631 #include <linux/uaccess.h>
14632 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
14633 static void
14634 print_ftrace_graph_addr(unsigned long addr, void *data,
14635 const struct stacktrace_ops *ops,
14636 - struct thread_info *tinfo, int *graph)
14637 + struct task_struct *task, int *graph)
14638 {
14639 - struct task_struct *task = tinfo->task;
14640 unsigned long ret_addr;
14641 int index = task->curr_ret_stack;
14642
14643 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14644 static inline void
14645 print_ftrace_graph_addr(unsigned long addr, void *data,
14646 const struct stacktrace_ops *ops,
14647 - struct thread_info *tinfo, int *graph)
14648 + struct task_struct *task, int *graph)
14649 { }
14650 #endif
14651
14652 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14653 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14654 */
14655
14656 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14657 - void *p, unsigned int size, void *end)
14658 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14659 {
14660 - void *t = tinfo;
14661 if (end) {
14662 if (p < end && p >= (end-THREAD_SIZE))
14663 return 1;
14664 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14665 }
14666
14667 unsigned long
14668 -print_context_stack(struct thread_info *tinfo,
14669 +print_context_stack(struct task_struct *task, void *stack_start,
14670 unsigned long *stack, unsigned long bp,
14671 const struct stacktrace_ops *ops, void *data,
14672 unsigned long *end, int *graph)
14673 {
14674 struct stack_frame *frame = (struct stack_frame *)bp;
14675
14676 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14677 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14678 unsigned long addr;
14679
14680 addr = *stack;
14681 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
14682 } else {
14683 ops->address(data, addr, 0);
14684 }
14685 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14686 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14687 }
14688 stack++;
14689 }
14690 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
14691 EXPORT_SYMBOL_GPL(print_context_stack);
14692
14693 unsigned long
14694 -print_context_stack_bp(struct thread_info *tinfo,
14695 +print_context_stack_bp(struct task_struct *task, void *stack_start,
14696 unsigned long *stack, unsigned long bp,
14697 const struct stacktrace_ops *ops, void *data,
14698 unsigned long *end, int *graph)
14699 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14700 struct stack_frame *frame = (struct stack_frame *)bp;
14701 unsigned long *ret_addr = &frame->return_address;
14702
14703 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14704 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14705 unsigned long addr = *ret_addr;
14706
14707 if (!__kernel_text_address(addr))
14708 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14709 ops->address(data, addr, 1);
14710 frame = frame->next_frame;
14711 ret_addr = &frame->return_address;
14712 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14713 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14714 }
14715
14716 return (unsigned long)frame;
14717 @@ -186,7 +186,7 @@ void dump_stack(void)
14718
14719 bp = stack_frame(current, NULL);
14720 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14721 - current->pid, current->comm, print_tainted(),
14722 + task_pid_nr(current), current->comm, print_tainted(),
14723 init_utsname()->release,
14724 (int)strcspn(init_utsname()->version, " "),
14725 init_utsname()->version);
14726 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
14727 }
14728 EXPORT_SYMBOL_GPL(oops_begin);
14729
14730 +extern void gr_handle_kernel_exploit(void);
14731 +
14732 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14733 {
14734 if (regs && kexec_should_crash(current))
14735 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14736 panic("Fatal exception in interrupt");
14737 if (panic_on_oops)
14738 panic("Fatal exception");
14739 - do_exit(signr);
14740 +
14741 + gr_handle_kernel_exploit();
14742 +
14743 + do_group_exit(signr);
14744 }
14745
14746 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14747 @@ -270,7 +275,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14748
14749 show_registers(regs);
14750 #ifdef CONFIG_X86_32
14751 - if (user_mode_vm(regs)) {
14752 + if (user_mode(regs)) {
14753 sp = regs->sp;
14754 ss = regs->ss & 0xffff;
14755 } else {
14756 @@ -298,7 +303,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14757 unsigned long flags = oops_begin();
14758 int sig = SIGSEGV;
14759
14760 - if (!user_mode_vm(regs))
14761 + if (!user_mode(regs))
14762 report_bug(regs->ip, regs);
14763
14764 if (__die(str, regs, err))
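
Two behavioural changes stand out in the oops_end() hunk above: gr_handle_kernel_exploit() is grsecurity's active-response hook (implemented in the grsecurity/ part of this patch), which treats an oops raised by an unprivileged user as a likely exploit attempt and reacts by killing that user's processes and locking the uid out for a while, and do_group_exit() replaces do_exit() so the whole thread group of the offending task is terminated rather than the single faulting thread. A rough, hypothetical sketch of the active-response idea only, not grsecurity's actual code:

/*
 * Sketch only: a simplified stand-in for an "oops as exploit attempt"
 * handler.  example_handle_kernel_exploit is hypothetical; the real
 * gr_handle_kernel_exploit() also enforces a per-uid lockout window.
 */
#include <linux/sched.h>
#include <linux/cred.h>

static void example_handle_kernel_exploit(void)
{
	struct task_struct *g, *p;
	uid_t uid = current_uid();

	if (uid == 0)
		return;		/* only react to unprivileged oopses */

	printk(KERN_ALERT "possible exploit attempt by uid %u, killing its tasks\n", uid);

	rcu_read_lock();
	do_each_thread(g, p) {
		if (task_uid(p) == uid)
			send_sig(SIGKILL, p, 1);
	} while_each_thread(g, p);
	rcu_read_unlock();
}
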
14765 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14766 index c99f9ed..2a15d80 100644
14767 --- a/arch/x86/kernel/dumpstack_32.c
14768 +++ b/arch/x86/kernel/dumpstack_32.c
14769 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14770 bp = stack_frame(task, regs);
14771
14772 for (;;) {
14773 - struct thread_info *context;
14774 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14775
14776 - context = (struct thread_info *)
14777 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14778 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14779 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14780
14781 - stack = (unsigned long *)context->previous_esp;
14782 - if (!stack)
14783 + if (stack_start == task_stack_page(task))
14784 break;
14785 + stack = *(unsigned long **)stack_start;
14786 if (ops->stack(data, "IRQ") < 0)
14787 break;
14788 touch_nmi_watchdog();
14789 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14790 * When in-kernel, we also print out the stack and code at the
14791 * time of the fault..
14792 */
14793 - if (!user_mode_vm(regs)) {
14794 + if (!user_mode(regs)) {
14795 unsigned int code_prologue = code_bytes * 43 / 64;
14796 unsigned int code_len = code_bytes;
14797 unsigned char c;
14798 u8 *ip;
14799 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14800
14801 printk(KERN_EMERG "Stack:\n");
14802 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14803
14804 printk(KERN_EMERG "Code: ");
14805
14806 - ip = (u8 *)regs->ip - code_prologue;
14807 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14808 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14809 /* try starting at IP */
14810 - ip = (u8 *)regs->ip;
14811 + ip = (u8 *)regs->ip + cs_base;
14812 code_len = code_len - code_prologue + 1;
14813 }
14814 for (i = 0; i < code_len; i++, ip++) {
14815 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14816 printk(KERN_CONT " Bad EIP value.");
14817 break;
14818 }
14819 - if (ip == (u8 *)regs->ip)
14820 + if (ip == (u8 *)regs->ip + cs_base)
14821 printk(KERN_CONT "<%02x> ", c);
14822 else
14823 printk(KERN_CONT "%02x ", c);
14824 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14825 {
14826 unsigned short ud2;
14827
14828 + ip = ktla_ktva(ip);
14829 if (ip < PAGE_OFFSET)
14830 return 0;
14831 if (probe_kernel_address((unsigned short *)ip, ud2))
14832 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14833
14834 return ud2 == 0x0b0f;
14835 }
14836 +
14837 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14838 +void pax_check_alloca(unsigned long size)
14839 +{
14840 + unsigned long sp = (unsigned long)&sp, stack_left;
14841 +
14842 + /* all kernel stacks are of the same size */
14843 + stack_left = sp & (THREAD_SIZE - 1);
14844 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14845 +}
14846 +EXPORT_SYMBOL(pax_check_alloca);
14847 +#endif
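
pax_check_alloca() above is the runtime side of PAX_MEMORY_STACKLEAK: a companion GCC plugin instruments functions that make variable-sized stack allocations (alloca()/VLAs) to call it with the requested size, and the check BUG()s unless the allocation would still leave at least 256 bytes of kernel stack. A sketch of what an instrumented function effectively looks like (the explicit call is normally emitted by the plugin, not written by hand, and example_handle_request is an invented name):

/*
 * Sketch only: the pax_check_alloca() call below stands in for what the
 * STACKLEAK plugin inserts automatically.
 */
#include <linux/string.h>

static void example_handle_request(size_t len)
{
	pax_check_alloca(len);	/* BUG()s if fewer than 256 bytes of stack would remain */

	{
		char buf[len];	/* variable-length array on the kernel stack */

		memset(buf, 0, len);
		/* ... fill and consume buf ... */
	}
}
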
14848 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14849 index 17107bd..9623722 100644
14850 --- a/arch/x86/kernel/dumpstack_64.c
14851 +++ b/arch/x86/kernel/dumpstack_64.c
14852 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14853 unsigned long *irq_stack_end =
14854 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14855 unsigned used = 0;
14856 - struct thread_info *tinfo;
14857 int graph = 0;
14858 unsigned long dummy;
14859 + void *stack_start;
14860
14861 if (!task)
14862 task = current;
14863 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14864 * current stack address. If the stacks consist of nested
14865 * exceptions
14866 */
14867 - tinfo = task_thread_info(task);
14868 for (;;) {
14869 char *id;
14870 unsigned long *estack_end;
14871 +
14872 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14873 &used, &id);
14874
14875 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14876 if (ops->stack(data, id) < 0)
14877 break;
14878
14879 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14880 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14881 data, estack_end, &graph);
14882 ops->stack(data, "<EOE>");
14883 /*
14884 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14885 * second-to-last pointer (index -2 to end) in the
14886 * exception stack:
14887 */
14888 + if ((u16)estack_end[-1] != __KERNEL_DS)
14889 + goto out;
14890 stack = (unsigned long *) estack_end[-2];
14891 continue;
14892 }
14893 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14894 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14895 if (ops->stack(data, "IRQ") < 0)
14896 break;
14897 - bp = ops->walk_stack(tinfo, stack, bp,
14898 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14899 ops, data, irq_stack_end, &graph);
14900 /*
14901 * We link to the next stack (which would be
14902 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14903 /*
14904 * This handles the process stack:
14905 */
14906 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14907 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14908 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14909 +out:
14910 put_cpu();
14911 }
14912 EXPORT_SYMBOL(dump_trace);
14913 @@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14914
14915 return ud2 == 0x0b0f;
14916 }
14917 +
14918 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14919 +void pax_check_alloca(unsigned long size)
14920 +{
14921 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14922 + unsigned cpu, used;
14923 + char *id;
14924 +
14925 + /* check the process stack first */
14926 + stack_start = (unsigned long)task_stack_page(current);
14927 + stack_end = stack_start + THREAD_SIZE;
14928 + if (likely(stack_start <= sp && sp < stack_end)) {
14929 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14930 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14931 + return;
14932 + }
14933 +
14934 + cpu = get_cpu();
14935 +
14936 + /* check the irq stacks */
14937 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14938 + stack_start = stack_end - IRQ_STACK_SIZE;
14939 + if (stack_start <= sp && sp < stack_end) {
14940 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14941 + put_cpu();
14942 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14943 + return;
14944 + }
14945 +
14946 + /* check the exception stacks */
14947 + used = 0;
14948 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14949 + stack_start = stack_end - EXCEPTION_STKSZ;
14950 + if (stack_end && stack_start <= sp && sp < stack_end) {
14951 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14952 + put_cpu();
14953 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14954 + return;
14955 + }
14956 +
14957 + put_cpu();
14958 +
14959 + /* unknown stack */
14960 + BUG();
14961 +}
14962 +EXPORT_SYMBOL(pax_check_alloca);
14963 +#endif
14964 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14965 index 9b9f18b..9fcaa04 100644
14966 --- a/arch/x86/kernel/early_printk.c
14967 +++ b/arch/x86/kernel/early_printk.c
14968 @@ -7,6 +7,7 @@
14969 #include <linux/pci_regs.h>
14970 #include <linux/pci_ids.h>
14971 #include <linux/errno.h>
14972 +#include <linux/sched.h>
14973 #include <asm/io.h>
14974 #include <asm/processor.h>
14975 #include <asm/fcntl.h>
14976 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14977 index 7b784f4..db6b628 100644
14978 --- a/arch/x86/kernel/entry_32.S
14979 +++ b/arch/x86/kernel/entry_32.S
14980 @@ -179,13 +179,146 @@
14981 /*CFI_REL_OFFSET gs, PT_GS*/
14982 .endm
14983 .macro SET_KERNEL_GS reg
14984 +
14985 +#ifdef CONFIG_CC_STACKPROTECTOR
14986 movl $(__KERNEL_STACK_CANARY), \reg
14987 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14988 + movl $(__USER_DS), \reg
14989 +#else
14990 + xorl \reg, \reg
14991 +#endif
14992 +
14993 movl \reg, %gs
14994 .endm
14995
14996 #endif /* CONFIG_X86_32_LAZY_GS */
14997
14998 -.macro SAVE_ALL
14999 +.macro pax_enter_kernel
15000 +#ifdef CONFIG_PAX_KERNEXEC
15001 + call pax_enter_kernel
15002 +#endif
15003 +.endm
15004 +
15005 +.macro pax_exit_kernel
15006 +#ifdef CONFIG_PAX_KERNEXEC
15007 + call pax_exit_kernel
15008 +#endif
15009 +.endm
15010 +
15011 +#ifdef CONFIG_PAX_KERNEXEC
15012 +ENTRY(pax_enter_kernel)
15013 +#ifdef CONFIG_PARAVIRT
15014 + pushl %eax
15015 + pushl %ecx
15016 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
15017 + mov %eax, %esi
15018 +#else
15019 + mov %cr0, %esi
15020 +#endif
15021 + bts $16, %esi
15022 + jnc 1f
15023 + mov %cs, %esi
15024 + cmp $__KERNEL_CS, %esi
15025 + jz 3f
15026 + ljmp $__KERNEL_CS, $3f
15027 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15028 +2:
15029 +#ifdef CONFIG_PARAVIRT
15030 + mov %esi, %eax
15031 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15032 +#else
15033 + mov %esi, %cr0
15034 +#endif
15035 +3:
15036 +#ifdef CONFIG_PARAVIRT
15037 + popl %ecx
15038 + popl %eax
15039 +#endif
15040 + ret
15041 +ENDPROC(pax_enter_kernel)
15042 +
15043 +ENTRY(pax_exit_kernel)
15044 +#ifdef CONFIG_PARAVIRT
15045 + pushl %eax
15046 + pushl %ecx
15047 +#endif
15048 + mov %cs, %esi
15049 + cmp $__KERNEXEC_KERNEL_CS, %esi
15050 + jnz 2f
15051 +#ifdef CONFIG_PARAVIRT
15052 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15053 + mov %eax, %esi
15054 +#else
15055 + mov %cr0, %esi
15056 +#endif
15057 + btr $16, %esi
15058 + ljmp $__KERNEL_CS, $1f
15059 +1:
15060 +#ifdef CONFIG_PARAVIRT
15061 + mov %esi, %eax
15062 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15063 +#else
15064 + mov %esi, %cr0
15065 +#endif
15066 +2:
15067 +#ifdef CONFIG_PARAVIRT
15068 + popl %ecx
15069 + popl %eax
15070 +#endif
15071 + ret
15072 +ENDPROC(pax_exit_kernel)
15073 +#endif
15074 +
15075 +.macro pax_erase_kstack
15076 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15077 + call pax_erase_kstack
15078 +#endif
15079 +.endm
15080 +
15081 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15082 +/*
15083 + * ebp: thread_info
15084 + * ecx, edx: can be clobbered
15085 + */
15086 +ENTRY(pax_erase_kstack)
15087 + pushl %edi
15088 + pushl %eax
15089 +
15090 + mov TI_lowest_stack(%ebp), %edi
15091 + mov $-0xBEEF, %eax
15092 + std
15093 +
15094 +1: mov %edi, %ecx
15095 + and $THREAD_SIZE_asm - 1, %ecx
15096 + shr $2, %ecx
15097 + repne scasl
15098 + jecxz 2f
15099 +
15100 + cmp $2*16, %ecx
15101 + jc 2f
15102 +
15103 + mov $2*16, %ecx
15104 + repe scasl
15105 + jecxz 2f
15106 + jne 1b
15107 +
15108 +2: cld
15109 + mov %esp, %ecx
15110 + sub %edi, %ecx
15111 + shr $2, %ecx
15112 + rep stosl
15113 +
15114 + mov TI_task_thread_sp0(%ebp), %edi
15115 + sub $128, %edi
15116 + mov %edi, TI_lowest_stack(%ebp)
15117 +
15118 + popl %eax
15119 + popl %edi
15120 + ret
15121 +ENDPROC(pax_erase_kstack)
15122 +#endif
15123 +
15124 +.macro __SAVE_ALL _DS
15125 cld
15126 PUSH_GS
15127 pushl_cfi %fs
15128 @@ -208,7 +341,7 @@
15129 CFI_REL_OFFSET ecx, 0
15130 pushl_cfi %ebx
15131 CFI_REL_OFFSET ebx, 0
15132 - movl $(__USER_DS), %edx
15133 + movl $\_DS, %edx
15134 movl %edx, %ds
15135 movl %edx, %es
15136 movl $(__KERNEL_PERCPU), %edx
15137 @@ -216,6 +349,15 @@
15138 SET_KERNEL_GS %edx
15139 .endm
15140
15141 +.macro SAVE_ALL
15142 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15143 + __SAVE_ALL __KERNEL_DS
15144 + pax_enter_kernel
15145 +#else
15146 + __SAVE_ALL __USER_DS
15147 +#endif
15148 +.endm
15149 +
15150 .macro RESTORE_INT_REGS
15151 popl_cfi %ebx
15152 CFI_RESTORE ebx
15153 @@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
15154 popfl_cfi
15155 jmp syscall_exit
15156 CFI_ENDPROC
15157 -END(ret_from_fork)
15158 +ENDPROC(ret_from_fork)
15159
15160 /*
15161 * Interrupt exit functions should be protected against kprobes
15162 @@ -335,7 +477,15 @@ resume_userspace_sig:
15163 andl $SEGMENT_RPL_MASK, %eax
15164 #endif
15165 cmpl $USER_RPL, %eax
15166 +
15167 +#ifdef CONFIG_PAX_KERNEXEC
15168 + jae resume_userspace
15169 +
15170 + pax_exit_kernel
15171 + jmp resume_kernel
15172 +#else
15173 jb resume_kernel # not returning to v8086 or userspace
15174 +#endif
15175
15176 ENTRY(resume_userspace)
15177 LOCKDEP_SYS_EXIT
15178 @@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15179 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15180 # int/exception return?
15181 jne work_pending
15182 - jmp restore_all
15183 -END(ret_from_exception)
15184 + jmp restore_all_pax
15185 +ENDPROC(ret_from_exception)
15186
15187 #ifdef CONFIG_PREEMPT
15188 ENTRY(resume_kernel)
15189 @@ -363,7 +513,7 @@ need_resched:
15190 jz restore_all
15191 call preempt_schedule_irq
15192 jmp need_resched
15193 -END(resume_kernel)
15194 +ENDPROC(resume_kernel)
15195 #endif
15196 CFI_ENDPROC
15197 /*
15198 @@ -397,23 +547,34 @@ sysenter_past_esp:
15199 /*CFI_REL_OFFSET cs, 0*/
15200 /*
15201 * Push current_thread_info()->sysenter_return to the stack.
15202 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15203 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15204 */
15205 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15206 + pushl_cfi $0
15207 CFI_REL_OFFSET eip, 0
15208
15209 pushl_cfi %eax
15210 SAVE_ALL
15211 + GET_THREAD_INFO(%ebp)
15212 + movl TI_sysenter_return(%ebp),%ebp
15213 + movl %ebp,PT_EIP(%esp)
15214 ENABLE_INTERRUPTS(CLBR_NONE)
15215
15216 /*
15217 * Load the potential sixth argument from user stack.
15218 * Careful about security.
15219 */
15220 + movl PT_OLDESP(%esp),%ebp
15221 +
15222 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15223 + mov PT_OLDSS(%esp),%ds
15224 +1: movl %ds:(%ebp),%ebp
15225 + push %ss
15226 + pop %ds
15227 +#else
15228 cmpl $__PAGE_OFFSET-3,%ebp
15229 jae syscall_fault
15230 1: movl (%ebp),%ebp
15231 +#endif
15232 +
15233 movl %ebp,PT_EBP(%esp)
15234 .section __ex_table,"a"
15235 .align 4
15236 @@ -436,12 +597,24 @@ sysenter_do_call:
15237 testl $_TIF_ALLWORK_MASK, %ecx
15238 jne sysexit_audit
15239 sysenter_exit:
15240 +
15241 +#ifdef CONFIG_PAX_RANDKSTACK
15242 + pushl_cfi %eax
15243 + movl %esp, %eax
15244 + call pax_randomize_kstack
15245 + popl_cfi %eax
15246 +#endif
15247 +
15248 + pax_erase_kstack
15249 +
15250 /* if something modifies registers it must also disable sysexit */
15251 movl PT_EIP(%esp), %edx
15252 movl PT_OLDESP(%esp), %ecx
15253 xorl %ebp,%ebp
15254 TRACE_IRQS_ON
15255 1: mov PT_FS(%esp), %fs
15256 +2: mov PT_DS(%esp), %ds
15257 +3: mov PT_ES(%esp), %es
15258 PTGS_TO_GS
15259 ENABLE_INTERRUPTS_SYSEXIT
15260
15261 @@ -458,6 +631,9 @@ sysenter_audit:
15262 movl %eax,%edx /* 2nd arg: syscall number */
15263 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15264 call __audit_syscall_entry
15265 +
15266 + pax_erase_kstack
15267 +
15268 pushl_cfi %ebx
15269 movl PT_EAX(%esp),%eax /* reload syscall number */
15270 jmp sysenter_do_call
15271 @@ -483,11 +659,17 @@ sysexit_audit:
15272
15273 CFI_ENDPROC
15274 .pushsection .fixup,"ax"
15275 -2: movl $0,PT_FS(%esp)
15276 +4: movl $0,PT_FS(%esp)
15277 + jmp 1b
15278 +5: movl $0,PT_DS(%esp)
15279 + jmp 1b
15280 +6: movl $0,PT_ES(%esp)
15281 jmp 1b
15282 .section __ex_table,"a"
15283 .align 4
15284 - .long 1b,2b
15285 + .long 1b,4b
15286 + .long 2b,5b
15287 + .long 3b,6b
15288 .popsection
15289 PTGS_TO_GS_EX
15290 ENDPROC(ia32_sysenter_target)
15291 @@ -520,6 +702,15 @@ syscall_exit:
15292 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15293 jne syscall_exit_work
15294
15295 +restore_all_pax:
15296 +
15297 +#ifdef CONFIG_PAX_RANDKSTACK
15298 + movl %esp, %eax
15299 + call pax_randomize_kstack
15300 +#endif
15301 +
15302 + pax_erase_kstack
15303 +
15304 restore_all:
15305 TRACE_IRQS_IRET
15306 restore_all_notrace:
15307 @@ -579,14 +770,34 @@ ldt_ss:
15308 * compensating for the offset by changing to the ESPFIX segment with
15309 * a base address that matches for the difference.
15310 */
15311 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15312 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15313 mov %esp, %edx /* load kernel esp */
15314 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15315 mov %dx, %ax /* eax: new kernel esp */
15316 sub %eax, %edx /* offset (low word is 0) */
15317 +#ifdef CONFIG_SMP
15318 + movl PER_CPU_VAR(cpu_number), %ebx
15319 + shll $PAGE_SHIFT_asm, %ebx
15320 + addl $cpu_gdt_table, %ebx
15321 +#else
15322 + movl $cpu_gdt_table, %ebx
15323 +#endif
15324 shr $16, %edx
15325 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15326 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15327 +
15328 +#ifdef CONFIG_PAX_KERNEXEC
15329 + mov %cr0, %esi
15330 + btr $16, %esi
15331 + mov %esi, %cr0
15332 +#endif
15333 +
15334 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15335 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15336 +
15337 +#ifdef CONFIG_PAX_KERNEXEC
15338 + bts $16, %esi
15339 + mov %esi, %cr0
15340 +#endif
15341 +
15342 pushl_cfi $__ESPFIX_SS
15343 pushl_cfi %eax /* new kernel esp */
15344 /* Disable interrupts, but do not irqtrace this section: we
15345 @@ -615,38 +826,30 @@ work_resched:
15346 movl TI_flags(%ebp), %ecx
15347 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15348 # than syscall tracing?
15349 - jz restore_all
15350 + jz restore_all_pax
15351 testb $_TIF_NEED_RESCHED, %cl
15352 jnz work_resched
15353
15354 work_notifysig: # deal with pending signals and
15355 # notify-resume requests
15356 + movl %esp, %eax
15357 #ifdef CONFIG_VM86
15358 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15359 - movl %esp, %eax
15360 - jne work_notifysig_v86 # returning to kernel-space or
15361 + jz 1f # returning to kernel-space or
15362 # vm86-space
15363 - TRACE_IRQS_ON
15364 - ENABLE_INTERRUPTS(CLBR_NONE)
15365 - xorl %edx, %edx
15366 - call do_notify_resume
15367 - jmp resume_userspace_sig
15368
15369 - ALIGN
15370 -work_notifysig_v86:
15371 pushl_cfi %ecx # save ti_flags for do_notify_resume
15372 call save_v86_state # %eax contains pt_regs pointer
15373 popl_cfi %ecx
15374 movl %eax, %esp
15375 -#else
15376 - movl %esp, %eax
15377 +1:
15378 #endif
15379 TRACE_IRQS_ON
15380 ENABLE_INTERRUPTS(CLBR_NONE)
15381 xorl %edx, %edx
15382 call do_notify_resume
15383 jmp resume_userspace_sig
15384 -END(work_pending)
15385 +ENDPROC(work_pending)
15386
15387 # perform syscall exit tracing
15388 ALIGN
15389 @@ -654,11 +857,14 @@ syscall_trace_entry:
15390 movl $-ENOSYS,PT_EAX(%esp)
15391 movl %esp, %eax
15392 call syscall_trace_enter
15393 +
15394 + pax_erase_kstack
15395 +
15396 /* What it returned is what we'll actually use. */
15397 cmpl $(NR_syscalls), %eax
15398 jnae syscall_call
15399 jmp syscall_exit
15400 -END(syscall_trace_entry)
15401 +ENDPROC(syscall_trace_entry)
15402
15403 # perform syscall exit tracing
15404 ALIGN
15405 @@ -671,20 +877,24 @@ syscall_exit_work:
15406 movl %esp, %eax
15407 call syscall_trace_leave
15408 jmp resume_userspace
15409 -END(syscall_exit_work)
15410 +ENDPROC(syscall_exit_work)
15411 CFI_ENDPROC
15412
15413 RING0_INT_FRAME # can't unwind into user space anyway
15414 syscall_fault:
15415 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15416 + push %ss
15417 + pop %ds
15418 +#endif
15419 GET_THREAD_INFO(%ebp)
15420 movl $-EFAULT,PT_EAX(%esp)
15421 jmp resume_userspace
15422 -END(syscall_fault)
15423 +ENDPROC(syscall_fault)
15424
15425 syscall_badsys:
15426 movl $-ENOSYS,PT_EAX(%esp)
15427 jmp resume_userspace
15428 -END(syscall_badsys)
15429 +ENDPROC(syscall_badsys)
15430 CFI_ENDPROC
15431 /*
15432 * End of kprobes section
15433 @@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15434 CFI_ENDPROC
15435 ENDPROC(ptregs_clone)
15436
15437 + ALIGN;
15438 +ENTRY(kernel_execve)
15439 + CFI_STARTPROC
15440 + pushl_cfi %ebp
15441 + sub $PT_OLDSS+4,%esp
15442 + pushl_cfi %edi
15443 + pushl_cfi %ecx
15444 + pushl_cfi %eax
15445 + lea 3*4(%esp),%edi
15446 + mov $PT_OLDSS/4+1,%ecx
15447 + xorl %eax,%eax
15448 + rep stosl
15449 + popl_cfi %eax
15450 + popl_cfi %ecx
15451 + popl_cfi %edi
15452 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15453 + pushl_cfi %esp
15454 + call sys_execve
15455 + add $4,%esp
15456 + CFI_ADJUST_CFA_OFFSET -4
15457 + GET_THREAD_INFO(%ebp)
15458 + test %eax,%eax
15459 + jz syscall_exit
15460 + add $PT_OLDSS+4,%esp
15461 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15462 + popl_cfi %ebp
15463 + ret
15464 + CFI_ENDPROC
15465 +ENDPROC(kernel_execve)
15466 +
15467 .macro FIXUP_ESPFIX_STACK
15468 /*
15469 * Switch back for ESPFIX stack to the normal zerobased stack
15470 @@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15471 * normal stack and adjusts ESP with the matching offset.
15472 */
15473 /* fixup the stack */
15474 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15475 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15476 +#ifdef CONFIG_SMP
15477 + movl PER_CPU_VAR(cpu_number), %ebx
15478 + shll $PAGE_SHIFT_asm, %ebx
15479 + addl $cpu_gdt_table, %ebx
15480 +#else
15481 + movl $cpu_gdt_table, %ebx
15482 +#endif
15483 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15484 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15485 shl $16, %eax
15486 addl %esp, %eax /* the adjusted stack pointer */
15487 pushl_cfi $__KERNEL_DS
15488 @@ -819,7 +1066,7 @@ vector=vector+1
15489 .endr
15490 2: jmp common_interrupt
15491 .endr
15492 -END(irq_entries_start)
15493 +ENDPROC(irq_entries_start)
15494
15495 .previous
15496 END(interrupt)
15497 @@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15498 pushl_cfi $do_coprocessor_error
15499 jmp error_code
15500 CFI_ENDPROC
15501 -END(coprocessor_error)
15502 +ENDPROC(coprocessor_error)
15503
15504 ENTRY(simd_coprocessor_error)
15505 RING0_INT_FRAME
15506 @@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15507 #endif
15508 jmp error_code
15509 CFI_ENDPROC
15510 -END(simd_coprocessor_error)
15511 +ENDPROC(simd_coprocessor_error)
15512
15513 ENTRY(device_not_available)
15514 RING0_INT_FRAME
15515 @@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15516 pushl_cfi $do_device_not_available
15517 jmp error_code
15518 CFI_ENDPROC
15519 -END(device_not_available)
15520 +ENDPROC(device_not_available)
15521
15522 #ifdef CONFIG_PARAVIRT
15523 ENTRY(native_iret)
15524 @@ -905,12 +1152,12 @@ ENTRY(native_iret)
15525 .align 4
15526 .long native_iret, iret_exc
15527 .previous
15528 -END(native_iret)
15529 +ENDPROC(native_iret)
15530
15531 ENTRY(native_irq_enable_sysexit)
15532 sti
15533 sysexit
15534 -END(native_irq_enable_sysexit)
15535 +ENDPROC(native_irq_enable_sysexit)
15536 #endif
15537
15538 ENTRY(overflow)
15539 @@ -919,7 +1166,7 @@ ENTRY(overflow)
15540 pushl_cfi $do_overflow
15541 jmp error_code
15542 CFI_ENDPROC
15543 -END(overflow)
15544 +ENDPROC(overflow)
15545
15546 ENTRY(bounds)
15547 RING0_INT_FRAME
15548 @@ -927,7 +1174,7 @@ ENTRY(bounds)
15549 pushl_cfi $do_bounds
15550 jmp error_code
15551 CFI_ENDPROC
15552 -END(bounds)
15553 +ENDPROC(bounds)
15554
15555 ENTRY(invalid_op)
15556 RING0_INT_FRAME
15557 @@ -935,7 +1182,7 @@ ENTRY(invalid_op)
15558 pushl_cfi $do_invalid_op
15559 jmp error_code
15560 CFI_ENDPROC
15561 -END(invalid_op)
15562 +ENDPROC(invalid_op)
15563
15564 ENTRY(coprocessor_segment_overrun)
15565 RING0_INT_FRAME
15566 @@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
15567 pushl_cfi $do_coprocessor_segment_overrun
15568 jmp error_code
15569 CFI_ENDPROC
15570 -END(coprocessor_segment_overrun)
15571 +ENDPROC(coprocessor_segment_overrun)
15572
15573 ENTRY(invalid_TSS)
15574 RING0_EC_FRAME
15575 pushl_cfi $do_invalid_TSS
15576 jmp error_code
15577 CFI_ENDPROC
15578 -END(invalid_TSS)
15579 +ENDPROC(invalid_TSS)
15580
15581 ENTRY(segment_not_present)
15582 RING0_EC_FRAME
15583 pushl_cfi $do_segment_not_present
15584 jmp error_code
15585 CFI_ENDPROC
15586 -END(segment_not_present)
15587 +ENDPROC(segment_not_present)
15588
15589 ENTRY(stack_segment)
15590 RING0_EC_FRAME
15591 pushl_cfi $do_stack_segment
15592 jmp error_code
15593 CFI_ENDPROC
15594 -END(stack_segment)
15595 +ENDPROC(stack_segment)
15596
15597 ENTRY(alignment_check)
15598 RING0_EC_FRAME
15599 pushl_cfi $do_alignment_check
15600 jmp error_code
15601 CFI_ENDPROC
15602 -END(alignment_check)
15603 +ENDPROC(alignment_check)
15604
15605 ENTRY(divide_error)
15606 RING0_INT_FRAME
15607 @@ -979,7 +1226,7 @@ ENTRY(divide_error)
15608 pushl_cfi $do_divide_error
15609 jmp error_code
15610 CFI_ENDPROC
15611 -END(divide_error)
15612 +ENDPROC(divide_error)
15613
15614 #ifdef CONFIG_X86_MCE
15615 ENTRY(machine_check)
15616 @@ -988,7 +1235,7 @@ ENTRY(machine_check)
15617 pushl_cfi machine_check_vector
15618 jmp error_code
15619 CFI_ENDPROC
15620 -END(machine_check)
15621 +ENDPROC(machine_check)
15622 #endif
15623
15624 ENTRY(spurious_interrupt_bug)
15625 @@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15626 pushl_cfi $do_spurious_interrupt_bug
15627 jmp error_code
15628 CFI_ENDPROC
15629 -END(spurious_interrupt_bug)
15630 +ENDPROC(spurious_interrupt_bug)
15631 /*
15632 * End of kprobes section
15633 */
15634 @@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15635
15636 ENTRY(mcount)
15637 ret
15638 -END(mcount)
15639 +ENDPROC(mcount)
15640
15641 ENTRY(ftrace_caller)
15642 cmpl $0, function_trace_stop
15643 @@ -1141,7 +1388,7 @@ ftrace_graph_call:
15644 .globl ftrace_stub
15645 ftrace_stub:
15646 ret
15647 -END(ftrace_caller)
15648 +ENDPROC(ftrace_caller)
15649
15650 #else /* ! CONFIG_DYNAMIC_FTRACE */
15651
15652 @@ -1177,7 +1424,7 @@ trace:
15653 popl %ecx
15654 popl %eax
15655 jmp ftrace_stub
15656 -END(mcount)
15657 +ENDPROC(mcount)
15658 #endif /* CONFIG_DYNAMIC_FTRACE */
15659 #endif /* CONFIG_FUNCTION_TRACER */
15660
15661 @@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15662 popl %ecx
15663 popl %eax
15664 ret
15665 -END(ftrace_graph_caller)
15666 +ENDPROC(ftrace_graph_caller)
15667
15668 .globl return_to_handler
15669 return_to_handler:
15670 @@ -1253,15 +1500,18 @@ error_code:
15671 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15672 REG_TO_PTGS %ecx
15673 SET_KERNEL_GS %ecx
15674 - movl $(__USER_DS), %ecx
15675 + movl $(__KERNEL_DS), %ecx
15676 movl %ecx, %ds
15677 movl %ecx, %es
15678 +
15679 + pax_enter_kernel
15680 +
15681 TRACE_IRQS_OFF
15682 movl %esp,%eax # pt_regs pointer
15683 call *%edi
15684 jmp ret_from_exception
15685 CFI_ENDPROC
15686 -END(page_fault)
15687 +ENDPROC(page_fault)
15688
15689 /*
15690 * Debug traps and NMI can happen at the one SYSENTER instruction
15691 @@ -1303,7 +1553,7 @@ debug_stack_correct:
15692 call do_debug
15693 jmp ret_from_exception
15694 CFI_ENDPROC
15695 -END(debug)
15696 +ENDPROC(debug)
15697
15698 /*
15699 * NMI is doubly nasty. It can happen _while_ we're handling
15700 @@ -1340,6 +1590,9 @@ nmi_stack_correct:
15701 xorl %edx,%edx # zero error code
15702 movl %esp,%eax # pt_regs pointer
15703 call do_nmi
15704 +
15705 + pax_exit_kernel
15706 +
15707 jmp restore_all_notrace
15708 CFI_ENDPROC
15709
15710 @@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15711 FIXUP_ESPFIX_STACK # %eax == %esp
15712 xorl %edx,%edx # zero error code
15713 call do_nmi
15714 +
15715 + pax_exit_kernel
15716 +
15717 RESTORE_REGS
15718 lss 12+4(%esp), %esp # back to espfix stack
15719 CFI_ADJUST_CFA_OFFSET -24
15720 jmp irq_return
15721 CFI_ENDPROC
15722 -END(nmi)
15723 +ENDPROC(nmi)
15724
15725 ENTRY(int3)
15726 RING0_INT_FRAME
15727 @@ -1393,14 +1649,14 @@ ENTRY(int3)
15728 call do_int3
15729 jmp ret_from_exception
15730 CFI_ENDPROC
15731 -END(int3)
15732 +ENDPROC(int3)
15733
15734 ENTRY(general_protection)
15735 RING0_EC_FRAME
15736 pushl_cfi $do_general_protection
15737 jmp error_code
15738 CFI_ENDPROC
15739 -END(general_protection)
15740 +ENDPROC(general_protection)
15741
15742 #ifdef CONFIG_KVM_GUEST
15743 ENTRY(async_page_fault)
15744 @@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15745 pushl_cfi $do_async_page_fault
15746 jmp error_code
15747 CFI_ENDPROC
15748 -END(async_page_fault)
15749 +ENDPROC(async_page_fault)
15750 #endif
15751
15752 /*
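
The pax_erase_kstack routine added to entry_32.S above is the other half of STACKLEAK: before returning to userspace it overwrites the portion of the kernel stack the syscall actually used with a poison value (-0xBEEF), so stale kernel data cannot later leak through uninitialised-variable bugs, and thread_info->lowest_stack (a field this patch adds) records the deepest stack use so only that region needs scanning. Roughly, in C, as a sketch of the algorithm the assembly implements rather than a drop-in replacement:

/*
 * Sketch only: C rendering of the pax_erase_kstack assembly above.
 * STACKLEAK_POISON, POISON_RUN_WORDS and example_erase_kstack are
 * illustrative names; lowest_stack is the thread_info field the patch adds.
 */
#include <linux/sched.h>

#define STACKLEAK_POISON	-0xBEEF		/* 0xffff4111 on 32-bit */
#define POISON_RUN_WORDS	(2 * 16)	/* the asm scans for 128 bytes of poison */

static void example_erase_kstack(struct thread_info *ti, unsigned long *sp_now)
{
	unsigned long *base = (unsigned long *)task_stack_page(ti->task);
	unsigned long *p    = (unsigned long *)ti->lowest_stack;
	unsigned int run    = 0;

	/* walk down from the deepest recorded use until a long run of poison
	 * shows we have reached stack that was never written this time */
	while (p > base && run < POISON_RUN_WORDS) {
		p--;
		run = (*p == (unsigned long)STACKLEAK_POISON) ? run + 1 : 0;
	}

	/* re-poison everything from there up to the current stack pointer */
	while (p < sp_now)
		*p++ = STACKLEAK_POISON;

	/* resume tracking just below the top of the kernel stack */
	ti->lowest_stack = ti->task->thread.sp0 - 128;
}
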
15753 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15754 index 1333d98..b340ca2 100644
15755 --- a/arch/x86/kernel/entry_64.S
15756 +++ b/arch/x86/kernel/entry_64.S
15757 @@ -56,6 +56,8 @@
15758 #include <asm/ftrace.h>
15759 #include <asm/percpu.h>
15760 #include <linux/err.h>
15761 +#include <asm/pgtable.h>
15762 +#include <asm/alternative-asm.h>
15763
15764 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15765 #include <linux/elf-em.h>
15766 @@ -69,8 +71,9 @@
15767 #ifdef CONFIG_FUNCTION_TRACER
15768 #ifdef CONFIG_DYNAMIC_FTRACE
15769 ENTRY(mcount)
15770 + pax_force_retaddr
15771 retq
15772 -END(mcount)
15773 +ENDPROC(mcount)
15774
15775 ENTRY(ftrace_caller)
15776 cmpl $0, function_trace_stop
15777 @@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15778 #endif
15779
15780 GLOBAL(ftrace_stub)
15781 + pax_force_retaddr
15782 retq
15783 -END(ftrace_caller)
15784 +ENDPROC(ftrace_caller)
15785
15786 #else /* ! CONFIG_DYNAMIC_FTRACE */
15787 ENTRY(mcount)
15788 @@ -113,6 +117,7 @@ ENTRY(mcount)
15789 #endif
15790
15791 GLOBAL(ftrace_stub)
15792 + pax_force_retaddr
15793 retq
15794
15795 trace:
15796 @@ -122,12 +127,13 @@ trace:
15797 movq 8(%rbp), %rsi
15798 subq $MCOUNT_INSN_SIZE, %rdi
15799
15800 + pax_force_fptr ftrace_trace_function
15801 call *ftrace_trace_function
15802
15803 MCOUNT_RESTORE_FRAME
15804
15805 jmp ftrace_stub
15806 -END(mcount)
15807 +ENDPROC(mcount)
15808 #endif /* CONFIG_DYNAMIC_FTRACE */
15809 #endif /* CONFIG_FUNCTION_TRACER */
15810
15811 @@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15812
15813 MCOUNT_RESTORE_FRAME
15814
15815 + pax_force_retaddr
15816 retq
15817 -END(ftrace_graph_caller)
15818 +ENDPROC(ftrace_graph_caller)
15819
15820 GLOBAL(return_to_handler)
15821 subq $24, %rsp
15822 @@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15823 movq 8(%rsp), %rdx
15824 movq (%rsp), %rax
15825 addq $24, %rsp
15826 + pax_force_fptr %rdi
15827 jmp *%rdi
15828 #endif
15829
15830 @@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15831 ENDPROC(native_usergs_sysret64)
15832 #endif /* CONFIG_PARAVIRT */
15833
15834 + .macro ljmpq sel, off
15835 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15836 + .byte 0x48; ljmp *1234f(%rip)
15837 + .pushsection .rodata
15838 + .align 16
15839 + 1234: .quad \off; .word \sel
15840 + .popsection
15841 +#else
15842 + pushq $\sel
15843 + pushq $\off
15844 + lretq
15845 +#endif
15846 + .endm
15847 +
15848 + .macro pax_enter_kernel
15849 + pax_set_fptr_mask
15850 +#ifdef CONFIG_PAX_KERNEXEC
15851 + call pax_enter_kernel
15852 +#endif
15853 + .endm
15854 +
15855 + .macro pax_exit_kernel
15856 +#ifdef CONFIG_PAX_KERNEXEC
15857 + call pax_exit_kernel
15858 +#endif
15859 + .endm
15860 +
15861 +#ifdef CONFIG_PAX_KERNEXEC
15862 +ENTRY(pax_enter_kernel)
15863 + pushq %rdi
15864 +
15865 +#ifdef CONFIG_PARAVIRT
15866 + PV_SAVE_REGS(CLBR_RDI)
15867 +#endif
15868 +
15869 + GET_CR0_INTO_RDI
15870 + bts $16,%rdi
15871 + jnc 3f
15872 + mov %cs,%edi
15873 + cmp $__KERNEL_CS,%edi
15874 + jnz 2f
15875 +1:
15876 +
15877 +#ifdef CONFIG_PARAVIRT
15878 + PV_RESTORE_REGS(CLBR_RDI)
15879 +#endif
15880 +
15881 + popq %rdi
15882 + pax_force_retaddr
15883 + retq
15884 +
15885 +2: ljmpq __KERNEL_CS,1f
15886 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15887 +4: SET_RDI_INTO_CR0
15888 + jmp 1b
15889 +ENDPROC(pax_enter_kernel)
15890 +
15891 +ENTRY(pax_exit_kernel)
15892 + pushq %rdi
15893 +
15894 +#ifdef CONFIG_PARAVIRT
15895 + PV_SAVE_REGS(CLBR_RDI)
15896 +#endif
15897 +
15898 + mov %cs,%rdi
15899 + cmp $__KERNEXEC_KERNEL_CS,%edi
15900 + jz 2f
15901 +1:
15902 +
15903 +#ifdef CONFIG_PARAVIRT
15904 + PV_RESTORE_REGS(CLBR_RDI);
15905 +#endif
15906 +
15907 + popq %rdi
15908 + pax_force_retaddr
15909 + retq
15910 +
15911 +2: GET_CR0_INTO_RDI
15912 + btr $16,%rdi
15913 + ljmpq __KERNEL_CS,3f
15914 +3: SET_RDI_INTO_CR0
15915 + jmp 1b
15916 +#ifdef CONFIG_PARAVIRT
15917 + PV_RESTORE_REGS(CLBR_RDI);
15918 +#endif
15919 +
15920 + popq %rdi
15921 + pax_force_retaddr
15922 + retq
15923 +ENDPROC(pax_exit_kernel)
15924 +#endif
15925 +
15926 + .macro pax_enter_kernel_user
15927 + pax_set_fptr_mask
15928 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15929 + call pax_enter_kernel_user
15930 +#endif
15931 + .endm
15932 +
15933 + .macro pax_exit_kernel_user
15934 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15935 + call pax_exit_kernel_user
15936 +#endif
15937 +#ifdef CONFIG_PAX_RANDKSTACK
15938 + pushq %rax
15939 + call pax_randomize_kstack
15940 + popq %rax
15941 +#endif
15942 + .endm
15943 +
15944 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15945 +ENTRY(pax_enter_kernel_user)
15946 + pushq %rdi
15947 + pushq %rbx
15948 +
15949 +#ifdef CONFIG_PARAVIRT
15950 + PV_SAVE_REGS(CLBR_RDI)
15951 +#endif
15952 +
15953 + GET_CR3_INTO_RDI
15954 + mov %rdi,%rbx
15955 + add $__START_KERNEL_map,%rbx
15956 + sub phys_base(%rip),%rbx
15957 +
15958 +#ifdef CONFIG_PARAVIRT
15959 + pushq %rdi
15960 + cmpl $0, pv_info+PARAVIRT_enabled
15961 + jz 1f
15962 + i = 0
15963 + .rept USER_PGD_PTRS
15964 + mov i*8(%rbx),%rsi
15965 + mov $0,%sil
15966 + lea i*8(%rbx),%rdi
15967 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15968 + i = i + 1
15969 + .endr
15970 + jmp 2f
15971 +1:
15972 +#endif
15973 +
15974 + i = 0
15975 + .rept USER_PGD_PTRS
15976 + movb $0,i*8(%rbx)
15977 + i = i + 1
15978 + .endr
15979 +
15980 +#ifdef CONFIG_PARAVIRT
15981 +2: popq %rdi
15982 +#endif
15983 + SET_RDI_INTO_CR3
15984 +
15985 +#ifdef CONFIG_PAX_KERNEXEC
15986 + GET_CR0_INTO_RDI
15987 + bts $16,%rdi
15988 + SET_RDI_INTO_CR0
15989 +#endif
15990 +
15991 +#ifdef CONFIG_PARAVIRT
15992 + PV_RESTORE_REGS(CLBR_RDI)
15993 +#endif
15994 +
15995 + popq %rbx
15996 + popq %rdi
15997 + pax_force_retaddr
15998 + retq
15999 +ENDPROC(pax_enter_kernel_user)
16000 +
16001 +ENTRY(pax_exit_kernel_user)
16002 + push %rdi
16003 +
16004 +#ifdef CONFIG_PARAVIRT
16005 + pushq %rbx
16006 + PV_SAVE_REGS(CLBR_RDI)
16007 +#endif
16008 +
16009 +#ifdef CONFIG_PAX_KERNEXEC
16010 + GET_CR0_INTO_RDI
16011 + btr $16,%rdi
16012 + SET_RDI_INTO_CR0
16013 +#endif
16014 +
16015 + GET_CR3_INTO_RDI
16016 + add $__START_KERNEL_map,%rdi
16017 + sub phys_base(%rip),%rdi
16018 +
16019 +#ifdef CONFIG_PARAVIRT
16020 + cmpl $0, pv_info+PARAVIRT_enabled
16021 + jz 1f
16022 + mov %rdi,%rbx
16023 + i = 0
16024 + .rept USER_PGD_PTRS
16025 + mov i*8(%rbx),%rsi
16026 + mov $0x67,%sil
16027 + lea i*8(%rbx),%rdi
16028 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16029 + i = i + 1
16030 + .endr
16031 + jmp 2f
16032 +1:
16033 +#endif
16034 +
16035 + i = 0
16036 + .rept USER_PGD_PTRS
16037 + movb $0x67,i*8(%rdi)
16038 + i = i + 1
16039 + .endr
16040 +
16041 +#ifdef CONFIG_PARAVIRT
16042 +2: PV_RESTORE_REGS(CLBR_RDI)
16043 + popq %rbx
16044 +#endif
16045 +
16046 + popq %rdi
16047 + pax_force_retaddr
16048 + retq
16049 +ENDPROC(pax_exit_kernel_user)
16050 +#endif
16051 +
16052 +.macro pax_erase_kstack
16053 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16054 + call pax_erase_kstack
16055 +#endif
16056 +.endm
16057 +
16058 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16059 +/*
16060 + * r11: thread_info
16061 + * rcx, rdx: can be clobbered
16062 + */
16063 +ENTRY(pax_erase_kstack)
16064 + pushq %rdi
16065 + pushq %rax
16066 + pushq %r11
16067 +
16068 + GET_THREAD_INFO(%r11)
16069 + mov TI_lowest_stack(%r11), %rdi
16070 + mov $-0xBEEF, %rax
16071 + std
16072 +
16073 +1: mov %edi, %ecx
16074 + and $THREAD_SIZE_asm - 1, %ecx
16075 + shr $3, %ecx
16076 + repne scasq
16077 + jecxz 2f
16078 +
16079 + cmp $2*8, %ecx
16080 + jc 2f
16081 +
16082 + mov $2*8, %ecx
16083 + repe scasq
16084 + jecxz 2f
16085 + jne 1b
16086 +
16087 +2: cld
16088 + mov %esp, %ecx
16089 + sub %edi, %ecx
16090 +
16091 + cmp $THREAD_SIZE_asm, %rcx
16092 + jb 3f
16093 + ud2
16094 +3:
16095 +
16096 + shr $3, %ecx
16097 + rep stosq
16098 +
16099 + mov TI_task_thread_sp0(%r11), %rdi
16100 + sub $256, %rdi
16101 + mov %rdi, TI_lowest_stack(%r11)
16102 +
16103 + popq %r11
16104 + popq %rax
16105 + popq %rdi
16106 + pax_force_retaddr
16107 + ret
16108 +ENDPROC(pax_erase_kstack)
16109 +#endif
16110
16111 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16112 #ifdef CONFIG_TRACE_IRQFLAGS
16113 @@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
16114 .endm
16115
16116 .macro UNFAKE_STACK_FRAME
16117 - addq $8*6, %rsp
16118 - CFI_ADJUST_CFA_OFFSET -(6*8)
16119 + addq $8*6 + ARG_SKIP, %rsp
16120 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16121 .endm
16122
16123 /*
16124 @@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
16125 movq %rsp, %rsi
16126
16127 leaq -RBP(%rsp),%rdi /* arg1 for handler */
16128 - testl $3, CS(%rdi)
16129 + testb $3, CS(%rdi)
16130 je 1f
16131 SWAPGS
16132 /*
16133 @@ -356,9 +640,10 @@ ENTRY(save_rest)
16134 movq_cfi r15, R15+16
16135 movq %r11, 8(%rsp) /* return address */
16136 FIXUP_TOP_OF_STACK %r11, 16
16137 + pax_force_retaddr
16138 ret
16139 CFI_ENDPROC
16140 -END(save_rest)
16141 +ENDPROC(save_rest)
16142
16143 /* save complete stack frame */
16144 .pushsection .kprobes.text, "ax"
16145 @@ -387,9 +672,10 @@ ENTRY(save_paranoid)
16146 js 1f /* negative -> in kernel */
16147 SWAPGS
16148 xorl %ebx,%ebx
16149 -1: ret
16150 +1: pax_force_retaddr_bts
16151 + ret
16152 CFI_ENDPROC
16153 -END(save_paranoid)
16154 +ENDPROC(save_paranoid)
16155 .popsection
16156
16157 /*
16158 @@ -411,7 +697,7 @@ ENTRY(ret_from_fork)
16159
16160 RESTORE_REST
16161
16162 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16163 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16164 jz retint_restore_args
16165
16166 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16167 @@ -421,7 +707,7 @@ ENTRY(ret_from_fork)
16168 jmp ret_from_sys_call # go to the SYSRET fastpath
16169
16170 CFI_ENDPROC
16171 -END(ret_from_fork)
16172 +ENDPROC(ret_from_fork)
16173
16174 /*
16175 * System call entry. Up to 6 arguments in registers are supported.
16176 @@ -457,7 +743,7 @@ END(ret_from_fork)
16177 ENTRY(system_call)
16178 CFI_STARTPROC simple
16179 CFI_SIGNAL_FRAME
16180 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16181 + CFI_DEF_CFA rsp,0
16182 CFI_REGISTER rip,rcx
16183 /*CFI_REGISTER rflags,r11*/
16184 SWAPGS_UNSAFE_STACK
16185 @@ -470,21 +756,23 @@ GLOBAL(system_call_after_swapgs)
16186
16187 movq %rsp,PER_CPU_VAR(old_rsp)
16188 movq PER_CPU_VAR(kernel_stack),%rsp
16189 + SAVE_ARGS 8*6,0
16190 + pax_enter_kernel_user
16191 /*
16192 * No need to follow this irqs off/on section - it's straight
16193 * and short:
16194 */
16195 ENABLE_INTERRUPTS(CLBR_NONE)
16196 - SAVE_ARGS 8,0
16197 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16198 movq %rcx,RIP-ARGOFFSET(%rsp)
16199 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16200 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16201 + GET_THREAD_INFO(%rcx)
16202 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16203 jnz tracesys
16204 system_call_fastpath:
16205 cmpq $__NR_syscall_max,%rax
16206 ja badsys
16207 - movq %r10,%rcx
16208 + movq R10-ARGOFFSET(%rsp),%rcx
16209 call *sys_call_table(,%rax,8) # XXX: rip relative
16210 movq %rax,RAX-ARGOFFSET(%rsp)
16211 /*
16212 @@ -498,10 +786,13 @@ sysret_check:
16213 LOCKDEP_SYS_EXIT
16214 DISABLE_INTERRUPTS(CLBR_NONE)
16215 TRACE_IRQS_OFF
16216 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16217 + GET_THREAD_INFO(%rcx)
16218 + movl TI_flags(%rcx),%edx
16219 andl %edi,%edx
16220 jnz sysret_careful
16221 CFI_REMEMBER_STATE
16222 + pax_exit_kernel_user
16223 + pax_erase_kstack
16224 /*
16225 * sysretq will re-enable interrupts:
16226 */
16227 @@ -553,14 +844,18 @@ badsys:
16228 * jump back to the normal fast path.
16229 */
16230 auditsys:
16231 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16232 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16233 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16234 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16235 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16236 movq %rax,%rsi /* 2nd arg: syscall number */
16237 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16238 call __audit_syscall_entry
16239 +
16240 + pax_erase_kstack
16241 +
16242 LOAD_ARGS 0 /* reload call-clobbered registers */
16243 + pax_set_fptr_mask
16244 jmp system_call_fastpath
16245
16246 /*
16247 @@ -581,7 +876,7 @@ sysret_audit:
16248 /* Do syscall tracing */
16249 tracesys:
16250 #ifdef CONFIG_AUDITSYSCALL
16251 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16252 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16253 jz auditsys
16254 #endif
16255 SAVE_REST
16256 @@ -589,16 +884,20 @@ tracesys:
16257 FIXUP_TOP_OF_STACK %rdi
16258 movq %rsp,%rdi
16259 call syscall_trace_enter
16260 +
16261 + pax_erase_kstack
16262 +
16263 /*
16264 * Reload arg registers from stack in case ptrace changed them.
16265 * We don't reload %rax because syscall_trace_enter() returned
16266 * the value it wants us to use in the table lookup.
16267 */
16268 LOAD_ARGS ARGOFFSET, 1
16269 + pax_set_fptr_mask
16270 RESTORE_REST
16271 cmpq $__NR_syscall_max,%rax
16272 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16273 - movq %r10,%rcx /* fixup for C */
16274 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16275 call *sys_call_table(,%rax,8)
16276 movq %rax,RAX-ARGOFFSET(%rsp)
16277 /* Use IRET because user could have changed frame */
16278 @@ -619,6 +918,7 @@ GLOBAL(int_with_check)
16279 andl %edi,%edx
16280 jnz int_careful
16281 andl $~TS_COMPAT,TI_status(%rcx)
16282 + pax_erase_kstack
16283 jmp retint_swapgs
16284
16285 /* Either reschedule or signal or syscall exit tracking needed. */
16286 @@ -665,7 +965,7 @@ int_restore_rest:
16287 TRACE_IRQS_OFF
16288 jmp int_with_check
16289 CFI_ENDPROC
16290 -END(system_call)
16291 +ENDPROC(system_call)
16292
16293 /*
16294 * Certain special system calls that need to save a complete full stack frame.
16295 @@ -681,7 +981,7 @@ ENTRY(\label)
16296 call \func
16297 jmp ptregscall_common
16298 CFI_ENDPROC
16299 -END(\label)
16300 +ENDPROC(\label)
16301 .endm
16302
16303 PTREGSCALL stub_clone, sys_clone, %r8
16304 @@ -699,9 +999,10 @@ ENTRY(ptregscall_common)
16305 movq_cfi_restore R12+8, r12
16306 movq_cfi_restore RBP+8, rbp
16307 movq_cfi_restore RBX+8, rbx
16308 + pax_force_retaddr
16309 ret $REST_SKIP /* pop extended registers */
16310 CFI_ENDPROC
16311 -END(ptregscall_common)
16312 +ENDPROC(ptregscall_common)
16313
16314 ENTRY(stub_execve)
16315 CFI_STARTPROC
16316 @@ -716,7 +1017,7 @@ ENTRY(stub_execve)
16317 RESTORE_REST
16318 jmp int_ret_from_sys_call
16319 CFI_ENDPROC
16320 -END(stub_execve)
16321 +ENDPROC(stub_execve)
16322
16323 /*
16324 * sigreturn is special because it needs to restore all registers on return.
16325 @@ -734,7 +1035,7 @@ ENTRY(stub_rt_sigreturn)
16326 RESTORE_REST
16327 jmp int_ret_from_sys_call
16328 CFI_ENDPROC
16329 -END(stub_rt_sigreturn)
16330 +ENDPROC(stub_rt_sigreturn)
16331
16332 /*
16333 * Build the entry stubs and pointer table with some assembler magic.
16334 @@ -769,7 +1070,7 @@ vector=vector+1
16335 2: jmp common_interrupt
16336 .endr
16337 CFI_ENDPROC
16338 -END(irq_entries_start)
16339 +ENDPROC(irq_entries_start)
16340
16341 .previous
16342 END(interrupt)
16343 @@ -789,6 +1090,16 @@ END(interrupt)
16344 subq $ORIG_RAX-RBP, %rsp
16345 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16346 SAVE_ARGS_IRQ
16347 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16348 + testb $3, CS(%rdi)
16349 + jnz 1f
16350 + pax_enter_kernel
16351 + jmp 2f
16352 +1: pax_enter_kernel_user
16353 +2:
16354 +#else
16355 + pax_enter_kernel
16356 +#endif
16357 call \func
16358 .endm
16359
16360 @@ -820,7 +1131,7 @@ ret_from_intr:
16361
16362 exit_intr:
16363 GET_THREAD_INFO(%rcx)
16364 - testl $3,CS-ARGOFFSET(%rsp)
16365 + testb $3,CS-ARGOFFSET(%rsp)
16366 je retint_kernel
16367
16368 /* Interrupt came from user space */
16369 @@ -842,12 +1153,15 @@ retint_swapgs: /* return to user-space */
16370 * The iretq could re-enable interrupts:
16371 */
16372 DISABLE_INTERRUPTS(CLBR_ANY)
16373 + pax_exit_kernel_user
16374 TRACE_IRQS_IRETQ
16375 SWAPGS
16376 jmp restore_args
16377
16378 retint_restore_args: /* return to kernel space */
16379 DISABLE_INTERRUPTS(CLBR_ANY)
16380 + pax_exit_kernel
16381 + pax_force_retaddr RIP-ARGOFFSET
16382 /*
16383 * The iretq could re-enable interrupts:
16384 */
16385 @@ -936,7 +1250,7 @@ ENTRY(retint_kernel)
16386 #endif
16387
16388 CFI_ENDPROC
16389 -END(common_interrupt)
16390 +ENDPROC(common_interrupt)
16391 /*
16392 * End of kprobes section
16393 */
16394 @@ -953,7 +1267,7 @@ ENTRY(\sym)
16395 interrupt \do_sym
16396 jmp ret_from_intr
16397 CFI_ENDPROC
16398 -END(\sym)
16399 +ENDPROC(\sym)
16400 .endm
16401
16402 #ifdef CONFIG_SMP
16403 @@ -1026,12 +1340,22 @@ ENTRY(\sym)
16404 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16405 call error_entry
16406 DEFAULT_FRAME 0
16407 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16408 + testb $3, CS(%rsp)
16409 + jnz 1f
16410 + pax_enter_kernel
16411 + jmp 2f
16412 +1: pax_enter_kernel_user
16413 +2:
16414 +#else
16415 + pax_enter_kernel
16416 +#endif
16417 movq %rsp,%rdi /* pt_regs pointer */
16418 xorl %esi,%esi /* no error code */
16419 call \do_sym
16420 jmp error_exit /* %ebx: no swapgs flag */
16421 CFI_ENDPROC
16422 -END(\sym)
16423 +ENDPROC(\sym)
16424 .endm
16425
16426 .macro paranoidzeroentry sym do_sym
16427 @@ -1043,15 +1367,25 @@ ENTRY(\sym)
16428 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16429 call save_paranoid
16430 TRACE_IRQS_OFF
16431 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16432 + testb $3, CS(%rsp)
16433 + jnz 1f
16434 + pax_enter_kernel
16435 + jmp 2f
16436 +1: pax_enter_kernel_user
16437 +2:
16438 +#else
16439 + pax_enter_kernel
16440 +#endif
16441 movq %rsp,%rdi /* pt_regs pointer */
16442 xorl %esi,%esi /* no error code */
16443 call \do_sym
16444 jmp paranoid_exit /* %ebx: no swapgs flag */
16445 CFI_ENDPROC
16446 -END(\sym)
16447 +ENDPROC(\sym)
16448 .endm
16449
16450 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16451 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16452 .macro paranoidzeroentry_ist sym do_sym ist
16453 ENTRY(\sym)
16454 INTR_FRAME
16455 @@ -1061,14 +1395,30 @@ ENTRY(\sym)
16456 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16457 call save_paranoid
16458 TRACE_IRQS_OFF
16459 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16460 + testb $3, CS(%rsp)
16461 + jnz 1f
16462 + pax_enter_kernel
16463 + jmp 2f
16464 +1: pax_enter_kernel_user
16465 +2:
16466 +#else
16467 + pax_enter_kernel
16468 +#endif
16469 movq %rsp,%rdi /* pt_regs pointer */
16470 xorl %esi,%esi /* no error code */
16471 +#ifdef CONFIG_SMP
16472 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16473 + lea init_tss(%r12), %r12
16474 +#else
16475 + lea init_tss(%rip), %r12
16476 +#endif
16477 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16478 call \do_sym
16479 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16480 jmp paranoid_exit /* %ebx: no swapgs flag */
16481 CFI_ENDPROC
16482 -END(\sym)
16483 +ENDPROC(\sym)
16484 .endm
16485
16486 .macro errorentry sym do_sym
16487 @@ -1079,13 +1429,23 @@ ENTRY(\sym)
16488 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16489 call error_entry
16490 DEFAULT_FRAME 0
16491 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16492 + testb $3, CS(%rsp)
16493 + jnz 1f
16494 + pax_enter_kernel
16495 + jmp 2f
16496 +1: pax_enter_kernel_user
16497 +2:
16498 +#else
16499 + pax_enter_kernel
16500 +#endif
16501 movq %rsp,%rdi /* pt_regs pointer */
16502 movq ORIG_RAX(%rsp),%rsi /* get error code */
16503 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16504 call \do_sym
16505 jmp error_exit /* %ebx: no swapgs flag */
16506 CFI_ENDPROC
16507 -END(\sym)
16508 +ENDPROC(\sym)
16509 .endm
16510
16511 /* error code is on the stack already */
16512 @@ -1098,13 +1458,23 @@ ENTRY(\sym)
16513 call save_paranoid
16514 DEFAULT_FRAME 0
16515 TRACE_IRQS_OFF
16516 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16517 + testb $3, CS(%rsp)
16518 + jnz 1f
16519 + pax_enter_kernel
16520 + jmp 2f
16521 +1: pax_enter_kernel_user
16522 +2:
16523 +#else
16524 + pax_enter_kernel
16525 +#endif
16526 movq %rsp,%rdi /* pt_regs pointer */
16527 movq ORIG_RAX(%rsp),%rsi /* get error code */
16528 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16529 call \do_sym
16530 jmp paranoid_exit /* %ebx: no swapgs flag */
16531 CFI_ENDPROC
16532 -END(\sym)
16533 +ENDPROC(\sym)
16534 .endm
16535
16536 zeroentry divide_error do_divide_error
16537 @@ -1134,9 +1504,10 @@ gs_change:
16538 2: mfence /* workaround */
16539 SWAPGS
16540 popfq_cfi
16541 + pax_force_retaddr
16542 ret
16543 CFI_ENDPROC
16544 -END(native_load_gs_index)
16545 +ENDPROC(native_load_gs_index)
16546
16547 .section __ex_table,"a"
16548 .align 8
16549 @@ -1158,13 +1529,14 @@ ENTRY(kernel_thread_helper)
16550 * Here we are in the child and the registers are set as they were
16551 * at kernel_thread() invocation in the parent.
16552 */
16553 + pax_force_fptr %rsi
16554 call *%rsi
16555 # exit
16556 mov %eax, %edi
16557 call do_exit
16558 ud2 # padding for call trace
16559 CFI_ENDPROC
16560 -END(kernel_thread_helper)
16561 +ENDPROC(kernel_thread_helper)
16562
16563 /*
16564 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16565 @@ -1191,11 +1563,11 @@ ENTRY(kernel_execve)
16566 RESTORE_REST
16567 testq %rax,%rax
16568 je int_ret_from_sys_call
16569 - RESTORE_ARGS
16570 UNFAKE_STACK_FRAME
16571 + pax_force_retaddr
16572 ret
16573 CFI_ENDPROC
16574 -END(kernel_execve)
16575 +ENDPROC(kernel_execve)
16576
16577 /* Call softirq on interrupt stack. Interrupts are off. */
16578 ENTRY(call_softirq)
16579 @@ -1213,9 +1585,10 @@ ENTRY(call_softirq)
16580 CFI_DEF_CFA_REGISTER rsp
16581 CFI_ADJUST_CFA_OFFSET -8
16582 decl PER_CPU_VAR(irq_count)
16583 + pax_force_retaddr
16584 ret
16585 CFI_ENDPROC
16586 -END(call_softirq)
16587 +ENDPROC(call_softirq)
16588
16589 #ifdef CONFIG_XEN
16590 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16591 @@ -1253,7 +1626,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16592 decl PER_CPU_VAR(irq_count)
16593 jmp error_exit
16594 CFI_ENDPROC
16595 -END(xen_do_hypervisor_callback)
16596 +ENDPROC(xen_do_hypervisor_callback)
16597
16598 /*
16599 * Hypervisor uses this for application faults while it executes.
16600 @@ -1312,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
16601 SAVE_ALL
16602 jmp error_exit
16603 CFI_ENDPROC
16604 -END(xen_failsafe_callback)
16605 +ENDPROC(xen_failsafe_callback)
16606
16607 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16608 xen_hvm_callback_vector xen_evtchn_do_upcall
16609 @@ -1361,16 +1734,31 @@ ENTRY(paranoid_exit)
16610 TRACE_IRQS_OFF
16611 testl %ebx,%ebx /* swapgs needed? */
16612 jnz paranoid_restore
16613 - testl $3,CS(%rsp)
16614 + testb $3,CS(%rsp)
16615 jnz paranoid_userspace
16616 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16617 + pax_exit_kernel
16618 + TRACE_IRQS_IRETQ 0
16619 + SWAPGS_UNSAFE_STACK
16620 + RESTORE_ALL 8
16621 + pax_force_retaddr_bts
16622 + jmp irq_return
16623 +#endif
16624 paranoid_swapgs:
16625 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16626 + pax_exit_kernel_user
16627 +#else
16628 + pax_exit_kernel
16629 +#endif
16630 TRACE_IRQS_IRETQ 0
16631 SWAPGS_UNSAFE_STACK
16632 RESTORE_ALL 8
16633 jmp irq_return
16634 paranoid_restore:
16635 + pax_exit_kernel
16636 TRACE_IRQS_IRETQ 0
16637 RESTORE_ALL 8
16638 + pax_force_retaddr_bts
16639 jmp irq_return
16640 paranoid_userspace:
16641 GET_THREAD_INFO(%rcx)
16642 @@ -1399,7 +1787,7 @@ paranoid_schedule:
16643 TRACE_IRQS_OFF
16644 jmp paranoid_userspace
16645 CFI_ENDPROC
16646 -END(paranoid_exit)
16647 +ENDPROC(paranoid_exit)
16648
16649 /*
16650 * Exception entry point. This expects an error code/orig_rax on the stack.
16651 @@ -1426,12 +1814,13 @@ ENTRY(error_entry)
16652 movq_cfi r14, R14+8
16653 movq_cfi r15, R15+8
16654 xorl %ebx,%ebx
16655 - testl $3,CS+8(%rsp)
16656 + testb $3,CS+8(%rsp)
16657 je error_kernelspace
16658 error_swapgs:
16659 SWAPGS
16660 error_sti:
16661 TRACE_IRQS_OFF
16662 + pax_force_retaddr_bts
16663 ret
16664
16665 /*
16666 @@ -1458,7 +1847,7 @@ bstep_iret:
16667 movq %rcx,RIP+8(%rsp)
16668 jmp error_swapgs
16669 CFI_ENDPROC
16670 -END(error_entry)
16671 +ENDPROC(error_entry)
16672
16673
16674 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16675 @@ -1478,7 +1867,7 @@ ENTRY(error_exit)
16676 jnz retint_careful
16677 jmp retint_swapgs
16678 CFI_ENDPROC
16679 -END(error_exit)
16680 +ENDPROC(error_exit)
16681
16682 /*
16683 * Test if a given stack is an NMI stack or not.
16684 @@ -1535,9 +1924,11 @@ ENTRY(nmi)
16685 * If %cs was not the kernel segment, then the NMI triggered in user
16686 * space, which means it is definitely not nested.
16687 */
16688 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16689 + je 1f
16690 cmpl $__KERNEL_CS, 16(%rsp)
16691 jne first_nmi
16692 -
16693 +1:
16694 /*
16695 * Check the special variable on the stack to see if NMIs are
16696 * executing.
16697 @@ -1659,6 +2050,16 @@ restart_nmi:
16698 */
16699 call save_paranoid
16700 DEFAULT_FRAME 0
16701 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16702 + testb $3, CS(%rsp)
16703 + jnz 1f
16704 + pax_enter_kernel
16705 + jmp 2f
16706 +1: pax_enter_kernel_user
16707 +2:
16708 +#else
16709 + pax_enter_kernel
16710 +#endif
16711 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16712 movq %rsp,%rdi
16713 movq $-1,%rsi
16714 @@ -1666,14 +2067,25 @@ restart_nmi:
16715 testl %ebx,%ebx /* swapgs needed? */
16716 jnz nmi_restore
16717 nmi_swapgs:
16718 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16719 + pax_exit_kernel_user
16720 +#else
16721 + pax_exit_kernel
16722 +#endif
16723 SWAPGS_UNSAFE_STACK
16724 + RESTORE_ALL 8
16725 + /* Clear the NMI executing stack variable */
16726 + movq $0, 10*8(%rsp)
16727 + jmp irq_return
16728 nmi_restore:
16729 + pax_exit_kernel
16730 RESTORE_ALL 8
16731 + pax_force_retaddr_bts
16732 /* Clear the NMI executing stack variable */
16733 movq $0, 10*8(%rsp)
16734 jmp irq_return
16735 CFI_ENDPROC
16736 -END(nmi)
16737 +ENDPROC(nmi)
16738
16739 /*
16740 * If an NMI hit an iret because of an exception or breakpoint,
16741 @@ -1700,7 +2112,7 @@ ENTRY(ignore_sysret)
16742 mov $-ENOSYS,%eax
16743 sysret
16744 CFI_ENDPROC
16745 -END(ignore_sysret)
16746 +ENDPROC(ignore_sysret)
16747
16748 /*
16749 * End of kprobes section
16750 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16751 index c9a281f..ce2f317 100644
16752 --- a/arch/x86/kernel/ftrace.c
16753 +++ b/arch/x86/kernel/ftrace.c
16754 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16755 static const void *mod_code_newcode; /* holds the text to write to the IP */
16756
16757 static unsigned nmi_wait_count;
16758 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16759 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16760
16761 int ftrace_arch_read_dyn_info(char *buf, int size)
16762 {
16763 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16764
16765 r = snprintf(buf, size, "%u %u",
16766 nmi_wait_count,
16767 - atomic_read(&nmi_update_count));
16768 + atomic_read_unchecked(&nmi_update_count));
16769 return r;
16770 }
16771
16772 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16773
16774 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16775 smp_rmb();
16776 + pax_open_kernel();
16777 ftrace_mod_code();
16778 - atomic_inc(&nmi_update_count);
16779 + pax_close_kernel();
16780 + atomic_inc_unchecked(&nmi_update_count);
16781 }
16782 /* Must have previous changes seen before executions */
16783 smp_mb();
16784 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16785 {
16786 unsigned char replaced[MCOUNT_INSN_SIZE];
16787
16788 + ip = ktla_ktva(ip);
16789 +
16790 /*
16791 * Note: Due to modules and __init, code can
16792 * disappear and change, we need to protect against faulting
16793 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16794 unsigned char old[MCOUNT_INSN_SIZE], *new;
16795 int ret;
16796
16797 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16798 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16799 new = ftrace_call_replace(ip, (unsigned long)func);
16800 ret = ftrace_modify_code(ip, old, new);
16801
16802 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16803 {
16804 unsigned char code[MCOUNT_INSN_SIZE];
16805
16806 + ip = ktla_ktva(ip);
16807 +
16808 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16809 return -EFAULT;
16810
16811 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16812 index 51ff186..9e77418 100644
16813 --- a/arch/x86/kernel/head32.c
16814 +++ b/arch/x86/kernel/head32.c
16815 @@ -19,6 +19,7 @@
16816 #include <asm/io_apic.h>
16817 #include <asm/bios_ebda.h>
16818 #include <asm/tlbflush.h>
16819 +#include <asm/boot.h>
16820
16821 static void __init i386_default_early_setup(void)
16822 {
16823 @@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16824
16825 void __init i386_start_kernel(void)
16826 {
16827 - memblock_reserve(__pa_symbol(&_text),
16828 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16829 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16830
16831 #ifdef CONFIG_BLK_DEV_INITRD
16832 /* Reserve INITRD */
16833 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16834 index ce0be7c..c41476e 100644
16835 --- a/arch/x86/kernel/head_32.S
16836 +++ b/arch/x86/kernel/head_32.S
16837 @@ -25,6 +25,12 @@
16838 /* Physical address */
16839 #define pa(X) ((X) - __PAGE_OFFSET)
16840
16841 +#ifdef CONFIG_PAX_KERNEXEC
16842 +#define ta(X) (X)
16843 +#else
16844 +#define ta(X) ((X) - __PAGE_OFFSET)
16845 +#endif
16846 +
16847 /*
16848 * References to members of the new_cpu_data structure.
16849 */
16850 @@ -54,11 +60,7 @@
16851 * and small than max_low_pfn, otherwise will waste some page table entries
16852 */
16853
16854 -#if PTRS_PER_PMD > 1
16855 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16856 -#else
16857 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16858 -#endif
16859 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16860
16861 /* Number of possible pages in the lowmem region */
16862 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16863 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16864 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16865
16866 /*
16867 + * Real beginning of normal "text" segment
16868 + */
16869 +ENTRY(stext)
16870 +ENTRY(_stext)
16871 +
16872 +/*
16873 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16874 * %esi points to the real-mode code as a 32-bit pointer.
16875 * CS and DS must be 4 GB flat segments, but we don't depend on
16876 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16877 * can.
16878 */
16879 __HEAD
16880 +
16881 +#ifdef CONFIG_PAX_KERNEXEC
16882 + jmp startup_32
16883 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16884 +.fill PAGE_SIZE-5,1,0xcc
16885 +#endif
16886 +
16887 ENTRY(startup_32)
16888 movl pa(stack_start),%ecx
16889
16890 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16891 2:
16892 leal -__PAGE_OFFSET(%ecx),%esp
16893
16894 +#ifdef CONFIG_SMP
16895 + movl $pa(cpu_gdt_table),%edi
16896 + movl $__per_cpu_load,%eax
16897 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16898 + rorl $16,%eax
16899 + movb %al,__KERNEL_PERCPU + 4(%edi)
16900 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16901 + movl $__per_cpu_end - 1,%eax
16902 + subl $__per_cpu_start,%eax
16903 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16904 +#endif
16905 +
16906 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16907 + movl $NR_CPUS,%ecx
16908 + movl $pa(cpu_gdt_table),%edi
16909 +1:
16910 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16911 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16912 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16913 + addl $PAGE_SIZE_asm,%edi
16914 + loop 1b
16915 +#endif
16916 +
16917 +#ifdef CONFIG_PAX_KERNEXEC
16918 + movl $pa(boot_gdt),%edi
16919 + movl $__LOAD_PHYSICAL_ADDR,%eax
16920 + movw %ax,__BOOT_CS + 2(%edi)
16921 + rorl $16,%eax
16922 + movb %al,__BOOT_CS + 4(%edi)
16923 + movb %ah,__BOOT_CS + 7(%edi)
16924 + rorl $16,%eax
16925 +
16926 + ljmp $(__BOOT_CS),$1f
16927 +1:
16928 +
16929 + movl $NR_CPUS,%ecx
16930 + movl $pa(cpu_gdt_table),%edi
16931 + addl $__PAGE_OFFSET,%eax
16932 +1:
16933 + movw %ax,__KERNEL_CS + 2(%edi)
16934 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16935 + rorl $16,%eax
16936 + movb %al,__KERNEL_CS + 4(%edi)
16937 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16938 + movb %ah,__KERNEL_CS + 7(%edi)
16939 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16940 + rorl $16,%eax
16941 + addl $PAGE_SIZE_asm,%edi
16942 + loop 1b
16943 +#endif
16944 +
16945 /*
16946 * Clear BSS first so that there are no surprises...
16947 */
16948 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16949 movl %eax, pa(max_pfn_mapped)
16950
16951 /* Do early initialization of the fixmap area */
16952 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16953 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16954 +#ifdef CONFIG_COMPAT_VDSO
16955 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16956 +#else
16957 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16958 +#endif
16959 #else /* Not PAE */
16960
16961 page_pde_offset = (__PAGE_OFFSET >> 20);
16962 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16963 movl %eax, pa(max_pfn_mapped)
16964
16965 /* Do early initialization of the fixmap area */
16966 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16967 - movl %eax,pa(initial_page_table+0xffc)
16968 +#ifdef CONFIG_COMPAT_VDSO
16969 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16970 +#else
16971 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16972 +#endif
16973 #endif
16974
16975 #ifdef CONFIG_PARAVIRT
16976 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16977 cmpl $num_subarch_entries, %eax
16978 jae bad_subarch
16979
16980 - movl pa(subarch_entries)(,%eax,4), %eax
16981 - subl $__PAGE_OFFSET, %eax
16982 - jmp *%eax
16983 + jmp *pa(subarch_entries)(,%eax,4)
16984
16985 bad_subarch:
16986 WEAK(lguest_entry)
16987 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16988 __INITDATA
16989
16990 subarch_entries:
16991 - .long default_entry /* normal x86/PC */
16992 - .long lguest_entry /* lguest hypervisor */
16993 - .long xen_entry /* Xen hypervisor */
16994 - .long default_entry /* Moorestown MID */
16995 + .long ta(default_entry) /* normal x86/PC */
16996 + .long ta(lguest_entry) /* lguest hypervisor */
16997 + .long ta(xen_entry) /* Xen hypervisor */
16998 + .long ta(default_entry) /* Moorestown MID */
16999 num_subarch_entries = (. - subarch_entries) / 4
17000 .previous
17001 #else
17002 @@ -312,6 +382,7 @@ default_entry:
17003 orl %edx,%eax
17004 movl %eax,%cr4
17005
17006 +#ifdef CONFIG_X86_PAE
17007 testb $X86_CR4_PAE, %al # check if PAE is enabled
17008 jz 6f
17009
17010 @@ -340,6 +411,9 @@ default_entry:
17011 /* Make changes effective */
17012 wrmsr
17013
17014 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
17015 +#endif
17016 +
17017 6:
17018
17019 /*
17020 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
17021 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17022 movl %eax,%ss # after changing gdt.
17023
17024 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
17025 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17026 movl %eax,%ds
17027 movl %eax,%es
17028
17029 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
17030 */
17031 cmpb $0,ready
17032 jne 1f
17033 - movl $gdt_page,%eax
17034 + movl $cpu_gdt_table,%eax
17035 movl $stack_canary,%ecx
17036 +#ifdef CONFIG_SMP
17037 + addl $__per_cpu_load,%ecx
17038 +#endif
17039 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17040 shrl $16, %ecx
17041 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17042 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17043 1:
17044 -#endif
17045 movl $(__KERNEL_STACK_CANARY),%eax
17046 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17047 + movl $(__USER_DS),%eax
17048 +#else
17049 + xorl %eax,%eax
17050 +#endif
17051 movl %eax,%gs
17052
17053 xorl %eax,%eax # Clear LDT
17054 @@ -558,22 +639,22 @@ early_page_fault:
17055 jmp early_fault
17056
17057 early_fault:
17058 - cld
17059 #ifdef CONFIG_PRINTK
17060 + cmpl $1,%ss:early_recursion_flag
17061 + je hlt_loop
17062 + incl %ss:early_recursion_flag
17063 + cld
17064 pusha
17065 movl $(__KERNEL_DS),%eax
17066 movl %eax,%ds
17067 movl %eax,%es
17068 - cmpl $2,early_recursion_flag
17069 - je hlt_loop
17070 - incl early_recursion_flag
17071 movl %cr2,%eax
17072 pushl %eax
17073 pushl %edx /* trapno */
17074 pushl $fault_msg
17075 call printk
17076 +; call dump_stack
17077 #endif
17078 - call dump_stack
17079 hlt_loop:
17080 hlt
17081 jmp hlt_loop
17082 @@ -581,8 +662,11 @@ hlt_loop:
17083 /* This is the default interrupt "handler" :-) */
17084 ALIGN
17085 ignore_int:
17086 - cld
17087 #ifdef CONFIG_PRINTK
17088 + cmpl $2,%ss:early_recursion_flag
17089 + je hlt_loop
17090 + incl %ss:early_recursion_flag
17091 + cld
17092 pushl %eax
17093 pushl %ecx
17094 pushl %edx
17095 @@ -591,9 +675,6 @@ ignore_int:
17096 movl $(__KERNEL_DS),%eax
17097 movl %eax,%ds
17098 movl %eax,%es
17099 - cmpl $2,early_recursion_flag
17100 - je hlt_loop
17101 - incl early_recursion_flag
17102 pushl 16(%esp)
17103 pushl 24(%esp)
17104 pushl 32(%esp)
17105 @@ -622,29 +703,43 @@ ENTRY(initial_code)
17106 /*
17107 * BSS section
17108 */
17109 -__PAGE_ALIGNED_BSS
17110 - .align PAGE_SIZE
17111 #ifdef CONFIG_X86_PAE
17112 +.section .initial_pg_pmd,"a",@progbits
17113 initial_pg_pmd:
17114 .fill 1024*KPMDS,4,0
17115 #else
17116 +.section .initial_page_table,"a",@progbits
17117 ENTRY(initial_page_table)
17118 .fill 1024,4,0
17119 #endif
17120 +.section .initial_pg_fixmap,"a",@progbits
17121 initial_pg_fixmap:
17122 .fill 1024,4,0
17123 +.section .empty_zero_page,"a",@progbits
17124 ENTRY(empty_zero_page)
17125 .fill 4096,1,0
17126 +.section .swapper_pg_dir,"a",@progbits
17127 ENTRY(swapper_pg_dir)
17128 +#ifdef CONFIG_X86_PAE
17129 + .fill 4,8,0
17130 +#else
17131 .fill 1024,4,0
17132 +#endif
17133 +
17134 +/*
17135 + * The IDT has to be page-aligned to simplify the Pentium
17136 + * F0 0F bug workaround.. We have a special link segment
17137 + * for this.
17138 + */
17139 +.section .idt,"a",@progbits
17140 +ENTRY(idt_table)
17141 + .fill 256,8,0
17142
17143 /*
17144 * This starts the data section.
17145 */
17146 #ifdef CONFIG_X86_PAE
17147 -__PAGE_ALIGNED_DATA
17148 - /* Page-aligned for the benefit of paravirt? */
17149 - .align PAGE_SIZE
17150 +.section .initial_page_table,"a",@progbits
17151 ENTRY(initial_page_table)
17152 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17153 # if KPMDS == 3
17154 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
17155 # error "Kernel PMDs should be 1, 2 or 3"
17156 # endif
17157 .align PAGE_SIZE /* needs to be page-sized too */
17158 +
17159 +#ifdef CONFIG_PAX_PER_CPU_PGD
17160 +ENTRY(cpu_pgd)
17161 + .rept NR_CPUS
17162 + .fill 4,8,0
17163 + .endr
17164 +#endif
17165 +
17166 #endif
17167
17168 .data
17169 .balign 4
17170 ENTRY(stack_start)
17171 - .long init_thread_union+THREAD_SIZE
17172 + .long init_thread_union+THREAD_SIZE-8
17173
17174 +ready: .byte 0
17175 +
17176 +.section .rodata,"a",@progbits
17177 early_recursion_flag:
17178 .long 0
17179
17180 -ready: .byte 0
17181 -
17182 int_msg:
17183 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17184
17185 @@ -707,7 +811,7 @@ fault_msg:
17186 .word 0 # 32 bit align gdt_desc.address
17187 boot_gdt_descr:
17188 .word __BOOT_DS+7
17189 - .long boot_gdt - __PAGE_OFFSET
17190 + .long pa(boot_gdt)
17191
17192 .word 0 # 32-bit align idt_desc.address
17193 idt_descr:
17194 @@ -718,7 +822,7 @@ idt_descr:
17195 .word 0 # 32 bit align gdt_desc.address
17196 ENTRY(early_gdt_descr)
17197 .word GDT_ENTRIES*8-1
17198 - .long gdt_page /* Overwritten for secondary CPUs */
17199 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17200
17201 /*
17202 * The boot_gdt must mirror the equivalent in setup.S and is
17203 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
17204 .align L1_CACHE_BYTES
17205 ENTRY(boot_gdt)
17206 .fill GDT_ENTRY_BOOT_CS,8,0
17207 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17208 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17209 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17210 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17211 +
17212 + .align PAGE_SIZE_asm
17213 +ENTRY(cpu_gdt_table)
17214 + .rept NR_CPUS
17215 + .quad 0x0000000000000000 /* NULL descriptor */
17216 + .quad 0x0000000000000000 /* 0x0b reserved */
17217 + .quad 0x0000000000000000 /* 0x13 reserved */
17218 + .quad 0x0000000000000000 /* 0x1b reserved */
17219 +
17220 +#ifdef CONFIG_PAX_KERNEXEC
17221 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17222 +#else
17223 + .quad 0x0000000000000000 /* 0x20 unused */
17224 +#endif
17225 +
17226 + .quad 0x0000000000000000 /* 0x28 unused */
17227 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17228 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17229 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17230 + .quad 0x0000000000000000 /* 0x4b reserved */
17231 + .quad 0x0000000000000000 /* 0x53 reserved */
17232 + .quad 0x0000000000000000 /* 0x5b reserved */
17233 +
17234 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17235 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17236 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17237 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17238 +
17239 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17240 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17241 +
17242 + /*
17243 + * Segments used for calling PnP BIOS have byte granularity.
17244 + * The code segments and data segments have fixed 64k limits,
17245 + * the transfer segment sizes are set at run time.
17246 + */
17247 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17248 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17249 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17250 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17251 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17252 +
17253 + /*
17254 + * The APM segments have byte granularity and their bases
17255 + * are set at run time. All have 64k limits.
17256 + */
17257 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17258 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17259 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17260 +
17261 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17262 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17263 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17264 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17265 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17266 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17267 +
17268 + /* Be sure this is zeroed to avoid false validations in Xen */
17269 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17270 + .endr
17271 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17272 index 40f4eb3..6d24d9d 100644
17273 --- a/arch/x86/kernel/head_64.S
17274 +++ b/arch/x86/kernel/head_64.S
17275 @@ -19,6 +19,8 @@
17276 #include <asm/cache.h>
17277 #include <asm/processor-flags.h>
17278 #include <asm/percpu.h>
17279 +#include <asm/cpufeature.h>
17280 +#include <asm/alternative-asm.h>
17281
17282 #ifdef CONFIG_PARAVIRT
17283 #include <asm/asm-offsets.h>
17284 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17285 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17286 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17287 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17288 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17289 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17290 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17291 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17292 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17293 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17294
17295 .text
17296 __HEAD
17297 @@ -85,35 +93,23 @@ startup_64:
17298 */
17299 addq %rbp, init_level4_pgt + 0(%rip)
17300 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17301 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17302 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17303 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17304 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17305
17306 addq %rbp, level3_ident_pgt + 0(%rip)
17307 +#ifndef CONFIG_XEN
17308 + addq %rbp, level3_ident_pgt + 8(%rip)
17309 +#endif
17310
17311 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17312 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17313 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17314 +
17315 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17316 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17317
17318 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17319 -
17320 - /* Add an Identity mapping if I am above 1G */
17321 - leaq _text(%rip), %rdi
17322 - andq $PMD_PAGE_MASK, %rdi
17323 -
17324 - movq %rdi, %rax
17325 - shrq $PUD_SHIFT, %rax
17326 - andq $(PTRS_PER_PUD - 1), %rax
17327 - jz ident_complete
17328 -
17329 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17330 - leaq level3_ident_pgt(%rip), %rbx
17331 - movq %rdx, 0(%rbx, %rax, 8)
17332 -
17333 - movq %rdi, %rax
17334 - shrq $PMD_SHIFT, %rax
17335 - andq $(PTRS_PER_PMD - 1), %rax
17336 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17337 - leaq level2_spare_pgt(%rip), %rbx
17338 - movq %rdx, 0(%rbx, %rax, 8)
17339 -ident_complete:
17340 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17341
17342 /*
17343 * Fixup the kernel text+data virtual addresses. Note that
17344 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
17345 * after the boot processor executes this code.
17346 */
17347
17348 - /* Enable PAE mode and PGE */
17349 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17350 + /* Enable PAE mode and PSE/PGE */
17351 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17352 movq %rax, %cr4
17353
17354 /* Setup early boot stage 4 level pagetables. */
17355 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17356 movl $MSR_EFER, %ecx
17357 rdmsr
17358 btsl $_EFER_SCE, %eax /* Enable System Call */
17359 - btl $20,%edi /* No Execute supported? */
17360 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17361 jnc 1f
17362 btsl $_EFER_NX, %eax
17363 + leaq init_level4_pgt(%rip), %rdi
17364 +#ifndef CONFIG_EFI
17365 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17366 +#endif
17367 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17368 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17369 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17370 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17371 1: wrmsr /* Make changes effective */
17372
17373 /* Setup cr0 */
17374 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17375 * jump. In addition we need to ensure %cs is set so we make this
17376 * a far return.
17377 */
17378 + pax_set_fptr_mask
17379 movq initial_code(%rip),%rax
17380 pushq $0 # fake return address to stop unwinder
17381 pushq $__KERNEL_CS # set correct cs
17382 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17383 bad_address:
17384 jmp bad_address
17385
17386 - .section ".init.text","ax"
17387 + __INIT
17388 #ifdef CONFIG_EARLY_PRINTK
17389 .globl early_idt_handlers
17390 early_idt_handlers:
17391 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17392 #endif /* EARLY_PRINTK */
17393 1: hlt
17394 jmp 1b
17395 + .previous
17396
17397 #ifdef CONFIG_EARLY_PRINTK
17398 + __INITDATA
17399 early_recursion_flag:
17400 .long 0
17401 + .previous
17402
17403 + .section .rodata,"a",@progbits
17404 early_idt_msg:
17405 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17406 early_idt_ripmsg:
17407 .asciz "RIP %s\n"
17408 + .previous
17409 #endif /* CONFIG_EARLY_PRINTK */
17410 - .previous
17411
17412 + .section .rodata,"a",@progbits
17413 #define NEXT_PAGE(name) \
17414 .balign PAGE_SIZE; \
17415 ENTRY(name)
17416 @@ -338,7 +348,6 @@ ENTRY(name)
17417 i = i + 1 ; \
17418 .endr
17419
17420 - .data
17421 /*
17422 * This default setting generates an ident mapping at address 0x100000
17423 * and a mapping for the kernel that precisely maps virtual address
17424 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17425 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17426 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17427 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17428 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17429 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17430 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17431 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17432 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17433 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17434 .org init_level4_pgt + L4_START_KERNEL*8, 0
17435 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17436 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17437
17438 +#ifdef CONFIG_PAX_PER_CPU_PGD
17439 +NEXT_PAGE(cpu_pgd)
17440 + .rept NR_CPUS
17441 + .fill 512,8,0
17442 + .endr
17443 +#endif
17444 +
17445 NEXT_PAGE(level3_ident_pgt)
17446 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17447 +#ifdef CONFIG_XEN
17448 .fill 511,8,0
17449 +#else
17450 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17451 + .fill 510,8,0
17452 +#endif
17453 +
17454 +NEXT_PAGE(level3_vmalloc_start_pgt)
17455 + .fill 512,8,0
17456 +
17457 +NEXT_PAGE(level3_vmalloc_end_pgt)
17458 + .fill 512,8,0
17459 +
17460 +NEXT_PAGE(level3_vmemmap_pgt)
17461 + .fill L3_VMEMMAP_START,8,0
17462 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17463
17464 NEXT_PAGE(level3_kernel_pgt)
17465 .fill L3_START_KERNEL,8,0
17466 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17467 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17468 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17469
17470 +NEXT_PAGE(level2_vmemmap_pgt)
17471 + .fill 512,8,0
17472 +
17473 NEXT_PAGE(level2_fixmap_pgt)
17474 - .fill 506,8,0
17475 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17476 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17477 - .fill 5,8,0
17478 + .fill 507,8,0
17479 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17480 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17481 + .fill 4,8,0
17482
17483 -NEXT_PAGE(level1_fixmap_pgt)
17484 +NEXT_PAGE(level1_vsyscall_pgt)
17485 .fill 512,8,0
17486
17487 -NEXT_PAGE(level2_ident_pgt)
17488 - /* Since I easily can, map the first 1G.
17489 + /* Since I easily can, map the first 2G.
17490 * Don't set NX because code runs from these pages.
17491 */
17492 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17493 +NEXT_PAGE(level2_ident_pgt)
17494 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17495
17496 NEXT_PAGE(level2_kernel_pgt)
17497 /*
17498 @@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17499 * If you want to increase this then increase MODULES_VADDR
17500 * too.)
17501 */
17502 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17503 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17504 -
17505 -NEXT_PAGE(level2_spare_pgt)
17506 - .fill 512, 8, 0
17507 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17508
17509 #undef PMDS
17510 #undef NEXT_PAGE
17511
17512 - .data
17513 + .align PAGE_SIZE
17514 +ENTRY(cpu_gdt_table)
17515 + .rept NR_CPUS
17516 + .quad 0x0000000000000000 /* NULL descriptor */
17517 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17518 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17519 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17520 + .quad 0x00cffb000000ffff /* __USER32_CS */
17521 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17522 + .quad 0x00affb000000ffff /* __USER_CS */
17523 +
17524 +#ifdef CONFIG_PAX_KERNEXEC
17525 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17526 +#else
17527 + .quad 0x0 /* unused */
17528 +#endif
17529 +
17530 + .quad 0,0 /* TSS */
17531 + .quad 0,0 /* LDT */
17532 + .quad 0,0,0 /* three TLS descriptors */
17533 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17534 + /* asm/segment.h:GDT_ENTRIES must match this */
17535 +
17536 + /* zero the remaining page */
17537 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17538 + .endr
17539 +
17540 .align 16
17541 .globl early_gdt_descr
17542 early_gdt_descr:
17543 .word GDT_ENTRIES*8-1
17544 early_gdt_descr_base:
17545 - .quad INIT_PER_CPU_VAR(gdt_page)
17546 + .quad cpu_gdt_table
17547
17548 ENTRY(phys_base)
17549 /* This must match the first entry in level2_kernel_pgt */
17550 .quad 0x0000000000000000
17551
17552 #include "../../x86/xen/xen-head.S"
17553 -
17554 - .section .bss, "aw", @nobits
17555 +
17556 + .section .rodata,"a",@progbits
17557 .align L1_CACHE_BYTES
17558 ENTRY(idt_table)
17559 - .skip IDT_ENTRIES * 16
17560 + .fill 512,8,0
17561
17562 .align L1_CACHE_BYTES
17563 ENTRY(nmi_idt_table)
17564 - .skip IDT_ENTRIES * 16
17565 + .fill 512,8,0
17566
17567 __PAGE_ALIGNED_BSS
17568 .align PAGE_SIZE
17569 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17570 index 9c3bd4a..e1d9b35 100644
17571 --- a/arch/x86/kernel/i386_ksyms_32.c
17572 +++ b/arch/x86/kernel/i386_ksyms_32.c
17573 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17574 EXPORT_SYMBOL(cmpxchg8b_emu);
17575 #endif
17576
17577 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17578 +
17579 /* Networking helper routines. */
17580 EXPORT_SYMBOL(csum_partial_copy_generic);
17581 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17582 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17583
17584 EXPORT_SYMBOL(__get_user_1);
17585 EXPORT_SYMBOL(__get_user_2);
17586 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17587
17588 EXPORT_SYMBOL(csum_partial);
17589 EXPORT_SYMBOL(empty_zero_page);
17590 +
17591 +#ifdef CONFIG_PAX_KERNEXEC
17592 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17593 +#endif
17594 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17595 index 6104852..6114160 100644
17596 --- a/arch/x86/kernel/i8259.c
17597 +++ b/arch/x86/kernel/i8259.c
17598 @@ -210,7 +210,7 @@ spurious_8259A_irq:
17599 "spurious 8259A interrupt: IRQ%d.\n", irq);
17600 spurious_irq_mask |= irqmask;
17601 }
17602 - atomic_inc(&irq_err_count);
17603 + atomic_inc_unchecked(&irq_err_count);
17604 /*
17605 * Theoretically we do not have to handle this IRQ,
17606 * but in Linux this does not cause problems and is
17607 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17608 index 43e9ccf..44ccf6f 100644
17609 --- a/arch/x86/kernel/init_task.c
17610 +++ b/arch/x86/kernel/init_task.c
17611 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17612 * way process stacks are handled. This is done by having a special
17613 * "init_task" linker map entry..
17614 */
17615 -union thread_union init_thread_union __init_task_data =
17616 - { INIT_THREAD_INFO(init_task) };
17617 +union thread_union init_thread_union __init_task_data;
17618
17619 /*
17620 * Initial task structure.
17621 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17622 * section. Since TSS's are completely CPU-local, we want them
17623 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17624 */
17625 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17626 -
17627 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17628 +EXPORT_SYMBOL(init_tss);
17629 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17630 index 8c96897..be66bfa 100644
17631 --- a/arch/x86/kernel/ioport.c
17632 +++ b/arch/x86/kernel/ioport.c
17633 @@ -6,6 +6,7 @@
17634 #include <linux/sched.h>
17635 #include <linux/kernel.h>
17636 #include <linux/capability.h>
17637 +#include <linux/security.h>
17638 #include <linux/errno.h>
17639 #include <linux/types.h>
17640 #include <linux/ioport.h>
17641 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17642
17643 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17644 return -EINVAL;
17645 +#ifdef CONFIG_GRKERNSEC_IO
17646 + if (turn_on && grsec_disable_privio) {
17647 + gr_handle_ioperm();
17648 + return -EPERM;
17649 + }
17650 +#endif
17651 if (turn_on && !capable(CAP_SYS_RAWIO))
17652 return -EPERM;
17653
17654 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17655 * because the ->io_bitmap_max value must match the bitmap
17656 * contents:
17657 */
17658 - tss = &per_cpu(init_tss, get_cpu());
17659 + tss = init_tss + get_cpu();
17660
17661 if (turn_on)
17662 bitmap_clear(t->io_bitmap_ptr, from, num);
17663 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17664 return -EINVAL;
17665 /* Trying to gain more privileges? */
17666 if (level > old) {
17667 +#ifdef CONFIG_GRKERNSEC_IO
17668 + if (grsec_disable_privio) {
17669 + gr_handle_iopl();
17670 + return -EPERM;
17671 + }
17672 +#endif
17673 if (!capable(CAP_SYS_RAWIO))
17674 return -EPERM;
17675 }
17676 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17677 index 7943e0c..dd32c5c 100644
17678 --- a/arch/x86/kernel/irq.c
17679 +++ b/arch/x86/kernel/irq.c
17680 @@ -18,7 +18,7 @@
17681 #include <asm/mce.h>
17682 #include <asm/hw_irq.h>
17683
17684 -atomic_t irq_err_count;
17685 +atomic_unchecked_t irq_err_count;
17686
17687 /* Function pointer for generic interrupt vector handling */
17688 void (*x86_platform_ipi_callback)(void) = NULL;
17689 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17690 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17691 seq_printf(p, " Machine check polls\n");
17692 #endif
17693 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17694 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17695 #if defined(CONFIG_X86_IO_APIC)
17696 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17697 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17698 #endif
17699 return 0;
17700 }
17701 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17702
17703 u64 arch_irq_stat(void)
17704 {
17705 - u64 sum = atomic_read(&irq_err_count);
17706 + u64 sum = atomic_read_unchecked(&irq_err_count);
17707
17708 #ifdef CONFIG_X86_IO_APIC
17709 - sum += atomic_read(&irq_mis_count);
17710 + sum += atomic_read_unchecked(&irq_mis_count);
17711 #endif
17712 return sum;
17713 }
17714 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17715 index 40fc861..9b8739b 100644
17716 --- a/arch/x86/kernel/irq_32.c
17717 +++ b/arch/x86/kernel/irq_32.c
17718 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17719 __asm__ __volatile__("andl %%esp,%0" :
17720 "=r" (sp) : "0" (THREAD_SIZE - 1));
17721
17722 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17723 + return sp < STACK_WARN;
17724 }
17725
17726 static void print_stack_overflow(void)
17727 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17728 * per-CPU IRQ handling contexts (thread information and stack)
17729 */
17730 union irq_ctx {
17731 - struct thread_info tinfo;
17732 - u32 stack[THREAD_SIZE/sizeof(u32)];
17733 + unsigned long previous_esp;
17734 + u32 stack[THREAD_SIZE/sizeof(u32)];
17735 } __attribute__((aligned(THREAD_SIZE)));
17736
17737 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17738 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17739 static inline int
17740 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17741 {
17742 - union irq_ctx *curctx, *irqctx;
17743 + union irq_ctx *irqctx;
17744 u32 *isp, arg1, arg2;
17745
17746 - curctx = (union irq_ctx *) current_thread_info();
17747 irqctx = __this_cpu_read(hardirq_ctx);
17748
17749 /*
17750 @@ -92,21 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17751 * handler) we can't do that and just have to keep using the
17752 * current stack (which is the irq stack already after all)
17753 */
17754 - if (unlikely(curctx == irqctx))
17755 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17756 return 0;
17757
17758 /* build the stack frame on the IRQ stack */
17759 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17760 - irqctx->tinfo.task = curctx->tinfo.task;
17761 - irqctx->tinfo.previous_esp = current_stack_pointer;
17762 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17763 + irqctx->previous_esp = current_stack_pointer;
17764
17765 - /*
17766 - * Copy the softirq bits in preempt_count so that the
17767 - * softirq checks work in the hardirq context.
17768 - */
17769 - irqctx->tinfo.preempt_count =
17770 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17771 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17772 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17773 + __set_fs(MAKE_MM_SEG(0));
17774 +#endif
17775
17776 if (unlikely(overflow))
17777 call_on_stack(print_stack_overflow, isp);
17778 @@ -118,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17779 : "0" (irq), "1" (desc), "2" (isp),
17780 "D" (desc->handle_irq)
17781 : "memory", "cc", "ecx");
17782 +
17783 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17784 + __set_fs(current_thread_info()->addr_limit);
17785 +#endif
17786 +
17787 return 1;
17788 }
17789
17790 @@ -126,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17791 */
17792 void __cpuinit irq_ctx_init(int cpu)
17793 {
17794 - union irq_ctx *irqctx;
17795 -
17796 if (per_cpu(hardirq_ctx, cpu))
17797 return;
17798
17799 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17800 - THREAD_FLAGS,
17801 - THREAD_ORDER));
17802 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17803 - irqctx->tinfo.cpu = cpu;
17804 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17805 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17806 -
17807 - per_cpu(hardirq_ctx, cpu) = irqctx;
17808 -
17809 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17810 - THREAD_FLAGS,
17811 - THREAD_ORDER));
17812 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17813 - irqctx->tinfo.cpu = cpu;
17814 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17815 -
17816 - per_cpu(softirq_ctx, cpu) = irqctx;
17817 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17818 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17819
17820 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17821 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17822 @@ -157,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17823 asmlinkage void do_softirq(void)
17824 {
17825 unsigned long flags;
17826 - struct thread_info *curctx;
17827 union irq_ctx *irqctx;
17828 u32 *isp;
17829
17830 @@ -167,15 +147,22 @@ asmlinkage void do_softirq(void)
17831 local_irq_save(flags);
17832
17833 if (local_softirq_pending()) {
17834 - curctx = current_thread_info();
17835 irqctx = __this_cpu_read(softirq_ctx);
17836 - irqctx->tinfo.task = curctx->task;
17837 - irqctx->tinfo.previous_esp = current_stack_pointer;
17838 + irqctx->previous_esp = current_stack_pointer;
17839
17840 /* build the stack frame on the softirq stack */
17841 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17842 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17843 +
17844 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17845 + __set_fs(MAKE_MM_SEG(0));
17846 +#endif
17847
17848 call_on_stack(__do_softirq, isp);
17849 +
17850 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17851 + __set_fs(current_thread_info()->addr_limit);
17852 +#endif
17853 +
17854 /*
17855 * Shouldn't happen, we returned above if in_interrupt():
17856 */
17857 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17858 index d04d3ec..ea4b374 100644
17859 --- a/arch/x86/kernel/irq_64.c
17860 +++ b/arch/x86/kernel/irq_64.c
17861 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17862 u64 estack_top, estack_bottom;
17863 u64 curbase = (u64)task_stack_page(current);
17864
17865 - if (user_mode_vm(regs))
17866 + if (user_mode(regs))
17867 return;
17868
17869 if (regs->sp >= curbase + sizeof(struct thread_info) +
17870 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17871 index 90fcf62..e682cdd 100644
17872 --- a/arch/x86/kernel/kdebugfs.c
17873 +++ b/arch/x86/kernel/kdebugfs.c
17874 @@ -28,6 +28,8 @@ struct setup_data_node {
17875 };
17876
17877 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17878 + size_t count, loff_t *ppos) __size_overflow(3);
17879 +static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17880 size_t count, loff_t *ppos)
17881 {
17882 struct setup_data_node *node = file->private_data;
17883 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17884 index 2f45c4c..d95504f 100644
17885 --- a/arch/x86/kernel/kgdb.c
17886 +++ b/arch/x86/kernel/kgdb.c
17887 @@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17888 #ifdef CONFIG_X86_32
17889 switch (regno) {
17890 case GDB_SS:
17891 - if (!user_mode_vm(regs))
17892 + if (!user_mode(regs))
17893 *(unsigned long *)mem = __KERNEL_DS;
17894 break;
17895 case GDB_SP:
17896 - if (!user_mode_vm(regs))
17897 + if (!user_mode(regs))
17898 *(unsigned long *)mem = kernel_stack_pointer(regs);
17899 break;
17900 case GDB_GS:
17901 @@ -475,12 +475,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17902 case 'k':
17903 /* clear the trace bit */
17904 linux_regs->flags &= ~X86_EFLAGS_TF;
17905 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17906 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17907
17908 /* set the trace bit if we're stepping */
17909 if (remcomInBuffer[0] == 's') {
17910 linux_regs->flags |= X86_EFLAGS_TF;
17911 - atomic_set(&kgdb_cpu_doing_single_step,
17912 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17913 raw_smp_processor_id());
17914 }
17915
17916 @@ -545,7 +545,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17917
17918 switch (cmd) {
17919 case DIE_DEBUG:
17920 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17921 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17922 if (user_mode(regs))
17923 return single_step_cont(regs, args);
17924 break;
17925 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17926 index 7da647d..56fe348 100644
17927 --- a/arch/x86/kernel/kprobes.c
17928 +++ b/arch/x86/kernel/kprobes.c
17929 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17930 } __attribute__((packed)) *insn;
17931
17932 insn = (struct __arch_relative_insn *)from;
17933 +
17934 + pax_open_kernel();
17935 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17936 insn->op = op;
17937 + pax_close_kernel();
17938 }
17939
17940 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17941 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17942 kprobe_opcode_t opcode;
17943 kprobe_opcode_t *orig_opcodes = opcodes;
17944
17945 - if (search_exception_tables((unsigned long)opcodes))
17946 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17947 return 0; /* Page fault may occur on this address. */
17948
17949 retry:
17950 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17951 }
17952 }
17953 insn_get_length(&insn);
17954 + pax_open_kernel();
17955 memcpy(dest, insn.kaddr, insn.length);
17956 + pax_close_kernel();
17957
17958 #ifdef CONFIG_X86_64
17959 if (insn_rip_relative(&insn)) {
17960 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17961 (u8 *) dest;
17962 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17963 disp = (u8 *) dest + insn_offset_displacement(&insn);
17964 + pax_open_kernel();
17965 *(s32 *) disp = (s32) newdisp;
17966 + pax_close_kernel();
17967 }
17968 #endif
17969 return insn.length;
17970 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
17971 */
17972 __copy_instruction(p->ainsn.insn, p->addr, 0);
17973
17974 - if (can_boost(p->addr))
17975 + if (can_boost(ktla_ktva(p->addr)))
17976 p->ainsn.boostable = 0;
17977 else
17978 p->ainsn.boostable = -1;
17979
17980 - p->opcode = *p->addr;
17981 + p->opcode = *(ktla_ktva(p->addr));
17982 }
17983
17984 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17985 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17986 * nor set current_kprobe, because it doesn't use single
17987 * stepping.
17988 */
17989 - regs->ip = (unsigned long)p->ainsn.insn;
17990 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17991 preempt_enable_no_resched();
17992 return;
17993 }
17994 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17995 if (p->opcode == BREAKPOINT_INSTRUCTION)
17996 regs->ip = (unsigned long)p->addr;
17997 else
17998 - regs->ip = (unsigned long)p->ainsn.insn;
17999 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18000 }
18001
18002 /*
18003 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18004 setup_singlestep(p, regs, kcb, 0);
18005 return 1;
18006 }
18007 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
18008 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18009 /*
18010 * The breakpoint instruction was removed right
18011 * after we hit it. Another cpu has removed
18012 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18013 " movq %rax, 152(%rsp)\n"
18014 RESTORE_REGS_STRING
18015 " popfq\n"
18016 +#ifdef KERNEXEC_PLUGIN
18017 + " btsq $63,(%rsp)\n"
18018 +#endif
18019 #else
18020 " pushf\n"
18021 SAVE_REGS_STRING
18022 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
18023 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18024 {
18025 unsigned long *tos = stack_addr(regs);
18026 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18027 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18028 unsigned long orig_ip = (unsigned long)p->addr;
18029 kprobe_opcode_t *insn = p->ainsn.insn;
18030
18031 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18032 struct die_args *args = data;
18033 int ret = NOTIFY_DONE;
18034
18035 - if (args->regs && user_mode_vm(args->regs))
18036 + if (args->regs && user_mode(args->regs))
18037 return ret;
18038
18039 switch (val) {
18040 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
18041 * Verify if the address gap is in 2GB range, because this uses
18042 * a relative jump.
18043 */
18044 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
18045 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
18046 if (abs(rel) > 0x7fffffff)
18047 return -ERANGE;
18048
18049 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
18050 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
18051
18052 /* Set probe function call */
18053 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
18054 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
18055
18056 /* Set returning jmp instruction at the tail of out-of-line buffer */
18057 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
18058 - (u8 *)op->kp.addr + op->optinsn.size);
18059 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
18060
18061 flush_icache_range((unsigned long) buf,
18062 (unsigned long) buf + TMPL_END_IDX +
18063 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
18064 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
18065
18066 /* Backup instructions which will be replaced by jump address */
18067 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
18068 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
18069 RELATIVE_ADDR_SIZE);
18070
18071 insn_buf[0] = RELATIVEJUMP_OPCODE;
18072 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18073 index ea69726..604d066 100644
18074 --- a/arch/x86/kernel/ldt.c
18075 +++ b/arch/x86/kernel/ldt.c
18076 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18077 if (reload) {
18078 #ifdef CONFIG_SMP
18079 preempt_disable();
18080 - load_LDT(pc);
18081 + load_LDT_nolock(pc);
18082 if (!cpumask_equal(mm_cpumask(current->mm),
18083 cpumask_of(smp_processor_id())))
18084 smp_call_function(flush_ldt, current->mm, 1);
18085 preempt_enable();
18086 #else
18087 - load_LDT(pc);
18088 + load_LDT_nolock(pc);
18089 #endif
18090 }
18091 if (oldsize) {
18092 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18093 return err;
18094
18095 for (i = 0; i < old->size; i++)
18096 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18097 + write_ldt_entry(new->ldt, i, old->ldt + i);
18098 return 0;
18099 }
18100
18101 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18102 retval = copy_ldt(&mm->context, &old_mm->context);
18103 mutex_unlock(&old_mm->context.lock);
18104 }
18105 +
18106 + if (tsk == current) {
18107 + mm->context.vdso = 0;
18108 +
18109 +#ifdef CONFIG_X86_32
18110 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18111 + mm->context.user_cs_base = 0UL;
18112 + mm->context.user_cs_limit = ~0UL;
18113 +
18114 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18115 + cpus_clear(mm->context.cpu_user_cs_mask);
18116 +#endif
18117 +
18118 +#endif
18119 +#endif
18120 +
18121 + }
18122 +
18123 return retval;
18124 }
18125
18126 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18127 }
18128 }
18129
18130 +#ifdef CONFIG_PAX_SEGMEXEC
18131 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18132 + error = -EINVAL;
18133 + goto out_unlock;
18134 + }
18135 +#endif
18136 +
18137 fill_ldt(&ldt, &ldt_info);
18138 if (oldmode)
18139 ldt.avl = 0;
18140 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18141 index a3fa43b..8966f4c 100644
18142 --- a/arch/x86/kernel/machine_kexec_32.c
18143 +++ b/arch/x86/kernel/machine_kexec_32.c
18144 @@ -27,7 +27,7 @@
18145 #include <asm/cacheflush.h>
18146 #include <asm/debugreg.h>
18147
18148 -static void set_idt(void *newidt, __u16 limit)
18149 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18150 {
18151 struct desc_ptr curidt;
18152
18153 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
18154 }
18155
18156
18157 -static void set_gdt(void *newgdt, __u16 limit)
18158 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18159 {
18160 struct desc_ptr curgdt;
18161
18162 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18163 }
18164
18165 control_page = page_address(image->control_code_page);
18166 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18167 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18168
18169 relocate_kernel_ptr = control_page;
18170 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18171 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18172 index 3ca42d0..7cff8cc 100644
18173 --- a/arch/x86/kernel/microcode_intel.c
18174 +++ b/arch/x86/kernel/microcode_intel.c
18175 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18176
18177 static int get_ucode_user(void *to, const void *from, size_t n)
18178 {
18179 - return copy_from_user(to, from, n);
18180 + return copy_from_user(to, (const void __force_user *)from, n);
18181 }
18182
18183 static enum ucode_state
18184 request_microcode_user(int cpu, const void __user *buf, size_t size)
18185 {
18186 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18187 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18188 }
18189
18190 static void microcode_fini_cpu(int cpu)
18191 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18192 index 925179f..267ac7a 100644
18193 --- a/arch/x86/kernel/module.c
18194 +++ b/arch/x86/kernel/module.c
18195 @@ -36,15 +36,60 @@
18196 #define DEBUGP(fmt...)
18197 #endif
18198
18199 -void *module_alloc(unsigned long size)
18200 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18201 {
18202 - if (PAGE_ALIGN(size) > MODULES_LEN)
18203 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18204 return NULL;
18205 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18206 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18207 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18208 -1, __builtin_return_address(0));
18209 }
18210
18211 +void *module_alloc(unsigned long size)
18212 +{
18213 +
18214 +#ifdef CONFIG_PAX_KERNEXEC
18215 + return __module_alloc(size, PAGE_KERNEL);
18216 +#else
18217 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18218 +#endif
18219 +
18220 +}
18221 +
18222 +#ifdef CONFIG_PAX_KERNEXEC
18223 +#ifdef CONFIG_X86_32
18224 +void *module_alloc_exec(unsigned long size)
18225 +{
18226 + struct vm_struct *area;
18227 +
18228 + if (size == 0)
18229 + return NULL;
18230 +
18231 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18232 + return area ? area->addr : NULL;
18233 +}
18234 +EXPORT_SYMBOL(module_alloc_exec);
18235 +
18236 +void module_free_exec(struct module *mod, void *module_region)
18237 +{
18238 + vunmap(module_region);
18239 +}
18240 +EXPORT_SYMBOL(module_free_exec);
18241 +#else
18242 +void module_free_exec(struct module *mod, void *module_region)
18243 +{
18244 + module_free(mod, module_region);
18245 +}
18246 +EXPORT_SYMBOL(module_free_exec);
18247 +
18248 +void *module_alloc_exec(unsigned long size)
18249 +{
18250 + return __module_alloc(size, PAGE_KERNEL_RX);
18251 +}
18252 +EXPORT_SYMBOL(module_alloc_exec);
18253 +#endif
18254 +#endif
18255 +
18256 #ifdef CONFIG_X86_32
18257 int apply_relocate(Elf32_Shdr *sechdrs,
18258 const char *strtab,
18259 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18260 unsigned int i;
18261 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18262 Elf32_Sym *sym;
18263 - uint32_t *location;
18264 + uint32_t *plocation, location;
18265
18266 DEBUGP("Applying relocate section %u to %u\n", relsec,
18267 sechdrs[relsec].sh_info);
18268 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18269 /* This is where to make the change */
18270 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18271 - + rel[i].r_offset;
18272 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18273 + location = (uint32_t)plocation;
18274 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18275 + plocation = ktla_ktva((void *)plocation);
18276 /* This is the symbol it is referring to. Note that all
18277 undefined symbols have been resolved. */
18278 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18279 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18280 switch (ELF32_R_TYPE(rel[i].r_info)) {
18281 case R_386_32:
18282 /* We add the value into the location given */
18283 - *location += sym->st_value;
18284 + pax_open_kernel();
18285 + *plocation += sym->st_value;
18286 + pax_close_kernel();
18287 break;
18288 case R_386_PC32:
18289 /* Add the value, subtract its postition */
18290 - *location += sym->st_value - (uint32_t)location;
18291 + pax_open_kernel();
18292 + *plocation += sym->st_value - location;
18293 + pax_close_kernel();
18294 break;
18295 default:
18296 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18297 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18298 case R_X86_64_NONE:
18299 break;
18300 case R_X86_64_64:
18301 + pax_open_kernel();
18302 *(u64 *)loc = val;
18303 + pax_close_kernel();
18304 break;
18305 case R_X86_64_32:
18306 + pax_open_kernel();
18307 *(u32 *)loc = val;
18308 + pax_close_kernel();
18309 if (val != *(u32 *)loc)
18310 goto overflow;
18311 break;
18312 case R_X86_64_32S:
18313 + pax_open_kernel();
18314 *(s32 *)loc = val;
18315 + pax_close_kernel();
18316 if ((s64)val != *(s32 *)loc)
18317 goto overflow;
18318 break;
18319 case R_X86_64_PC32:
18320 val -= (u64)loc;
18321 + pax_open_kernel();
18322 *(u32 *)loc = val;
18323 + pax_close_kernel();
18324 +
18325 #if 0
18326 if ((s64)val != *(s32 *)loc)
18327 goto overflow;
18328 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18329 index 47acaf3..ec48ab6 100644
18330 --- a/arch/x86/kernel/nmi.c
18331 +++ b/arch/x86/kernel/nmi.c
18332 @@ -505,6 +505,17 @@ static inline void nmi_nesting_postprocess(void)
18333 dotraplinkage notrace __kprobes void
18334 do_nmi(struct pt_regs *regs, long error_code)
18335 {
18336 +
18337 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18338 + if (!user_mode(regs)) {
18339 + unsigned long cs = regs->cs & 0xFFFF;
18340 + unsigned long ip = ktva_ktla(regs->ip);
18341 +
18342 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18343 + regs->ip = ip;
18344 + }
18345 +#endif
18346 +
18347 nmi_nesting_preprocess(regs);
18348
18349 nmi_enter();
18350 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18351 index 676b8c7..870ba04 100644
18352 --- a/arch/x86/kernel/paravirt-spinlocks.c
18353 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18354 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18355 arch_spin_lock(lock);
18356 }
18357
18358 -struct pv_lock_ops pv_lock_ops = {
18359 +struct pv_lock_ops pv_lock_ops __read_only = {
18360 #ifdef CONFIG_SMP
18361 .spin_is_locked = __ticket_spin_is_locked,
18362 .spin_is_contended = __ticket_spin_is_contended,
18363 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18364 index d90272e..6bb013b 100644
18365 --- a/arch/x86/kernel/paravirt.c
18366 +++ b/arch/x86/kernel/paravirt.c
18367 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18368 {
18369 return x;
18370 }
18371 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18372 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18373 +#endif
18374
18375 void __init default_banner(void)
18376 {
18377 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18378 if (opfunc == NULL)
18379 /* If there's no function, patch it with a ud2a (BUG) */
18380 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18381 - else if (opfunc == _paravirt_nop)
18382 + else if (opfunc == (void *)_paravirt_nop)
18383 /* If the operation is a nop, then nop the callsite */
18384 ret = paravirt_patch_nop();
18385
18386 /* identity functions just return their single argument */
18387 - else if (opfunc == _paravirt_ident_32)
18388 + else if (opfunc == (void *)_paravirt_ident_32)
18389 ret = paravirt_patch_ident_32(insnbuf, len);
18390 - else if (opfunc == _paravirt_ident_64)
18391 + else if (opfunc == (void *)_paravirt_ident_64)
18392 ret = paravirt_patch_ident_64(insnbuf, len);
18393 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18394 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18395 + ret = paravirt_patch_ident_64(insnbuf, len);
18396 +#endif
18397
18398 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18399 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18400 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18401 if (insn_len > len || start == NULL)
18402 insn_len = len;
18403 else
18404 - memcpy(insnbuf, start, insn_len);
18405 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18406
18407 return insn_len;
18408 }
18409 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
18410 preempt_enable();
18411 }
18412
18413 -struct pv_info pv_info = {
18414 +struct pv_info pv_info __read_only = {
18415 .name = "bare hardware",
18416 .paravirt_enabled = 0,
18417 .kernel_rpl = 0,
18418 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
18419 #endif
18420 };
18421
18422 -struct pv_init_ops pv_init_ops = {
18423 +struct pv_init_ops pv_init_ops __read_only = {
18424 .patch = native_patch,
18425 };
18426
18427 -struct pv_time_ops pv_time_ops = {
18428 +struct pv_time_ops pv_time_ops __read_only = {
18429 .sched_clock = native_sched_clock,
18430 .steal_clock = native_steal_clock,
18431 };
18432
18433 -struct pv_irq_ops pv_irq_ops = {
18434 +struct pv_irq_ops pv_irq_ops __read_only = {
18435 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18436 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18437 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18438 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
18439 #endif
18440 };
18441
18442 -struct pv_cpu_ops pv_cpu_ops = {
18443 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18444 .cpuid = native_cpuid,
18445 .get_debugreg = native_get_debugreg,
18446 .set_debugreg = native_set_debugreg,
18447 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18448 .end_context_switch = paravirt_nop,
18449 };
18450
18451 -struct pv_apic_ops pv_apic_ops = {
18452 +struct pv_apic_ops pv_apic_ops __read_only = {
18453 #ifdef CONFIG_X86_LOCAL_APIC
18454 .startup_ipi_hook = paravirt_nop,
18455 #endif
18456 };
18457
18458 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18459 +#ifdef CONFIG_X86_32
18460 +#ifdef CONFIG_X86_PAE
18461 +/* 64-bit pagetable entries */
18462 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18463 +#else
18464 /* 32-bit pagetable entries */
18465 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18466 +#endif
18467 #else
18468 /* 64-bit pagetable entries */
18469 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18470 #endif
18471
18472 -struct pv_mmu_ops pv_mmu_ops = {
18473 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18474
18475 .read_cr2 = native_read_cr2,
18476 .write_cr2 = native_write_cr2,
18477 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18478 .make_pud = PTE_IDENT,
18479
18480 .set_pgd = native_set_pgd,
18481 + .set_pgd_batched = native_set_pgd_batched,
18482 #endif
18483 #endif /* PAGETABLE_LEVELS >= 3 */
18484
18485 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18486 },
18487
18488 .set_fixmap = native_set_fixmap,
18489 +
18490 +#ifdef CONFIG_PAX_KERNEXEC
18491 + .pax_open_kernel = native_pax_open_kernel,
18492 + .pax_close_kernel = native_pax_close_kernel,
18493 +#endif
18494 +
18495 };
18496
18497 EXPORT_SYMBOL_GPL(pv_time_ops);
18498 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18499 index 35ccf75..7a15747 100644
18500 --- a/arch/x86/kernel/pci-iommu_table.c
18501 +++ b/arch/x86/kernel/pci-iommu_table.c
18502 @@ -2,7 +2,7 @@
18503 #include <asm/iommu_table.h>
18504 #include <linux/string.h>
18505 #include <linux/kallsyms.h>
18506 -
18507 +#include <linux/sched.h>
18508
18509 #define DEBUG 1
18510
18511 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18512 index 15763af..da59ada 100644
18513 --- a/arch/x86/kernel/process.c
18514 +++ b/arch/x86/kernel/process.c
18515 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
18516
18517 void free_thread_info(struct thread_info *ti)
18518 {
18519 - free_thread_xstate(ti->task);
18520 free_pages((unsigned long)ti, THREAD_ORDER);
18521 }
18522
18523 +static struct kmem_cache *task_struct_cachep;
18524 +
18525 void arch_task_cache_init(void)
18526 {
18527 - task_xstate_cachep =
18528 - kmem_cache_create("task_xstate", xstate_size,
18529 + /* create a slab on which task_structs can be allocated */
18530 + task_struct_cachep =
18531 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18532 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18533 +
18534 + task_xstate_cachep =
18535 + kmem_cache_create("task_xstate", xstate_size,
18536 __alignof__(union thread_xstate),
18537 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18538 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18539 +}
18540 +
18541 +struct task_struct *alloc_task_struct_node(int node)
18542 +{
18543 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18544 +}
18545 +
18546 +void free_task_struct(struct task_struct *task)
18547 +{
18548 + free_thread_xstate(task);
18549 + kmem_cache_free(task_struct_cachep, task);
18550 }
18551
18552 /*
18553 @@ -70,7 +87,7 @@ void exit_thread(void)
18554 unsigned long *bp = t->io_bitmap_ptr;
18555
18556 if (bp) {
18557 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18558 + struct tss_struct *tss = init_tss + get_cpu();
18559
18560 t->io_bitmap_ptr = NULL;
18561 clear_thread_flag(TIF_IO_BITMAP);
18562 @@ -106,7 +123,7 @@ void show_regs_common(void)
18563
18564 printk(KERN_CONT "\n");
18565 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18566 - current->pid, current->comm, print_tainted(),
18567 + task_pid_nr(current), current->comm, print_tainted(),
18568 init_utsname()->release,
18569 (int)strcspn(init_utsname()->version, " "),
18570 init_utsname()->version);
18571 @@ -120,6 +137,9 @@ void flush_thread(void)
18572 {
18573 struct task_struct *tsk = current;
18574
18575 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18576 + loadsegment(gs, 0);
18577 +#endif
18578 flush_ptrace_hw_breakpoint(tsk);
18579 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18580 /*
18581 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18582 regs.di = (unsigned long) arg;
18583
18584 #ifdef CONFIG_X86_32
18585 - regs.ds = __USER_DS;
18586 - regs.es = __USER_DS;
18587 + regs.ds = __KERNEL_DS;
18588 + regs.es = __KERNEL_DS;
18589 regs.fs = __KERNEL_PERCPU;
18590 - regs.gs = __KERNEL_STACK_CANARY;
18591 + savesegment(gs, regs.gs);
18592 #else
18593 regs.ss = __KERNEL_DS;
18594 #endif
18595 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
18596
18597 return ret;
18598 }
18599 -void stop_this_cpu(void *dummy)
18600 +__noreturn void stop_this_cpu(void *dummy)
18601 {
18602 local_irq_disable();
18603 /*
18604 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
18605 }
18606 early_param("idle", idle_setup);
18607
18608 -unsigned long arch_align_stack(unsigned long sp)
18609 +#ifdef CONFIG_PAX_RANDKSTACK
18610 +void pax_randomize_kstack(struct pt_regs *regs)
18611 {
18612 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18613 - sp -= get_random_int() % 8192;
18614 - return sp & ~0xf;
18615 -}
18616 + struct thread_struct *thread = &current->thread;
18617 + unsigned long time;
18618
18619 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18620 -{
18621 - unsigned long range_end = mm->brk + 0x02000000;
18622 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18623 -}
18624 + if (!randomize_va_space)
18625 + return;
18626 +
18627 + if (v8086_mode(regs))
18628 + return;
18629
18630 + rdtscl(time);
18631 +
18632 + /* P4 seems to return a 0 LSB, ignore it */
18633 +#ifdef CONFIG_MPENTIUM4
18634 + time &= 0x3EUL;
18635 + time <<= 2;
18636 +#elif defined(CONFIG_X86_64)
18637 + time &= 0xFUL;
18638 + time <<= 4;
18639 +#else
18640 + time &= 0x1FUL;
18641 + time <<= 3;
18642 +#endif
18643 +
18644 + thread->sp0 ^= time;
18645 + load_sp0(init_tss + smp_processor_id(), thread);
18646 +
18647 +#ifdef CONFIG_X86_64
18648 + percpu_write(kernel_stack, thread->sp0);
18649 +#endif
18650 +}
18651 +#endif
18652 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18653 index c08d1ff..6ae1c81 100644
18654 --- a/arch/x86/kernel/process_32.c
18655 +++ b/arch/x86/kernel/process_32.c
18656 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18657 unsigned long thread_saved_pc(struct task_struct *tsk)
18658 {
18659 return ((unsigned long *)tsk->thread.sp)[3];
18660 +//XXX return tsk->thread.eip;
18661 }
18662
18663 #ifndef CONFIG_SMP
18664 @@ -132,15 +133,14 @@ void __show_regs(struct pt_regs *regs, int all)
18665 unsigned long sp;
18666 unsigned short ss, gs;
18667
18668 - if (user_mode_vm(regs)) {
18669 + if (user_mode(regs)) {
18670 sp = regs->sp;
18671 ss = regs->ss & 0xffff;
18672 - gs = get_user_gs(regs);
18673 } else {
18674 sp = kernel_stack_pointer(regs);
18675 savesegment(ss, ss);
18676 - savesegment(gs, gs);
18677 }
18678 + gs = get_user_gs(regs);
18679
18680 show_regs_common();
18681
18682 @@ -202,13 +202,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18683 struct task_struct *tsk;
18684 int err;
18685
18686 - childregs = task_pt_regs(p);
18687 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18688 *childregs = *regs;
18689 childregs->ax = 0;
18690 childregs->sp = sp;
18691
18692 p->thread.sp = (unsigned long) childregs;
18693 p->thread.sp0 = (unsigned long) (childregs+1);
18694 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18695
18696 p->thread.ip = (unsigned long) ret_from_fork;
18697
18698 @@ -299,7 +300,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18699 struct thread_struct *prev = &prev_p->thread,
18700 *next = &next_p->thread;
18701 int cpu = smp_processor_id();
18702 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18703 + struct tss_struct *tss = init_tss + cpu;
18704 fpu_switch_t fpu;
18705
18706 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18707 @@ -323,6 +324,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18708 */
18709 lazy_save_gs(prev->gs);
18710
18711 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18712 + __set_fs(task_thread_info(next_p)->addr_limit);
18713 +#endif
18714 +
18715 /*
18716 * Load the per-thread Thread-Local Storage descriptor.
18717 */
18718 @@ -353,6 +358,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18719 */
18720 arch_end_context_switch(next_p);
18721
18722 + percpu_write(current_task, next_p);
18723 + percpu_write(current_tinfo, &next_p->tinfo);
18724 +
18725 /*
18726 * Restore %gs if needed (which is common)
18727 */
18728 @@ -361,8 +369,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18729
18730 switch_fpu_finish(next_p, fpu);
18731
18732 - percpu_write(current_task, next_p);
18733 -
18734 return prev_p;
18735 }
18736
18737 @@ -392,4 +398,3 @@ unsigned long get_wchan(struct task_struct *p)
18738 } while (count++ < 16);
18739 return 0;
18740 }
18741 -
18742 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18743 index cfa5c90..4facd28 100644
18744 --- a/arch/x86/kernel/process_64.c
18745 +++ b/arch/x86/kernel/process_64.c
18746 @@ -89,7 +89,7 @@ static void __exit_idle(void)
18747 void exit_idle(void)
18748 {
18749 /* idle loop has pid 0 */
18750 - if (current->pid)
18751 + if (task_pid_nr(current))
18752 return;
18753 __exit_idle();
18754 }
18755 @@ -270,8 +270,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18756 struct pt_regs *childregs;
18757 struct task_struct *me = current;
18758
18759 - childregs = ((struct pt_regs *)
18760 - (THREAD_SIZE + task_stack_page(p))) - 1;
18761 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18762 *childregs = *regs;
18763
18764 childregs->ax = 0;
18765 @@ -283,6 +282,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18766 p->thread.sp = (unsigned long) childregs;
18767 p->thread.sp0 = (unsigned long) (childregs+1);
18768 p->thread.usersp = me->thread.usersp;
18769 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18770
18771 set_tsk_thread_flag(p, TIF_FORK);
18772
18773 @@ -385,7 +385,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18774 struct thread_struct *prev = &prev_p->thread;
18775 struct thread_struct *next = &next_p->thread;
18776 int cpu = smp_processor_id();
18777 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18778 + struct tss_struct *tss = init_tss + cpu;
18779 unsigned fsindex, gsindex;
18780 fpu_switch_t fpu;
18781
18782 @@ -467,10 +467,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18783 prev->usersp = percpu_read(old_rsp);
18784 percpu_write(old_rsp, next->usersp);
18785 percpu_write(current_task, next_p);
18786 + percpu_write(current_tinfo, &next_p->tinfo);
18787
18788 - percpu_write(kernel_stack,
18789 - (unsigned long)task_stack_page(next_p) +
18790 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18791 + percpu_write(kernel_stack, next->sp0);
18792
18793 /*
18794 * Now maybe reload the debug registers and handle I/O bitmaps
18795 @@ -525,12 +524,11 @@ unsigned long get_wchan(struct task_struct *p)
18796 if (!p || p == current || p->state == TASK_RUNNING)
18797 return 0;
18798 stack = (unsigned long)task_stack_page(p);
18799 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18800 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18801 return 0;
18802 fp = *(u64 *)(p->thread.sp);
18803 do {
18804 - if (fp < (unsigned long)stack ||
18805 - fp >= (unsigned long)stack+THREAD_SIZE)
18806 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18807 return 0;
18808 ip = *(u64 *)(fp+8);
18809 if (!in_sched_functions(ip))
18810 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18811 index 5026738..c5925c0 100644
18812 --- a/arch/x86/kernel/ptrace.c
18813 +++ b/arch/x86/kernel/ptrace.c
18814 @@ -823,7 +823,7 @@ long arch_ptrace(struct task_struct *child, long request,
18815 unsigned long addr, unsigned long data)
18816 {
18817 int ret;
18818 - unsigned long __user *datap = (unsigned long __user *)data;
18819 + unsigned long __user *datap = (__force unsigned long __user *)data;
18820
18821 switch (request) {
18822 /* read the word at location addr in the USER area. */
18823 @@ -908,14 +908,14 @@ long arch_ptrace(struct task_struct *child, long request,
18824 if ((int) addr < 0)
18825 return -EIO;
18826 ret = do_get_thread_area(child, addr,
18827 - (struct user_desc __user *)data);
18828 + (__force struct user_desc __user *) data);
18829 break;
18830
18831 case PTRACE_SET_THREAD_AREA:
18832 if ((int) addr < 0)
18833 return -EIO;
18834 ret = do_set_thread_area(child, addr,
18835 - (struct user_desc __user *)data, 0);
18836 + (__force struct user_desc __user *) data, 0);
18837 break;
18838 #endif
18839
18840 @@ -1332,7 +1332,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18841 memset(info, 0, sizeof(*info));
18842 info->si_signo = SIGTRAP;
18843 info->si_code = si_code;
18844 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18845 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18846 }
18847
18848 void user_single_step_siginfo(struct task_struct *tsk,
18849 @@ -1361,6 +1361,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18850 # define IS_IA32 0
18851 #endif
18852
18853 +#ifdef CONFIG_GRKERNSEC_SETXID
18854 +extern void gr_delayed_cred_worker(void);
18855 +#endif
18856 +
18857 /*
18858 * We must return the syscall number to actually look up in the table.
18859 * This can be -1L to skip running any syscall at all.
18860 @@ -1369,6 +1373,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18861 {
18862 long ret = 0;
18863
18864 +#ifdef CONFIG_GRKERNSEC_SETXID
18865 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18866 + gr_delayed_cred_worker();
18867 +#endif
18868 +
18869 /*
18870 * If we stepped into a sysenter/syscall insn, it trapped in
18871 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18872 @@ -1412,6 +1421,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18873 {
18874 bool step;
18875
18876 +#ifdef CONFIG_GRKERNSEC_SETXID
18877 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18878 + gr_delayed_cred_worker();
18879 +#endif
18880 +
18881 audit_syscall_exit(regs);
18882
18883 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18884 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18885 index 42eb330..139955c 100644
18886 --- a/arch/x86/kernel/pvclock.c
18887 +++ b/arch/x86/kernel/pvclock.c
18888 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18889 return pv_tsc_khz;
18890 }
18891
18892 -static atomic64_t last_value = ATOMIC64_INIT(0);
18893 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18894
18895 void pvclock_resume(void)
18896 {
18897 - atomic64_set(&last_value, 0);
18898 + atomic64_set_unchecked(&last_value, 0);
18899 }
18900
18901 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18902 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18903 * updating at the same time, and one of them could be slightly behind,
18904 * making the assumption that last_value always go forward fail to hold.
18905 */
18906 - last = atomic64_read(&last_value);
18907 + last = atomic64_read_unchecked(&last_value);
18908 do {
18909 if (ret < last)
18910 return last;
18911 - last = atomic64_cmpxchg(&last_value, last, ret);
18912 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18913 } while (unlikely(last != ret));
18914
18915 return ret;
18916 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18917 index d840e69..98e9581 100644
18918 --- a/arch/x86/kernel/reboot.c
18919 +++ b/arch/x86/kernel/reboot.c
18920 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18921 EXPORT_SYMBOL(pm_power_off);
18922
18923 static const struct desc_ptr no_idt = {};
18924 -static int reboot_mode;
18925 +static unsigned short reboot_mode;
18926 enum reboot_type reboot_type = BOOT_ACPI;
18927 int reboot_force;
18928
18929 @@ -335,13 +335,17 @@ core_initcall(reboot_init);
18930 extern const unsigned char machine_real_restart_asm[];
18931 extern const u64 machine_real_restart_gdt[3];
18932
18933 -void machine_real_restart(unsigned int type)
18934 +__noreturn void machine_real_restart(unsigned int type)
18935 {
18936 void *restart_va;
18937 unsigned long restart_pa;
18938 - void (*restart_lowmem)(unsigned int);
18939 + void (* __noreturn restart_lowmem)(unsigned int);
18940 u64 *lowmem_gdt;
18941
18942 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18943 + struct desc_struct *gdt;
18944 +#endif
18945 +
18946 local_irq_disable();
18947
18948 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18949 @@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18950 boot)". This seems like a fairly standard thing that gets set by
18951 REBOOT.COM programs, and the previous reset routine did this
18952 too. */
18953 - *((unsigned short *)0x472) = reboot_mode;
18954 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18955
18956 /* Patch the GDT in the low memory trampoline */
18957 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18958
18959 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18960 restart_pa = virt_to_phys(restart_va);
18961 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18962 + restart_lowmem = (void *)restart_pa;
18963
18964 /* GDT[0]: GDT self-pointer */
18965 lowmem_gdt[0] =
18966 @@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18967 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18968
18969 /* Jump to the identity-mapped low memory code */
18970 +
18971 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18972 + gdt = get_cpu_gdt_table(smp_processor_id());
18973 + pax_open_kernel();
18974 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18975 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18976 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18977 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18978 +#endif
18979 +#ifdef CONFIG_PAX_KERNEXEC
18980 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18981 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18982 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18983 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18984 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18985 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18986 +#endif
18987 + pax_close_kernel();
18988 +#endif
18989 +
18990 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18991 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18992 + unreachable();
18993 +#else
18994 restart_lowmem(type);
18995 +#endif
18996 +
18997 }
18998 #ifdef CONFIG_APM_MODULE
18999 EXPORT_SYMBOL(machine_real_restart);
19000 @@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19001 * try to force a triple fault and then cycle between hitting the keyboard
19002 * controller and doing that
19003 */
19004 -static void native_machine_emergency_restart(void)
19005 +__noreturn static void native_machine_emergency_restart(void)
19006 {
19007 int i;
19008 int attempt = 0;
19009 @@ -680,13 +710,13 @@ void native_machine_shutdown(void)
19010 #endif
19011 }
19012
19013 -static void __machine_emergency_restart(int emergency)
19014 +static __noreturn void __machine_emergency_restart(int emergency)
19015 {
19016 reboot_emergency = emergency;
19017 machine_ops.emergency_restart();
19018 }
19019
19020 -static void native_machine_restart(char *__unused)
19021 +static __noreturn void native_machine_restart(char *__unused)
19022 {
19023 printk("machine restart\n");
19024
19025 @@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
19026 __machine_emergency_restart(0);
19027 }
19028
19029 -static void native_machine_halt(void)
19030 +static __noreturn void native_machine_halt(void)
19031 {
19032 /* stop other cpus and apics */
19033 machine_shutdown();
19034 @@ -706,7 +736,7 @@ static void native_machine_halt(void)
19035 stop_this_cpu(NULL);
19036 }
19037
19038 -static void native_machine_power_off(void)
19039 +__noreturn static void native_machine_power_off(void)
19040 {
19041 if (pm_power_off) {
19042 if (!reboot_force)
19043 @@ -715,6 +745,7 @@ static void native_machine_power_off(void)
19044 }
19045 /* a fallback in case there is no PM info available */
19046 tboot_shutdown(TB_SHUTDOWN_HALT);
19047 + unreachable();
19048 }
19049
19050 struct machine_ops machine_ops = {
19051 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19052 index 7a6f3b3..bed145d7 100644
19053 --- a/arch/x86/kernel/relocate_kernel_64.S
19054 +++ b/arch/x86/kernel/relocate_kernel_64.S
19055 @@ -11,6 +11,7 @@
19056 #include <asm/kexec.h>
19057 #include <asm/processor-flags.h>
19058 #include <asm/pgtable_types.h>
19059 +#include <asm/alternative-asm.h>
19060
19061 /*
19062 * Must be relocatable PIC code callable as a C function
19063 @@ -160,13 +161,14 @@ identity_mapped:
19064 xorq %rbp, %rbp
19065 xorq %r8, %r8
19066 xorq %r9, %r9
19067 - xorq %r10, %r9
19068 + xorq %r10, %r10
19069 xorq %r11, %r11
19070 xorq %r12, %r12
19071 xorq %r13, %r13
19072 xorq %r14, %r14
19073 xorq %r15, %r15
19074
19075 + pax_force_retaddr 0, 1
19076 ret
19077
19078 1:
19079 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19080 index d7d5099..28555d0 100644
19081 --- a/arch/x86/kernel/setup.c
19082 +++ b/arch/x86/kernel/setup.c
19083 @@ -448,7 +448,7 @@ static void __init parse_setup_data(void)
19084
19085 switch (data->type) {
19086 case SETUP_E820_EXT:
19087 - parse_e820_ext(data);
19088 + parse_e820_ext((struct setup_data __force_kernel *)data);
19089 break;
19090 case SETUP_DTB:
19091 add_dtb(pa_data);
19092 @@ -649,7 +649,7 @@ static void __init trim_bios_range(void)
19093 * area (640->1Mb) as ram even though it is not.
19094 * take them out.
19095 */
19096 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19097 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19098 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19099 }
19100
19101 @@ -767,14 +767,14 @@ void __init setup_arch(char **cmdline_p)
19102
19103 if (!boot_params.hdr.root_flags)
19104 root_mountflags &= ~MS_RDONLY;
19105 - init_mm.start_code = (unsigned long) _text;
19106 - init_mm.end_code = (unsigned long) _etext;
19107 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19108 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19109 init_mm.end_data = (unsigned long) _edata;
19110 init_mm.brk = _brk_end;
19111
19112 - code_resource.start = virt_to_phys(_text);
19113 - code_resource.end = virt_to_phys(_etext)-1;
19114 - data_resource.start = virt_to_phys(_etext);
19115 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19116 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19117 + data_resource.start = virt_to_phys(_sdata);
19118 data_resource.end = virt_to_phys(_edata)-1;
19119 bss_resource.start = virt_to_phys(&__bss_start);
19120 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19121 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19122 index 5a98aa2..2f9288d 100644
19123 --- a/arch/x86/kernel/setup_percpu.c
19124 +++ b/arch/x86/kernel/setup_percpu.c
19125 @@ -21,19 +21,17 @@
19126 #include <asm/cpu.h>
19127 #include <asm/stackprotector.h>
19128
19129 -DEFINE_PER_CPU(int, cpu_number);
19130 +#ifdef CONFIG_SMP
19131 +DEFINE_PER_CPU(unsigned int, cpu_number);
19132 EXPORT_PER_CPU_SYMBOL(cpu_number);
19133 +#endif
19134
19135 -#ifdef CONFIG_X86_64
19136 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19137 -#else
19138 -#define BOOT_PERCPU_OFFSET 0
19139 -#endif
19140
19141 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19142 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19143
19144 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19145 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19146 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19147 };
19148 EXPORT_SYMBOL(__per_cpu_offset);
19149 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19150 {
19151 #ifdef CONFIG_X86_32
19152 struct desc_struct gdt;
19153 + unsigned long base = per_cpu_offset(cpu);
19154
19155 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19156 - 0x2 | DESCTYPE_S, 0x8);
19157 - gdt.s = 1;
19158 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19159 + 0x83 | DESCTYPE_S, 0xC);
19160 write_gdt_entry(get_cpu_gdt_table(cpu),
19161 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19162 #endif
19163 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19164 /* alrighty, percpu areas up and running */
19165 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19166 for_each_possible_cpu(cpu) {
19167 +#ifdef CONFIG_CC_STACKPROTECTOR
19168 +#ifdef CONFIG_X86_32
19169 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19170 +#endif
19171 +#endif
19172 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19173 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19174 per_cpu(cpu_number, cpu) = cpu;
19175 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19176 */
19177 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19178 #endif
19179 +#ifdef CONFIG_CC_STACKPROTECTOR
19180 +#ifdef CONFIG_X86_32
19181 + if (!cpu)
19182 + per_cpu(stack_canary.canary, cpu) = canary;
19183 +#endif
19184 +#endif
19185 /*
19186 * Up to this point, the boot CPU has been using .init.data
19187 * area. Reload any changed state for the boot CPU.
19188 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19189 index 46a01bd..2e88e6d 100644
19190 --- a/arch/x86/kernel/signal.c
19191 +++ b/arch/x86/kernel/signal.c
19192 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
19193 * Align the stack pointer according to the i386 ABI,
19194 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19195 */
19196 - sp = ((sp + 4) & -16ul) - 4;
19197 + sp = ((sp - 12) & -16ul) - 4;
19198 #else /* !CONFIG_X86_32 */
19199 sp = round_down(sp, 16) - 8;
19200 #endif
19201 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19202 * Return an always-bogus address instead so we will die with SIGSEGV.
19203 */
19204 if (onsigstack && !likely(on_sig_stack(sp)))
19205 - return (void __user *)-1L;
19206 + return (__force void __user *)-1L;
19207
19208 /* save i387 state */
19209 if (used_math() && save_i387_xstate(*fpstate) < 0)
19210 - return (void __user *)-1L;
19211 + return (__force void __user *)-1L;
19212
19213 return (void __user *)sp;
19214 }
19215 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19216 }
19217
19218 if (current->mm->context.vdso)
19219 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19220 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19221 else
19222 - restorer = &frame->retcode;
19223 + restorer = (void __user *)&frame->retcode;
19224 if (ka->sa.sa_flags & SA_RESTORER)
19225 restorer = ka->sa.sa_restorer;
19226
19227 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19228 * reasons and because gdb uses it as a signature to notice
19229 * signal handler stack frames.
19230 */
19231 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19232 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19233
19234 if (err)
19235 return -EFAULT;
19236 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19237 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19238
19239 /* Set up to return from userspace. */
19240 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19241 + if (current->mm->context.vdso)
19242 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19243 + else
19244 + restorer = (void __user *)&frame->retcode;
19245 if (ka->sa.sa_flags & SA_RESTORER)
19246 restorer = ka->sa.sa_restorer;
19247 put_user_ex(restorer, &frame->pretcode);
19248 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19249 * reasons and because gdb uses it as a signature to notice
19250 * signal handler stack frames.
19251 */
19252 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19253 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19254 } put_user_catch(err);
19255
19256 if (err)
19257 @@ -765,7 +768,7 @@ static void do_signal(struct pt_regs *regs)
19258 * X86_32: vm86 regs switched out by assembly code before reaching
19259 * here, so testing against kernel CS suffices.
19260 */
19261 - if (!user_mode(regs))
19262 + if (!user_mode_novm(regs))
19263 return;
19264
19265 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
19266 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19267 index 66d250c..f1b10bd 100644
19268 --- a/arch/x86/kernel/smpboot.c
19269 +++ b/arch/x86/kernel/smpboot.c
19270 @@ -715,17 +715,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19271 set_idle_for_cpu(cpu, c_idle.idle);
19272 do_rest:
19273 per_cpu(current_task, cpu) = c_idle.idle;
19274 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19275 #ifdef CONFIG_X86_32
19276 /* Stack for startup_32 can be just as for start_secondary onwards */
19277 irq_ctx_init(cpu);
19278 #else
19279 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19280 initial_gs = per_cpu_offset(cpu);
19281 - per_cpu(kernel_stack, cpu) =
19282 - (unsigned long)task_stack_page(c_idle.idle) -
19283 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19284 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19285 #endif
19286 +
19287 + pax_open_kernel();
19288 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19289 + pax_close_kernel();
19290 +
19291 initial_code = (unsigned long)start_secondary;
19292 stack_start = c_idle.idle->thread.sp;
19293
19294 @@ -868,6 +871,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19295
19296 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19297
19298 +#ifdef CONFIG_PAX_PER_CPU_PGD
19299 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19300 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19301 + KERNEL_PGD_PTRS);
19302 +#endif
19303 +
19304 err = do_boot_cpu(apicid, cpu);
19305 if (err) {
19306 pr_debug("do_boot_cpu failed %d\n", err);
19307 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19308 index c346d11..d43b163 100644
19309 --- a/arch/x86/kernel/step.c
19310 +++ b/arch/x86/kernel/step.c
19311 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19312 struct desc_struct *desc;
19313 unsigned long base;
19314
19315 - seg &= ~7UL;
19316 + seg >>= 3;
19317
19318 mutex_lock(&child->mm->context.lock);
19319 - if (unlikely((seg >> 3) >= child->mm->context.size))
19320 + if (unlikely(seg >= child->mm->context.size))
19321 addr = -1L; /* bogus selector, access would fault */
19322 else {
19323 desc = child->mm->context.ldt + seg;
19324 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19325 addr += base;
19326 }
19327 mutex_unlock(&child->mm->context.lock);
19328 - }
19329 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19330 + addr = ktla_ktva(addr);
19331
19332 return addr;
19333 }
19334 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19335 unsigned char opcode[15];
19336 unsigned long addr = convert_ip_to_linear(child, regs);
19337
19338 + if (addr == -EINVAL)
19339 + return 0;
19340 +
19341 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19342 for (i = 0; i < copied; i++) {
19343 switch (opcode[i]) {
19344 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19345 index 0b0cb5f..db6b9ed 100644
19346 --- a/arch/x86/kernel/sys_i386_32.c
19347 +++ b/arch/x86/kernel/sys_i386_32.c
19348 @@ -24,17 +24,224 @@
19349
19350 #include <asm/syscalls.h>
19351
19352 -/*
19353 - * Do a system call from kernel instead of calling sys_execve so we
19354 - * end up with proper pt_regs.
19355 - */
19356 -int kernel_execve(const char *filename,
19357 - const char *const argv[],
19358 - const char *const envp[])
19359 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19360 {
19361 - long __res;
19362 - asm volatile ("int $0x80"
19363 - : "=a" (__res)
19364 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19365 - return __res;
19366 + unsigned long pax_task_size = TASK_SIZE;
19367 +
19368 +#ifdef CONFIG_PAX_SEGMEXEC
19369 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19370 + pax_task_size = SEGMEXEC_TASK_SIZE;
19371 +#endif
19372 +
19373 + if (len > pax_task_size || addr > pax_task_size - len)
19374 + return -EINVAL;
19375 +
19376 + return 0;
19377 +}
19378 +
19379 +unsigned long
19380 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19381 + unsigned long len, unsigned long pgoff, unsigned long flags)
19382 +{
19383 + struct mm_struct *mm = current->mm;
19384 + struct vm_area_struct *vma;
19385 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19386 +
19387 +#ifdef CONFIG_PAX_SEGMEXEC
19388 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19389 + pax_task_size = SEGMEXEC_TASK_SIZE;
19390 +#endif
19391 +
19392 + pax_task_size -= PAGE_SIZE;
19393 +
19394 + if (len > pax_task_size)
19395 + return -ENOMEM;
19396 +
19397 + if (flags & MAP_FIXED)
19398 + return addr;
19399 +
19400 +#ifdef CONFIG_PAX_RANDMMAP
19401 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19402 +#endif
19403 +
19404 + if (addr) {
19405 + addr = PAGE_ALIGN(addr);
19406 + if (pax_task_size - len >= addr) {
19407 + vma = find_vma(mm, addr);
19408 + if (check_heap_stack_gap(vma, addr, len))
19409 + return addr;
19410 + }
19411 + }
19412 + if (len > mm->cached_hole_size) {
19413 + start_addr = addr = mm->free_area_cache;
19414 + } else {
19415 + start_addr = addr = mm->mmap_base;
19416 + mm->cached_hole_size = 0;
19417 + }
19418 +
19419 +#ifdef CONFIG_PAX_PAGEEXEC
19420 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19421 + start_addr = 0x00110000UL;
19422 +
19423 +#ifdef CONFIG_PAX_RANDMMAP
19424 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19425 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19426 +#endif
19427 +
19428 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19429 + start_addr = addr = mm->mmap_base;
19430 + else
19431 + addr = start_addr;
19432 + }
19433 +#endif
19434 +
19435 +full_search:
19436 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19437 + /* At this point: (!vma || addr < vma->vm_end). */
19438 + if (pax_task_size - len < addr) {
19439 + /*
19440 + * Start a new search - just in case we missed
19441 + * some holes.
19442 + */
19443 + if (start_addr != mm->mmap_base) {
19444 + start_addr = addr = mm->mmap_base;
19445 + mm->cached_hole_size = 0;
19446 + goto full_search;
19447 + }
19448 + return -ENOMEM;
19449 + }
19450 + if (check_heap_stack_gap(vma, addr, len))
19451 + break;
19452 + if (addr + mm->cached_hole_size < vma->vm_start)
19453 + mm->cached_hole_size = vma->vm_start - addr;
19454 + addr = vma->vm_end;
19455 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19456 + start_addr = addr = mm->mmap_base;
19457 + mm->cached_hole_size = 0;
19458 + goto full_search;
19459 + }
19460 + }
19461 +
19462 + /*
19463 + * Remember the place where we stopped the search:
19464 + */
19465 + mm->free_area_cache = addr + len;
19466 + return addr;
19467 +}
19468 +
19469 +unsigned long
19470 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19471 + const unsigned long len, const unsigned long pgoff,
19472 + const unsigned long flags)
19473 +{
19474 + struct vm_area_struct *vma;
19475 + struct mm_struct *mm = current->mm;
19476 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19477 +
19478 +#ifdef CONFIG_PAX_SEGMEXEC
19479 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19480 + pax_task_size = SEGMEXEC_TASK_SIZE;
19481 +#endif
19482 +
19483 + pax_task_size -= PAGE_SIZE;
19484 +
19485 + /* requested length too big for entire address space */
19486 + if (len > pax_task_size)
19487 + return -ENOMEM;
19488 +
19489 + if (flags & MAP_FIXED)
19490 + return addr;
19491 +
19492 +#ifdef CONFIG_PAX_PAGEEXEC
19493 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19494 + goto bottomup;
19495 +#endif
19496 +
19497 +#ifdef CONFIG_PAX_RANDMMAP
19498 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19499 +#endif
19500 +
19501 + /* requesting a specific address */
19502 + if (addr) {
19503 + addr = PAGE_ALIGN(addr);
19504 + if (pax_task_size - len >= addr) {
19505 + vma = find_vma(mm, addr);
19506 + if (check_heap_stack_gap(vma, addr, len))
19507 + return addr;
19508 + }
19509 + }
19510 +
19511 + /* check if free_area_cache is useful for us */
19512 + if (len <= mm->cached_hole_size) {
19513 + mm->cached_hole_size = 0;
19514 + mm->free_area_cache = mm->mmap_base;
19515 + }
19516 +
19517 + /* either no address requested or can't fit in requested address hole */
19518 + addr = mm->free_area_cache;
19519 +
19520 + /* make sure it can fit in the remaining address space */
19521 + if (addr > len) {
19522 + vma = find_vma(mm, addr-len);
19523 + if (check_heap_stack_gap(vma, addr - len, len))
19524 + /* remember the address as a hint for next time */
19525 + return (mm->free_area_cache = addr-len);
19526 + }
19527 +
19528 + if (mm->mmap_base < len)
19529 + goto bottomup;
19530 +
19531 + addr = mm->mmap_base-len;
19532 +
19533 + do {
19534 + /*
19535 + * Lookup failure means no vma is above this address,
19536 + * else if new region fits below vma->vm_start,
19537 + * return with success:
19538 + */
19539 + vma = find_vma(mm, addr);
19540 + if (check_heap_stack_gap(vma, addr, len))
19541 + /* remember the address as a hint for next time */
19542 + return (mm->free_area_cache = addr);
19543 +
19544 + /* remember the largest hole we saw so far */
19545 + if (addr + mm->cached_hole_size < vma->vm_start)
19546 + mm->cached_hole_size = vma->vm_start - addr;
19547 +
19548 + /* try just below the current vma->vm_start */
19549 + addr = skip_heap_stack_gap(vma, len);
19550 + } while (!IS_ERR_VALUE(addr));
19551 +
19552 +bottomup:
19553 + /*
19554 + * A failed mmap() very likely causes application failure,
19555 + * so fall back to the bottom-up function here. This scenario
19556 + * can happen with large stack limits and large mmap()
19557 + * allocations.
19558 + */
19559 +
19560 +#ifdef CONFIG_PAX_SEGMEXEC
19561 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19562 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19563 + else
19564 +#endif
19565 +
19566 + mm->mmap_base = TASK_UNMAPPED_BASE;
19567 +
19568 +#ifdef CONFIG_PAX_RANDMMAP
19569 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19570 + mm->mmap_base += mm->delta_mmap;
19571 +#endif
19572 +
19573 + mm->free_area_cache = mm->mmap_base;
19574 + mm->cached_hole_size = ~0UL;
19575 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19576 + /*
19577 + * Restore the topdown base:
19578 + */
19579 + mm->mmap_base = base;
19580 + mm->free_area_cache = base;
19581 + mm->cached_hole_size = ~0UL;
19582 +
19583 + return addr;
19584 }
19585 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19586 index 0514890..3dbebce 100644
19587 --- a/arch/x86/kernel/sys_x86_64.c
19588 +++ b/arch/x86/kernel/sys_x86_64.c
19589 @@ -95,8 +95,8 @@ out:
19590 return error;
19591 }
19592
19593 -static void find_start_end(unsigned long flags, unsigned long *begin,
19594 - unsigned long *end)
19595 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19596 + unsigned long *begin, unsigned long *end)
19597 {
19598 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19599 unsigned long new_begin;
19600 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19601 *begin = new_begin;
19602 }
19603 } else {
19604 - *begin = TASK_UNMAPPED_BASE;
19605 + *begin = mm->mmap_base;
19606 *end = TASK_SIZE;
19607 }
19608 }
19609 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19610 if (flags & MAP_FIXED)
19611 return addr;
19612
19613 - find_start_end(flags, &begin, &end);
19614 + find_start_end(mm, flags, &begin, &end);
19615
19616 if (len > end)
19617 return -ENOMEM;
19618
19619 +#ifdef CONFIG_PAX_RANDMMAP
19620 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19621 +#endif
19622 +
19623 if (addr) {
19624 addr = PAGE_ALIGN(addr);
19625 vma = find_vma(mm, addr);
19626 - if (end - len >= addr &&
19627 - (!vma || addr + len <= vma->vm_start))
19628 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19629 return addr;
19630 }
19631 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19632 @@ -172,7 +175,7 @@ full_search:
19633 }
19634 return -ENOMEM;
19635 }
19636 - if (!vma || addr + len <= vma->vm_start) {
19637 + if (check_heap_stack_gap(vma, addr, len)) {
19638 /*
19639 * Remember the place where we stopped the search:
19640 */
19641 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19642 {
19643 struct vm_area_struct *vma;
19644 struct mm_struct *mm = current->mm;
19645 - unsigned long addr = addr0;
19646 + unsigned long base = mm->mmap_base, addr = addr0;
19647
19648 /* requested length too big for entire address space */
19649 if (len > TASK_SIZE)
19650 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19651 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19652 goto bottomup;
19653
19654 +#ifdef CONFIG_PAX_RANDMMAP
19655 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19656 +#endif
19657 +
19658 /* requesting a specific address */
19659 if (addr) {
19660 addr = PAGE_ALIGN(addr);
19661 - vma = find_vma(mm, addr);
19662 - if (TASK_SIZE - len >= addr &&
19663 - (!vma || addr + len <= vma->vm_start))
19664 - return addr;
19665 + if (TASK_SIZE - len >= addr) {
19666 + vma = find_vma(mm, addr);
19667 + if (check_heap_stack_gap(vma, addr, len))
19668 + return addr;
19669 + }
19670 }
19671
19672 /* check if free_area_cache is useful for us */
19673 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19674 ALIGN_TOPDOWN);
19675
19676 vma = find_vma(mm, tmp_addr);
19677 - if (!vma || tmp_addr + len <= vma->vm_start)
19678 + if (check_heap_stack_gap(vma, tmp_addr, len))
19679 /* remember the address as a hint for next time */
19680 return mm->free_area_cache = tmp_addr;
19681 }
19682 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19683 * return with success:
19684 */
19685 vma = find_vma(mm, addr);
19686 - if (!vma || addr+len <= vma->vm_start)
19687 + if (check_heap_stack_gap(vma, addr, len))
19688 /* remember the address as a hint for next time */
19689 return mm->free_area_cache = addr;
19690
19691 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19692 mm->cached_hole_size = vma->vm_start - addr;
19693
19694 /* try just below the current vma->vm_start */
19695 - addr = vma->vm_start-len;
19696 - } while (len < vma->vm_start);
19697 + addr = skip_heap_stack_gap(vma, len);
19698 + } while (!IS_ERR_VALUE(addr));
19699
19700 bottomup:
19701 /*
19702 @@ -270,13 +278,21 @@ bottomup:
19703 * can happen with large stack limits and large mmap()
19704 * allocations.
19705 */
19706 + mm->mmap_base = TASK_UNMAPPED_BASE;
19707 +
19708 +#ifdef CONFIG_PAX_RANDMMAP
19709 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19710 + mm->mmap_base += mm->delta_mmap;
19711 +#endif
19712 +
19713 + mm->free_area_cache = mm->mmap_base;
19714 mm->cached_hole_size = ~0UL;
19715 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19716 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19717 /*
19718 * Restore the topdown base:
19719 */
19720 - mm->free_area_cache = mm->mmap_base;
19721 + mm->mmap_base = base;
19722 + mm->free_area_cache = base;
19723 mm->cached_hole_size = ~0UL;
19724
19725 return addr;
19726 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19727 index e2410e2..4fe3fbc 100644
19728 --- a/arch/x86/kernel/tboot.c
19729 +++ b/arch/x86/kernel/tboot.c
19730 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19731
19732 void tboot_shutdown(u32 shutdown_type)
19733 {
19734 - void (*shutdown)(void);
19735 + void (* __noreturn shutdown)(void);
19736
19737 if (!tboot_enabled())
19738 return;
19739 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19740
19741 switch_to_tboot_pt();
19742
19743 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19744 + shutdown = (void *)tboot->shutdown_entry;
19745 shutdown();
19746
19747 /* should not reach here */
19748 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19749 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19750 }
19751
19752 -static atomic_t ap_wfs_count;
19753 +static atomic_unchecked_t ap_wfs_count;
19754
19755 static int tboot_wait_for_aps(int num_aps)
19756 {
19757 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19758 {
19759 switch (action) {
19760 case CPU_DYING:
19761 - atomic_inc(&ap_wfs_count);
19762 + atomic_inc_unchecked(&ap_wfs_count);
19763 if (num_online_cpus() == 1)
19764 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19765 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19766 return NOTIFY_BAD;
19767 break;
19768 }
19769 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
19770
19771 tboot_create_trampoline();
19772
19773 - atomic_set(&ap_wfs_count, 0);
19774 + atomic_set_unchecked(&ap_wfs_count, 0);
19775 register_hotcpu_notifier(&tboot_cpu_notifier);
19776 return 0;
19777 }
19778 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19779 index dd5fbf4..b7f2232 100644
19780 --- a/arch/x86/kernel/time.c
19781 +++ b/arch/x86/kernel/time.c
19782 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19783 {
19784 unsigned long pc = instruction_pointer(regs);
19785
19786 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19787 + if (!user_mode(regs) && in_lock_functions(pc)) {
19788 #ifdef CONFIG_FRAME_POINTER
19789 - return *(unsigned long *)(regs->bp + sizeof(long));
19790 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19791 #else
19792 unsigned long *sp =
19793 (unsigned long *)kernel_stack_pointer(regs);
19794 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19795 * or above a saved flags. Eflags has bits 22-31 zero,
19796 * kernel addresses don't.
19797 */
19798 +
19799 +#ifdef CONFIG_PAX_KERNEXEC
19800 + return ktla_ktva(sp[0]);
19801 +#else
19802 if (sp[0] >> 22)
19803 return sp[0];
19804 if (sp[1] >> 22)
19805 return sp[1];
19806 #endif
19807 +
19808 +#endif
19809 }
19810 return pc;
19811 }
19812 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19813 index bcfec2d..8f88b4a 100644
19814 --- a/arch/x86/kernel/tls.c
19815 +++ b/arch/x86/kernel/tls.c
19816 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19817 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19818 return -EINVAL;
19819
19820 +#ifdef CONFIG_PAX_SEGMEXEC
19821 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19822 + return -EINVAL;
19823 +#endif
19824 +
19825 set_tls_desc(p, idx, &info, 1);
19826
19827 return 0;
19828 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19829 index 451c0a7..e57f551 100644
19830 --- a/arch/x86/kernel/trampoline_32.S
19831 +++ b/arch/x86/kernel/trampoline_32.S
19832 @@ -32,6 +32,12 @@
19833 #include <asm/segment.h>
19834 #include <asm/page_types.h>
19835
19836 +#ifdef CONFIG_PAX_KERNEXEC
19837 +#define ta(X) (X)
19838 +#else
19839 +#define ta(X) ((X) - __PAGE_OFFSET)
19840 +#endif
19841 +
19842 #ifdef CONFIG_SMP
19843
19844 .section ".x86_trampoline","a"
19845 @@ -62,7 +68,7 @@ r_base = .
19846 inc %ax # protected mode (PE) bit
19847 lmsw %ax # into protected mode
19848 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19849 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19850 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19851
19852 # These need to be in the same 64K segment as the above;
19853 # hence we don't use the boot_gdt_descr defined in head.S
19854 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19855 index 09ff517..df19fbff 100644
19856 --- a/arch/x86/kernel/trampoline_64.S
19857 +++ b/arch/x86/kernel/trampoline_64.S
19858 @@ -90,7 +90,7 @@ startup_32:
19859 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19860 movl %eax, %ds
19861
19862 - movl $X86_CR4_PAE, %eax
19863 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19864 movl %eax, %cr4 # Enable PAE mode
19865
19866 # Setup trampoline 4 level pagetables
19867 @@ -138,7 +138,7 @@ tidt:
19868 # so the kernel can live anywhere
19869 .balign 4
19870 tgdt:
19871 - .short tgdt_end - tgdt # gdt limit
19872 + .short tgdt_end - tgdt - 1 # gdt limit
19873 .long tgdt - r_base
19874 .short 0
19875 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19876 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19877 index 4bbe04d..41d0943 100644
19878 --- a/arch/x86/kernel/traps.c
19879 +++ b/arch/x86/kernel/traps.c
19880 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19881
19882 /* Do we ignore FPU interrupts ? */
19883 char ignore_fpu_irq;
19884 -
19885 -/*
19886 - * The IDT has to be page-aligned to simplify the Pentium
19887 - * F0 0F bug workaround.
19888 - */
19889 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19890 #endif
19891
19892 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19893 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19894 }
19895
19896 static void __kprobes
19897 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19898 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19899 long error_code, siginfo_t *info)
19900 {
19901 struct task_struct *tsk = current;
19902
19903 #ifdef CONFIG_X86_32
19904 - if (regs->flags & X86_VM_MASK) {
19905 + if (v8086_mode(regs)) {
19906 /*
19907 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19908 * On nmi (interrupt 2), do_trap should not be called.
19909 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19910 }
19911 #endif
19912
19913 - if (!user_mode(regs))
19914 + if (!user_mode_novm(regs))
19915 goto kernel_trap;
19916
19917 #ifdef CONFIG_X86_32
19918 @@ -148,7 +142,7 @@ trap_signal:
19919 printk_ratelimit()) {
19920 printk(KERN_INFO
19921 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19922 - tsk->comm, tsk->pid, str,
19923 + tsk->comm, task_pid_nr(tsk), str,
19924 regs->ip, regs->sp, error_code);
19925 print_vma_addr(" in ", regs->ip);
19926 printk("\n");
19927 @@ -165,8 +159,20 @@ kernel_trap:
19928 if (!fixup_exception(regs)) {
19929 tsk->thread.error_code = error_code;
19930 tsk->thread.trap_no = trapnr;
19931 +
19932 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19933 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19934 + str = "PAX: suspicious stack segment fault";
19935 +#endif
19936 +
19937 die(str, regs, error_code);
19938 }
19939 +
19940 +#ifdef CONFIG_PAX_REFCOUNT
19941 + if (trapnr == 4)
19942 + pax_report_refcount_overflow(regs);
19943 +#endif
19944 +
19945 return;
19946
19947 #ifdef CONFIG_X86_32
19948 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19949 conditional_sti(regs);
19950
19951 #ifdef CONFIG_X86_32
19952 - if (regs->flags & X86_VM_MASK)
19953 + if (v8086_mode(regs))
19954 goto gp_in_vm86;
19955 #endif
19956
19957 tsk = current;
19958 - if (!user_mode(regs))
19959 + if (!user_mode_novm(regs))
19960 goto gp_in_kernel;
19961
19962 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19963 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19964 + struct mm_struct *mm = tsk->mm;
19965 + unsigned long limit;
19966 +
19967 + down_write(&mm->mmap_sem);
19968 + limit = mm->context.user_cs_limit;
19969 + if (limit < TASK_SIZE) {
19970 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19971 + up_write(&mm->mmap_sem);
19972 + return;
19973 + }
19974 + up_write(&mm->mmap_sem);
19975 + }
19976 +#endif
19977 +
19978 tsk->thread.error_code = error_code;
19979 tsk->thread.trap_no = 13;
19980
19981 @@ -295,6 +317,13 @@ gp_in_kernel:
19982 if (notify_die(DIE_GPF, "general protection fault", regs,
19983 error_code, 13, SIGSEGV) == NOTIFY_STOP)
19984 return;
19985 +
19986 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19987 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19988 + die("PAX: suspicious general protection fault", regs, error_code);
19989 + else
19990 +#endif
19991 +
19992 die("general protection fault", regs, error_code);
19993 }
19994
19995 @@ -421,7 +450,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19996 /* It's safe to allow irq's after DR6 has been saved */
19997 preempt_conditional_sti(regs);
19998
19999 - if (regs->flags & X86_VM_MASK) {
20000 + if (v8086_mode(regs)) {
20001 handle_vm86_trap((struct kernel_vm86_regs *) regs,
20002 error_code, 1);
20003 preempt_conditional_cli(regs);
20004 @@ -436,7 +465,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20005 * We already checked v86 mode above, so we can check for kernel mode
20006 * by just checking the CPL of CS.
20007 */
20008 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
20009 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
20010 tsk->thread.debugreg6 &= ~DR_STEP;
20011 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
20012 regs->flags &= ~X86_EFLAGS_TF;
20013 @@ -466,7 +495,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
20014 return;
20015 conditional_sti(regs);
20016
20017 - if (!user_mode_vm(regs))
20018 + if (!user_mode(regs))
20019 {
20020 if (!fixup_exception(regs)) {
20021 task->thread.error_code = error_code;
20022 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20023 index b9242ba..50c5edd 100644
20024 --- a/arch/x86/kernel/verify_cpu.S
20025 +++ b/arch/x86/kernel/verify_cpu.S
20026 @@ -20,6 +20,7 @@
20027 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20028 * arch/x86/kernel/trampoline_64.S: secondary processor verification
20029 * arch/x86/kernel/head_32.S: processor startup
20030 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20031 *
20032 * verify_cpu, returns the status of longmode and SSE in register %eax.
20033 * 0: Success 1: Failure
20034 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20035 index 328cb37..56556b4 100644
20036 --- a/arch/x86/kernel/vm86_32.c
20037 +++ b/arch/x86/kernel/vm86_32.c
20038 @@ -41,6 +41,7 @@
20039 #include <linux/ptrace.h>
20040 #include <linux/audit.h>
20041 #include <linux/stddef.h>
20042 +#include <linux/grsecurity.h>
20043
20044 #include <asm/uaccess.h>
20045 #include <asm/io.h>
20046 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20047 do_exit(SIGSEGV);
20048 }
20049
20050 - tss = &per_cpu(init_tss, get_cpu());
20051 + tss = init_tss + get_cpu();
20052 current->thread.sp0 = current->thread.saved_sp0;
20053 current->thread.sysenter_cs = __KERNEL_CS;
20054 load_sp0(tss, &current->thread);
20055 @@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
20056 struct task_struct *tsk;
20057 int tmp, ret = -EPERM;
20058
20059 +#ifdef CONFIG_GRKERNSEC_VM86
20060 + if (!capable(CAP_SYS_RAWIO)) {
20061 + gr_handle_vm86();
20062 + goto out;
20063 + }
20064 +#endif
20065 +
20066 tsk = current;
20067 if (tsk->thread.saved_sp0)
20068 goto out;
20069 @@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
20070 int tmp, ret;
20071 struct vm86plus_struct __user *v86;
20072
20073 +#ifdef CONFIG_GRKERNSEC_VM86
20074 + if (!capable(CAP_SYS_RAWIO)) {
20075 + gr_handle_vm86();
20076 + ret = -EPERM;
20077 + goto out;
20078 + }
20079 +#endif
20080 +
20081 tsk = current;
20082 switch (cmd) {
20083 case VM86_REQUEST_IRQ:
20084 @@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20085 tsk->thread.saved_fs = info->regs32->fs;
20086 tsk->thread.saved_gs = get_user_gs(info->regs32);
20087
20088 - tss = &per_cpu(init_tss, get_cpu());
20089 + tss = init_tss + get_cpu();
20090 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20091 if (cpu_has_sep)
20092 tsk->thread.sysenter_cs = 0;
20093 @@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20094 goto cannot_handle;
20095 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20096 goto cannot_handle;
20097 - intr_ptr = (unsigned long __user *) (i << 2);
20098 + intr_ptr = (__force unsigned long __user *) (i << 2);
20099 if (get_user(segoffs, intr_ptr))
20100 goto cannot_handle;
20101 if ((segoffs >> 16) == BIOSSEG)
20102 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20103 index 0f703f1..9e15f64 100644
20104 --- a/arch/x86/kernel/vmlinux.lds.S
20105 +++ b/arch/x86/kernel/vmlinux.lds.S
20106 @@ -26,6 +26,13 @@
20107 #include <asm/page_types.h>
20108 #include <asm/cache.h>
20109 #include <asm/boot.h>
20110 +#include <asm/segment.h>
20111 +
20112 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20113 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20114 +#else
20115 +#define __KERNEL_TEXT_OFFSET 0
20116 +#endif
20117
20118 #undef i386 /* in case the preprocessor is a 32bit one */
20119
20120 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
20121
20122 PHDRS {
20123 text PT_LOAD FLAGS(5); /* R_E */
20124 +#ifdef CONFIG_X86_32
20125 + module PT_LOAD FLAGS(5); /* R_E */
20126 +#endif
20127 +#ifdef CONFIG_XEN
20128 + rodata PT_LOAD FLAGS(5); /* R_E */
20129 +#else
20130 + rodata PT_LOAD FLAGS(4); /* R__ */
20131 +#endif
20132 data PT_LOAD FLAGS(6); /* RW_ */
20133 -#ifdef CONFIG_X86_64
20134 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20135 #ifdef CONFIG_SMP
20136 percpu PT_LOAD FLAGS(6); /* RW_ */
20137 #endif
20138 + text.init PT_LOAD FLAGS(5); /* R_E */
20139 + text.exit PT_LOAD FLAGS(5); /* R_E */
20140 init PT_LOAD FLAGS(7); /* RWE */
20141 -#endif
20142 note PT_NOTE FLAGS(0); /* ___ */
20143 }
20144
20145 SECTIONS
20146 {
20147 #ifdef CONFIG_X86_32
20148 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20149 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20150 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20151 #else
20152 - . = __START_KERNEL;
20153 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20154 + . = __START_KERNEL;
20155 #endif
20156
20157 /* Text and read-only data */
20158 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20159 - _text = .;
20160 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20161 /* bootstrapping code */
20162 +#ifdef CONFIG_X86_32
20163 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20164 +#else
20165 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20166 +#endif
20167 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20168 + _text = .;
20169 HEAD_TEXT
20170 #ifdef CONFIG_X86_32
20171 . = ALIGN(PAGE_SIZE);
20172 @@ -108,13 +128,47 @@ SECTIONS
20173 IRQENTRY_TEXT
20174 *(.fixup)
20175 *(.gnu.warning)
20176 - /* End of text section */
20177 - _etext = .;
20178 } :text = 0x9090
20179
20180 - NOTES :text :note
20181 + . += __KERNEL_TEXT_OFFSET;
20182
20183 - EXCEPTION_TABLE(16) :text = 0x9090
20184 +#ifdef CONFIG_X86_32
20185 + . = ALIGN(PAGE_SIZE);
20186 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20187 +
20188 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20189 + MODULES_EXEC_VADDR = .;
20190 + BYTE(0)
20191 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20192 + . = ALIGN(HPAGE_SIZE);
20193 + MODULES_EXEC_END = . - 1;
20194 +#endif
20195 +
20196 + } :module
20197 +#endif
20198 +
20199 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20200 + /* End of text section */
20201 + _etext = . - __KERNEL_TEXT_OFFSET;
20202 + }
20203 +
20204 +#ifdef CONFIG_X86_32
20205 + . = ALIGN(PAGE_SIZE);
20206 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20207 + *(.idt)
20208 + . = ALIGN(PAGE_SIZE);
20209 + *(.empty_zero_page)
20210 + *(.initial_pg_fixmap)
20211 + *(.initial_pg_pmd)
20212 + *(.initial_page_table)
20213 + *(.swapper_pg_dir)
20214 + } :rodata
20215 +#endif
20216 +
20217 + . = ALIGN(PAGE_SIZE);
20218 + NOTES :rodata :note
20219 +
20220 + EXCEPTION_TABLE(16) :rodata
20221
20222 #if defined(CONFIG_DEBUG_RODATA)
20223 /* .text should occupy whole number of pages */
20224 @@ -126,16 +180,20 @@ SECTIONS
20225
20226 /* Data */
20227 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20228 +
20229 +#ifdef CONFIG_PAX_KERNEXEC
20230 + . = ALIGN(HPAGE_SIZE);
20231 +#else
20232 + . = ALIGN(PAGE_SIZE);
20233 +#endif
20234 +
20235 /* Start of data section */
20236 _sdata = .;
20237
20238 /* init_task */
20239 INIT_TASK_DATA(THREAD_SIZE)
20240
20241 -#ifdef CONFIG_X86_32
20242 - /* 32 bit has nosave before _edata */
20243 NOSAVE_DATA
20244 -#endif
20245
20246 PAGE_ALIGNED_DATA(PAGE_SIZE)
20247
20248 @@ -176,12 +234,19 @@ SECTIONS
20249 #endif /* CONFIG_X86_64 */
20250
20251 /* Init code and data - will be freed after init */
20252 - . = ALIGN(PAGE_SIZE);
20253 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20254 + BYTE(0)
20255 +
20256 +#ifdef CONFIG_PAX_KERNEXEC
20257 + . = ALIGN(HPAGE_SIZE);
20258 +#else
20259 + . = ALIGN(PAGE_SIZE);
20260 +#endif
20261 +
20262 __init_begin = .; /* paired with __init_end */
20263 - }
20264 + } :init.begin
20265
20266 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20267 +#ifdef CONFIG_SMP
20268 /*
20269 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20270 * output PHDR, so the next output section - .init.text - should
20271 @@ -190,12 +255,27 @@ SECTIONS
20272 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20273 #endif
20274
20275 - INIT_TEXT_SECTION(PAGE_SIZE)
20276 -#ifdef CONFIG_X86_64
20277 - :init
20278 -#endif
20279 + . = ALIGN(PAGE_SIZE);
20280 + init_begin = .;
20281 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20282 + VMLINUX_SYMBOL(_sinittext) = .;
20283 + INIT_TEXT
20284 + VMLINUX_SYMBOL(_einittext) = .;
20285 + . = ALIGN(PAGE_SIZE);
20286 + } :text.init
20287
20288 - INIT_DATA_SECTION(16)
20289 + /*
20290 + * .exit.text is discarded at runtime, not link time, to deal with
20291 + * references from .altinstructions and .eh_frame
20292 + */
20293 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20294 + EXIT_TEXT
20295 + . = ALIGN(16);
20296 + } :text.exit
20297 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20298 +
20299 + . = ALIGN(PAGE_SIZE);
20300 + INIT_DATA_SECTION(16) :init
20301
20302 /*
20303 * Code and data for a variety of lowlevel trampolines, to be
20304 @@ -269,19 +349,12 @@ SECTIONS
20305 }
20306
20307 . = ALIGN(8);
20308 - /*
20309 - * .exit.text is discard at runtime, not link time, to deal with
20310 - * references from .altinstructions and .eh_frame
20311 - */
20312 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20313 - EXIT_TEXT
20314 - }
20315
20316 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20317 EXIT_DATA
20318 }
20319
20320 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20321 +#ifndef CONFIG_SMP
20322 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20323 #endif
20324
20325 @@ -300,16 +373,10 @@ SECTIONS
20326 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20327 __smp_locks = .;
20328 *(.smp_locks)
20329 - . = ALIGN(PAGE_SIZE);
20330 __smp_locks_end = .;
20331 + . = ALIGN(PAGE_SIZE);
20332 }
20333
20334 -#ifdef CONFIG_X86_64
20335 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20336 - NOSAVE_DATA
20337 - }
20338 -#endif
20339 -
20340 /* BSS */
20341 . = ALIGN(PAGE_SIZE);
20342 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20343 @@ -325,6 +392,7 @@ SECTIONS
20344 __brk_base = .;
20345 . += 64 * 1024; /* 64k alignment slop space */
20346 *(.brk_reservation) /* areas brk users have reserved */
20347 + . = ALIGN(HPAGE_SIZE);
20348 __brk_limit = .;
20349 }
20350
20351 @@ -351,13 +419,12 @@ SECTIONS
20352 * for the boot processor.
20353 */
20354 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20355 -INIT_PER_CPU(gdt_page);
20356 INIT_PER_CPU(irq_stack_union);
20357
20358 /*
20359 * Build-time check on the image size:
20360 */
20361 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20362 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20363 "kernel image bigger than KERNEL_IMAGE_SIZE");
20364
20365 #ifdef CONFIG_SMP
20366 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20367 index b07ba93..a212969 100644
20368 --- a/arch/x86/kernel/vsyscall_64.c
20369 +++ b/arch/x86/kernel/vsyscall_64.c
20370 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
20371 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
20372 };
20373
20374 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20375 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20376
20377 static int __init vsyscall_setup(char *str)
20378 {
20379 if (str) {
20380 if (!strcmp("emulate", str))
20381 vsyscall_mode = EMULATE;
20382 - else if (!strcmp("native", str))
20383 - vsyscall_mode = NATIVE;
20384 else if (!strcmp("none", str))
20385 vsyscall_mode = NONE;
20386 else
20387 @@ -207,7 +205,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20388
20389 tsk = current;
20390 if (seccomp_mode(&tsk->seccomp))
20391 - do_exit(SIGKILL);
20392 + do_group_exit(SIGKILL);
20393
20394 /*
20395 * With a real vsyscall, page faults cause SIGSEGV. We want to
20396 @@ -279,8 +277,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20397 return true;
20398
20399 sigsegv:
20400 - force_sig(SIGSEGV, current);
20401 - return true;
20402 + do_group_exit(SIGKILL);
20403 }
20404
20405 /*
20406 @@ -333,10 +330,7 @@ void __init map_vsyscall(void)
20407 extern char __vvar_page;
20408 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20409
20410 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20411 - vsyscall_mode == NATIVE
20412 - ? PAGE_KERNEL_VSYSCALL
20413 - : PAGE_KERNEL_VVAR);
20414 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20415 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20416 (unsigned long)VSYSCALL_START);
20417
20418 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20419 index 9796c2f..f686fbf 100644
20420 --- a/arch/x86/kernel/x8664_ksyms_64.c
20421 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20422 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20423 EXPORT_SYMBOL(copy_user_generic_string);
20424 EXPORT_SYMBOL(copy_user_generic_unrolled);
20425 EXPORT_SYMBOL(__copy_user_nocache);
20426 -EXPORT_SYMBOL(_copy_from_user);
20427 -EXPORT_SYMBOL(_copy_to_user);
20428
20429 EXPORT_SYMBOL(copy_page);
20430 EXPORT_SYMBOL(clear_page);
20431 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20432 index 7110911..e8cdee5 100644
20433 --- a/arch/x86/kernel/xsave.c
20434 +++ b/arch/x86/kernel/xsave.c
20435 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20436 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20437 return -EINVAL;
20438
20439 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20440 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20441 fx_sw_user->extended_size -
20442 FP_XSTATE_MAGIC2_SIZE));
20443 if (err)
20444 @@ -266,7 +266,7 @@ fx_only:
20445 * the other extended state.
20446 */
20447 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20448 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20449 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20450 }
20451
20452 /*
20453 @@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
20454 if (use_xsave())
20455 err = restore_user_xstate(buf);
20456 else
20457 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
20458 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20459 buf);
20460 if (unlikely(err)) {
20461 /*
20462 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20463 index 89b02bf..0f6511d 100644
20464 --- a/arch/x86/kvm/cpuid.c
20465 +++ b/arch/x86/kvm/cpuid.c
20466 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20467 struct kvm_cpuid2 *cpuid,
20468 struct kvm_cpuid_entry2 __user *entries)
20469 {
20470 - int r;
20471 + int r, i;
20472
20473 r = -E2BIG;
20474 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20475 goto out;
20476 r = -EFAULT;
20477 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20478 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20479 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20480 goto out;
20481 + for (i = 0; i < cpuid->nent; ++i) {
20482 + struct kvm_cpuid_entry2 cpuid_entry;
20483 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20484 + goto out;
20485 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
20486 + }
20487 vcpu->arch.cpuid_nent = cpuid->nent;
20488 kvm_apic_set_version(vcpu);
20489 kvm_x86_ops->cpuid_update(vcpu);
20490 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20491 struct kvm_cpuid2 *cpuid,
20492 struct kvm_cpuid_entry2 __user *entries)
20493 {
20494 - int r;
20495 + int r, i;
20496
20497 r = -E2BIG;
20498 if (cpuid->nent < vcpu->arch.cpuid_nent)
20499 goto out;
20500 r = -EFAULT;
20501 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20502 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20503 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20504 goto out;
20505 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20506 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20507 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20508 + goto out;
20509 + }
20510 return 0;
20511
20512 out:
20513 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20514 index 0982507..7f6d72f 100644
20515 --- a/arch/x86/kvm/emulate.c
20516 +++ b/arch/x86/kvm/emulate.c
20517 @@ -250,6 +250,7 @@ struct gprefix {
20518
20519 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20520 do { \
20521 + unsigned long _tmp; \
20522 __asm__ __volatile__ ( \
20523 _PRE_EFLAGS("0", "4", "2") \
20524 _op _suffix " %"_x"3,%1; " \
20525 @@ -264,8 +265,6 @@ struct gprefix {
20526 /* Raw emulation: instruction has two explicit operands. */
20527 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20528 do { \
20529 - unsigned long _tmp; \
20530 - \
20531 switch ((ctxt)->dst.bytes) { \
20532 case 2: \
20533 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20534 @@ -281,7 +280,6 @@ struct gprefix {
20535
20536 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20537 do { \
20538 - unsigned long _tmp; \
20539 switch ((ctxt)->dst.bytes) { \
20540 case 1: \
20541 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20542 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20543 index cfdc6e0..ab92e84 100644
20544 --- a/arch/x86/kvm/lapic.c
20545 +++ b/arch/x86/kvm/lapic.c
20546 @@ -54,7 +54,7 @@
20547 #define APIC_BUS_CYCLE_NS 1
20548
20549 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20550 -#define apic_debug(fmt, arg...)
20551 +#define apic_debug(fmt, arg...) do {} while (0)
20552
20553 #define APIC_LVT_NUM 6
20554 /* 14 is the version for Xeon and Pentium 8.4.8*/
20555 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20556 index 1561028..0ed7f14 100644
20557 --- a/arch/x86/kvm/paging_tmpl.h
20558 +++ b/arch/x86/kvm/paging_tmpl.h
20559 @@ -197,7 +197,7 @@ retry_walk:
20560 if (unlikely(kvm_is_error_hva(host_addr)))
20561 goto error;
20562
20563 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20564 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20565 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20566 goto error;
20567
20568 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20569 index e385214..f8df033 100644
20570 --- a/arch/x86/kvm/svm.c
20571 +++ b/arch/x86/kvm/svm.c
20572 @@ -3420,7 +3420,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20573 int cpu = raw_smp_processor_id();
20574
20575 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20576 +
20577 + pax_open_kernel();
20578 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20579 + pax_close_kernel();
20580 +
20581 load_TR_desc();
20582 }
20583
20584 @@ -3798,6 +3802,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20585 #endif
20586 #endif
20587
20588 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20589 + __set_fs(current_thread_info()->addr_limit);
20590 +#endif
20591 +
20592 reload_tss(vcpu);
20593
20594 local_irq_disable();
20595 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20596 index a7a6f60..04b745a 100644
20597 --- a/arch/x86/kvm/vmx.c
20598 +++ b/arch/x86/kvm/vmx.c
20599 @@ -1306,7 +1306,11 @@ static void reload_tss(void)
20600 struct desc_struct *descs;
20601
20602 descs = (void *)gdt->address;
20603 +
20604 + pax_open_kernel();
20605 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20606 + pax_close_kernel();
20607 +
20608 load_TR_desc();
20609 }
20610
20611 @@ -2637,8 +2641,11 @@ static __init int hardware_setup(void)
20612 if (!cpu_has_vmx_flexpriority())
20613 flexpriority_enabled = 0;
20614
20615 - if (!cpu_has_vmx_tpr_shadow())
20616 - kvm_x86_ops->update_cr8_intercept = NULL;
20617 + if (!cpu_has_vmx_tpr_shadow()) {
20618 + pax_open_kernel();
20619 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20620 + pax_close_kernel();
20621 + }
20622
20623 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20624 kvm_disable_largepages();
20625 @@ -3654,7 +3661,7 @@ static void vmx_set_constant_host_state(void)
20626 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20627
20628 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20629 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20630 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20631
20632 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20633 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20634 @@ -6192,6 +6199,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20635 "jmp .Lkvm_vmx_return \n\t"
20636 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20637 ".Lkvm_vmx_return: "
20638 +
20639 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20640 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20641 + ".Lkvm_vmx_return2: "
20642 +#endif
20643 +
20644 /* Save guest registers, load host registers, keep flags */
20645 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20646 "pop %0 \n\t"
20647 @@ -6240,6 +6253,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20648 #endif
20649 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20650 [wordsize]"i"(sizeof(ulong))
20651 +
20652 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20653 + ,[cs]"i"(__KERNEL_CS)
20654 +#endif
20655 +
20656 : "cc", "memory"
20657 , R"ax", R"bx", R"di", R"si"
20658 #ifdef CONFIG_X86_64
20659 @@ -6268,7 +6286,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20660 }
20661 }
20662
20663 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20664 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20665 +
20666 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20667 + loadsegment(fs, __KERNEL_PERCPU);
20668 +#endif
20669 +
20670 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20671 + __set_fs(current_thread_info()->addr_limit);
20672 +#endif
20673 +
20674 vmx->loaded_vmcs->launched = 1;
20675
20676 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20677 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20678 index 8d1c6c6..99c2d5f 100644
20679 --- a/arch/x86/kvm/x86.c
20680 +++ b/arch/x86/kvm/x86.c
20681 @@ -1311,8 +1311,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20682 {
20683 struct kvm *kvm = vcpu->kvm;
20684 int lm = is_long_mode(vcpu);
20685 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20686 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20687 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20688 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20689 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20690 : kvm->arch.xen_hvm_config.blob_size_32;
20691 u32 page_num = data & ~PAGE_MASK;
20692 @@ -2145,6 +2145,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20693 if (n < msr_list.nmsrs)
20694 goto out;
20695 r = -EFAULT;
20696 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20697 + goto out;
20698 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20699 num_msrs_to_save * sizeof(u32)))
20700 goto out;
20701 @@ -2266,7 +2268,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20702 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20703 struct kvm_interrupt *irq)
20704 {
20705 - if (irq->irq < 0 || irq->irq >= 256)
20706 + if (irq->irq >= 256)
20707 return -EINVAL;
20708 if (irqchip_in_kernel(vcpu->kvm))
20709 return -ENXIO;
20710 @@ -4782,7 +4784,7 @@ static void kvm_set_mmio_spte_mask(void)
20711 kvm_mmu_set_mmio_spte_mask(mask);
20712 }
20713
20714 -int kvm_arch_init(void *opaque)
20715 +int kvm_arch_init(const void *opaque)
20716 {
20717 int r;
20718 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20719 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20720 index 642d880..44e0f3f 100644
20721 --- a/arch/x86/lguest/boot.c
20722 +++ b/arch/x86/lguest/boot.c
20723 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20724 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20725 * Launcher to reboot us.
20726 */
20727 -static void lguest_restart(char *reason)
20728 +static __noreturn void lguest_restart(char *reason)
20729 {
20730 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20731 + BUG();
20732 }
20733
20734 /*G:050
20735 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
20736 index 042f682..c92afb6 100644
20737 --- a/arch/x86/lib/atomic64_32.c
20738 +++ b/arch/x86/lib/atomic64_32.c
20739 @@ -8,18 +8,30 @@
20740
20741 long long atomic64_read_cx8(long long, const atomic64_t *v);
20742 EXPORT_SYMBOL(atomic64_read_cx8);
20743 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
20744 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
20745 long long atomic64_set_cx8(long long, const atomic64_t *v);
20746 EXPORT_SYMBOL(atomic64_set_cx8);
20747 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
20748 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
20749 long long atomic64_xchg_cx8(long long, unsigned high);
20750 EXPORT_SYMBOL(atomic64_xchg_cx8);
20751 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
20752 EXPORT_SYMBOL(atomic64_add_return_cx8);
20753 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20754 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
20755 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
20756 EXPORT_SYMBOL(atomic64_sub_return_cx8);
20757 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20758 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
20759 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
20760 EXPORT_SYMBOL(atomic64_inc_return_cx8);
20761 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20762 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
20763 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
20764 EXPORT_SYMBOL(atomic64_dec_return_cx8);
20765 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20766 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
20767 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
20768 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
20769 int atomic64_inc_not_zero_cx8(atomic64_t *v);
20770 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
20771 #ifndef CONFIG_X86_CMPXCHG64
20772 long long atomic64_read_386(long long, const atomic64_t *v);
20773 EXPORT_SYMBOL(atomic64_read_386);
20774 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
20775 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
20776 long long atomic64_set_386(long long, const atomic64_t *v);
20777 EXPORT_SYMBOL(atomic64_set_386);
20778 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
20779 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
20780 long long atomic64_xchg_386(long long, unsigned high);
20781 EXPORT_SYMBOL(atomic64_xchg_386);
20782 long long atomic64_add_return_386(long long a, atomic64_t *v);
20783 EXPORT_SYMBOL(atomic64_add_return_386);
20784 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20785 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
20786 long long atomic64_sub_return_386(long long a, atomic64_t *v);
20787 EXPORT_SYMBOL(atomic64_sub_return_386);
20788 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20789 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
20790 long long atomic64_inc_return_386(long long a, atomic64_t *v);
20791 EXPORT_SYMBOL(atomic64_inc_return_386);
20792 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20793 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
20794 long long atomic64_dec_return_386(long long a, atomic64_t *v);
20795 EXPORT_SYMBOL(atomic64_dec_return_386);
20796 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20797 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
20798 long long atomic64_add_386(long long a, atomic64_t *v);
20799 EXPORT_SYMBOL(atomic64_add_386);
20800 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
20801 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
20802 long long atomic64_sub_386(long long a, atomic64_t *v);
20803 EXPORT_SYMBOL(atomic64_sub_386);
20804 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
20805 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
20806 long long atomic64_inc_386(long long a, atomic64_t *v);
20807 EXPORT_SYMBOL(atomic64_inc_386);
20808 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
20809 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
20810 long long atomic64_dec_386(long long a, atomic64_t *v);
20811 EXPORT_SYMBOL(atomic64_dec_386);
20812 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
20813 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
20814 long long atomic64_dec_if_positive_386(atomic64_t *v);
20815 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
20816 int atomic64_inc_not_zero_386(atomic64_t *v);
20817 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20818 index e8e7e0d..56fd1b0 100644
20819 --- a/arch/x86/lib/atomic64_386_32.S
20820 +++ b/arch/x86/lib/atomic64_386_32.S
20821 @@ -48,6 +48,10 @@ BEGIN(read)
20822 movl (v), %eax
20823 movl 4(v), %edx
20824 RET_ENDP
20825 +BEGIN(read_unchecked)
20826 + movl (v), %eax
20827 + movl 4(v), %edx
20828 +RET_ENDP
20829 #undef v
20830
20831 #define v %esi
20832 @@ -55,6 +59,10 @@ BEGIN(set)
20833 movl %ebx, (v)
20834 movl %ecx, 4(v)
20835 RET_ENDP
20836 +BEGIN(set_unchecked)
20837 + movl %ebx, (v)
20838 + movl %ecx, 4(v)
20839 +RET_ENDP
20840 #undef v
20841
20842 #define v %esi
20843 @@ -70,6 +78,20 @@ RET_ENDP
20844 BEGIN(add)
20845 addl %eax, (v)
20846 adcl %edx, 4(v)
20847 +
20848 +#ifdef CONFIG_PAX_REFCOUNT
20849 + jno 0f
20850 + subl %eax, (v)
20851 + sbbl %edx, 4(v)
20852 + int $4
20853 +0:
20854 + _ASM_EXTABLE(0b, 0b)
20855 +#endif
20856 +
20857 +RET_ENDP
20858 +BEGIN(add_unchecked)
20859 + addl %eax, (v)
20860 + adcl %edx, 4(v)
20861 RET_ENDP
20862 #undef v
20863
20864 @@ -77,6 +99,24 @@ RET_ENDP
20865 BEGIN(add_return)
20866 addl (v), %eax
20867 adcl 4(v), %edx
20868 +
20869 +#ifdef CONFIG_PAX_REFCOUNT
20870 + into
20871 +1234:
20872 + _ASM_EXTABLE(1234b, 2f)
20873 +#endif
20874 +
20875 + movl %eax, (v)
20876 + movl %edx, 4(v)
20877 +
20878 +#ifdef CONFIG_PAX_REFCOUNT
20879 +2:
20880 +#endif
20881 +
20882 +RET_ENDP
20883 +BEGIN(add_return_unchecked)
20884 + addl (v), %eax
20885 + adcl 4(v), %edx
20886 movl %eax, (v)
20887 movl %edx, 4(v)
20888 RET_ENDP
20889 @@ -86,6 +126,20 @@ RET_ENDP
20890 BEGIN(sub)
20891 subl %eax, (v)
20892 sbbl %edx, 4(v)
20893 +
20894 +#ifdef CONFIG_PAX_REFCOUNT
20895 + jno 0f
20896 + addl %eax, (v)
20897 + adcl %edx, 4(v)
20898 + int $4
20899 +0:
20900 + _ASM_EXTABLE(0b, 0b)
20901 +#endif
20902 +
20903 +RET_ENDP
20904 +BEGIN(sub_unchecked)
20905 + subl %eax, (v)
20906 + sbbl %edx, 4(v)
20907 RET_ENDP
20908 #undef v
20909
20910 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20911 sbbl $0, %edx
20912 addl (v), %eax
20913 adcl 4(v), %edx
20914 +
20915 +#ifdef CONFIG_PAX_REFCOUNT
20916 + into
20917 +1234:
20918 + _ASM_EXTABLE(1234b, 2f)
20919 +#endif
20920 +
20921 + movl %eax, (v)
20922 + movl %edx, 4(v)
20923 +
20924 +#ifdef CONFIG_PAX_REFCOUNT
20925 +2:
20926 +#endif
20927 +
20928 +RET_ENDP
20929 +BEGIN(sub_return_unchecked)
20930 + negl %edx
20931 + negl %eax
20932 + sbbl $0, %edx
20933 + addl (v), %eax
20934 + adcl 4(v), %edx
20935 movl %eax, (v)
20936 movl %edx, 4(v)
20937 RET_ENDP
20938 @@ -105,6 +180,20 @@ RET_ENDP
20939 BEGIN(inc)
20940 addl $1, (v)
20941 adcl $0, 4(v)
20942 +
20943 +#ifdef CONFIG_PAX_REFCOUNT
20944 + jno 0f
20945 + subl $1, (v)
20946 + sbbl $0, 4(v)
20947 + int $4
20948 +0:
20949 + _ASM_EXTABLE(0b, 0b)
20950 +#endif
20951 +
20952 +RET_ENDP
20953 +BEGIN(inc_unchecked)
20954 + addl $1, (v)
20955 + adcl $0, 4(v)
20956 RET_ENDP
20957 #undef v
20958
20959 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20960 movl 4(v), %edx
20961 addl $1, %eax
20962 adcl $0, %edx
20963 +
20964 +#ifdef CONFIG_PAX_REFCOUNT
20965 + into
20966 +1234:
20967 + _ASM_EXTABLE(1234b, 2f)
20968 +#endif
20969 +
20970 + movl %eax, (v)
20971 + movl %edx, 4(v)
20972 +
20973 +#ifdef CONFIG_PAX_REFCOUNT
20974 +2:
20975 +#endif
20976 +
20977 +RET_ENDP
20978 +BEGIN(inc_return_unchecked)
20979 + movl (v), %eax
20980 + movl 4(v), %edx
20981 + addl $1, %eax
20982 + adcl $0, %edx
20983 movl %eax, (v)
20984 movl %edx, 4(v)
20985 RET_ENDP
20986 @@ -123,6 +232,20 @@ RET_ENDP
20987 BEGIN(dec)
20988 subl $1, (v)
20989 sbbl $0, 4(v)
20990 +
20991 +#ifdef CONFIG_PAX_REFCOUNT
20992 + jno 0f
20993 + addl $1, (v)
20994 + adcl $0, 4(v)
20995 + int $4
20996 +0:
20997 + _ASM_EXTABLE(0b, 0b)
20998 +#endif
20999 +
21000 +RET_ENDP
21001 +BEGIN(dec_unchecked)
21002 + subl $1, (v)
21003 + sbbl $0, 4(v)
21004 RET_ENDP
21005 #undef v
21006
21007 @@ -132,6 +255,26 @@ BEGIN(dec_return)
21008 movl 4(v), %edx
21009 subl $1, %eax
21010 sbbl $0, %edx
21011 +
21012 +#ifdef CONFIG_PAX_REFCOUNT
21013 + into
21014 +1234:
21015 + _ASM_EXTABLE(1234b, 2f)
21016 +#endif
21017 +
21018 + movl %eax, (v)
21019 + movl %edx, 4(v)
21020 +
21021 +#ifdef CONFIG_PAX_REFCOUNT
21022 +2:
21023 +#endif
21024 +
21025 +RET_ENDP
21026 +BEGIN(dec_return_unchecked)
21027 + movl (v), %eax
21028 + movl 4(v), %edx
21029 + subl $1, %eax
21030 + sbbl $0, %edx
21031 movl %eax, (v)
21032 movl %edx, 4(v)
21033 RET_ENDP
21034 @@ -143,6 +286,13 @@ BEGIN(add_unless)
21035 adcl %edx, %edi
21036 addl (v), %eax
21037 adcl 4(v), %edx
21038 +
21039 +#ifdef CONFIG_PAX_REFCOUNT
21040 + into
21041 +1234:
21042 + _ASM_EXTABLE(1234b, 2f)
21043 +#endif
21044 +
21045 cmpl %eax, %esi
21046 je 3f
21047 1:
21048 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
21049 1:
21050 addl $1, %eax
21051 adcl $0, %edx
21052 +
21053 +#ifdef CONFIG_PAX_REFCOUNT
21054 + into
21055 +1234:
21056 + _ASM_EXTABLE(1234b, 2f)
21057 +#endif
21058 +
21059 movl %eax, (v)
21060 movl %edx, 4(v)
21061 movl $1, %eax
21062 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
21063 movl 4(v), %edx
21064 subl $1, %eax
21065 sbbl $0, %edx
21066 +
21067 +#ifdef CONFIG_PAX_REFCOUNT
21068 + into
21069 +1234:
21070 + _ASM_EXTABLE(1234b, 1f)
21071 +#endif
21072 +
21073 js 1f
21074 movl %eax, (v)
21075 movl %edx, 4(v)
21076 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
21077 index 391a083..3a2cf39 100644
21078 --- a/arch/x86/lib/atomic64_cx8_32.S
21079 +++ b/arch/x86/lib/atomic64_cx8_32.S
21080 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
21081 CFI_STARTPROC
21082
21083 read64 %ecx
21084 + pax_force_retaddr
21085 ret
21086 CFI_ENDPROC
21087 ENDPROC(atomic64_read_cx8)
21088
21089 +ENTRY(atomic64_read_unchecked_cx8)
21090 + CFI_STARTPROC
21091 +
21092 + read64 %ecx
21093 + pax_force_retaddr
21094 + ret
21095 + CFI_ENDPROC
21096 +ENDPROC(atomic64_read_unchecked_cx8)
21097 +
21098 ENTRY(atomic64_set_cx8)
21099 CFI_STARTPROC
21100
21101 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
21102 cmpxchg8b (%esi)
21103 jne 1b
21104
21105 + pax_force_retaddr
21106 ret
21107 CFI_ENDPROC
21108 ENDPROC(atomic64_set_cx8)
21109
21110 +ENTRY(atomic64_set_unchecked_cx8)
21111 + CFI_STARTPROC
21112 +
21113 +1:
21114 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
21115 + * are atomic on 586 and newer */
21116 + cmpxchg8b (%esi)
21117 + jne 1b
21118 +
21119 + pax_force_retaddr
21120 + ret
21121 + CFI_ENDPROC
21122 +ENDPROC(atomic64_set_unchecked_cx8)
21123 +
21124 ENTRY(atomic64_xchg_cx8)
21125 CFI_STARTPROC
21126
21127 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
21128 cmpxchg8b (%esi)
21129 jne 1b
21130
21131 + pax_force_retaddr
21132 ret
21133 CFI_ENDPROC
21134 ENDPROC(atomic64_xchg_cx8)
21135
21136 -.macro addsub_return func ins insc
21137 -ENTRY(atomic64_\func\()_return_cx8)
21138 +.macro addsub_return func ins insc unchecked=""
21139 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21140 CFI_STARTPROC
21141 SAVE ebp
21142 SAVE ebx
21143 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
21144 movl %edx, %ecx
21145 \ins\()l %esi, %ebx
21146 \insc\()l %edi, %ecx
21147 +
21148 +.ifb \unchecked
21149 +#ifdef CONFIG_PAX_REFCOUNT
21150 + into
21151 +2:
21152 + _ASM_EXTABLE(2b, 3f)
21153 +#endif
21154 +.endif
21155 +
21156 LOCK_PREFIX
21157 cmpxchg8b (%ebp)
21158 jne 1b
21159 -
21160 -10:
21161 movl %ebx, %eax
21162 movl %ecx, %edx
21163 +
21164 +.ifb \unchecked
21165 +#ifdef CONFIG_PAX_REFCOUNT
21166 +3:
21167 +#endif
21168 +.endif
21169 +
21170 RESTORE edi
21171 RESTORE esi
21172 RESTORE ebx
21173 RESTORE ebp
21174 + pax_force_retaddr
21175 ret
21176 CFI_ENDPROC
21177 -ENDPROC(atomic64_\func\()_return_cx8)
21178 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21179 .endm
21180
21181 addsub_return add add adc
21182 addsub_return sub sub sbb
21183 +addsub_return add add adc _unchecked
21184 +addsub_return sub sub sbb _unchecked
21185
21186 -.macro incdec_return func ins insc
21187 -ENTRY(atomic64_\func\()_return_cx8)
21188 +.macro incdec_return func ins insc unchecked=""
21189 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21190 CFI_STARTPROC
21191 SAVE ebx
21192
21193 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21194 movl %edx, %ecx
21195 \ins\()l $1, %ebx
21196 \insc\()l $0, %ecx
21197 +
21198 +.ifb \unchecked
21199 +#ifdef CONFIG_PAX_REFCOUNT
21200 + into
21201 +2:
21202 + _ASM_EXTABLE(2b, 3f)
21203 +#endif
21204 +.endif
21205 +
21206 LOCK_PREFIX
21207 cmpxchg8b (%esi)
21208 jne 1b
21209
21210 -10:
21211 movl %ebx, %eax
21212 movl %ecx, %edx
21213 +
21214 +.ifb \unchecked
21215 +#ifdef CONFIG_PAX_REFCOUNT
21216 +3:
21217 +#endif
21218 +.endif
21219 +
21220 RESTORE ebx
21221 + pax_force_retaddr
21222 ret
21223 CFI_ENDPROC
21224 -ENDPROC(atomic64_\func\()_return_cx8)
21225 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21226 .endm
21227
21228 incdec_return inc add adc
21229 incdec_return dec sub sbb
21230 +incdec_return inc add adc _unchecked
21231 +incdec_return dec sub sbb _unchecked
21232
21233 ENTRY(atomic64_dec_if_positive_cx8)
21234 CFI_STARTPROC
21235 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21236 movl %edx, %ecx
21237 subl $1, %ebx
21238 sbb $0, %ecx
21239 +
21240 +#ifdef CONFIG_PAX_REFCOUNT
21241 + into
21242 +1234:
21243 + _ASM_EXTABLE(1234b, 2f)
21244 +#endif
21245 +
21246 js 2f
21247 LOCK_PREFIX
21248 cmpxchg8b (%esi)
21249 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21250 movl %ebx, %eax
21251 movl %ecx, %edx
21252 RESTORE ebx
21253 + pax_force_retaddr
21254 ret
21255 CFI_ENDPROC
21256 ENDPROC(atomic64_dec_if_positive_cx8)
21257 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
21258 movl %edx, %ecx
21259 addl %esi, %ebx
21260 adcl %edi, %ecx
21261 +
21262 +#ifdef CONFIG_PAX_REFCOUNT
21263 + into
21264 +1234:
21265 + _ASM_EXTABLE(1234b, 3f)
21266 +#endif
21267 +
21268 LOCK_PREFIX
21269 cmpxchg8b (%ebp)
21270 jne 1b
21271 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
21272 CFI_ADJUST_CFA_OFFSET -8
21273 RESTORE ebx
21274 RESTORE ebp
21275 + pax_force_retaddr
21276 ret
21277 4:
21278 cmpl %edx, 4(%esp)
21279 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21280 movl %edx, %ecx
21281 addl $1, %ebx
21282 adcl $0, %ecx
21283 +
21284 +#ifdef CONFIG_PAX_REFCOUNT
21285 + into
21286 +1234:
21287 + _ASM_EXTABLE(1234b, 3f)
21288 +#endif
21289 +
21290 LOCK_PREFIX
21291 cmpxchg8b (%esi)
21292 jne 1b
21293 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21294 movl $1, %eax
21295 3:
21296 RESTORE ebx
21297 + pax_force_retaddr
21298 ret
21299 4:
21300 testl %edx, %edx
21301 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21302 index 78d16a5..fbcf666 100644
21303 --- a/arch/x86/lib/checksum_32.S
21304 +++ b/arch/x86/lib/checksum_32.S
21305 @@ -28,7 +28,8 @@
21306 #include <linux/linkage.h>
21307 #include <asm/dwarf2.h>
21308 #include <asm/errno.h>
21309 -
21310 +#include <asm/segment.h>
21311 +
21312 /*
21313 * computes a partial checksum, e.g. for TCP/UDP fragments
21314 */
21315 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21316
21317 #define ARGBASE 16
21318 #define FP 12
21319 -
21320 -ENTRY(csum_partial_copy_generic)
21321 +
21322 +ENTRY(csum_partial_copy_generic_to_user)
21323 CFI_STARTPROC
21324 +
21325 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21326 + pushl_cfi %gs
21327 + popl_cfi %es
21328 + jmp csum_partial_copy_generic
21329 +#endif
21330 +
21331 +ENTRY(csum_partial_copy_generic_from_user)
21332 +
21333 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21334 + pushl_cfi %gs
21335 + popl_cfi %ds
21336 +#endif
21337 +
21338 +ENTRY(csum_partial_copy_generic)
21339 subl $4,%esp
21340 CFI_ADJUST_CFA_OFFSET 4
21341 pushl_cfi %edi
21342 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21343 jmp 4f
21344 SRC(1: movw (%esi), %bx )
21345 addl $2, %esi
21346 -DST( movw %bx, (%edi) )
21347 +DST( movw %bx, %es:(%edi) )
21348 addl $2, %edi
21349 addw %bx, %ax
21350 adcl $0, %eax
21351 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21352 SRC(1: movl (%esi), %ebx )
21353 SRC( movl 4(%esi), %edx )
21354 adcl %ebx, %eax
21355 -DST( movl %ebx, (%edi) )
21356 +DST( movl %ebx, %es:(%edi) )
21357 adcl %edx, %eax
21358 -DST( movl %edx, 4(%edi) )
21359 +DST( movl %edx, %es:4(%edi) )
21360
21361 SRC( movl 8(%esi), %ebx )
21362 SRC( movl 12(%esi), %edx )
21363 adcl %ebx, %eax
21364 -DST( movl %ebx, 8(%edi) )
21365 +DST( movl %ebx, %es:8(%edi) )
21366 adcl %edx, %eax
21367 -DST( movl %edx, 12(%edi) )
21368 +DST( movl %edx, %es:12(%edi) )
21369
21370 SRC( movl 16(%esi), %ebx )
21371 SRC( movl 20(%esi), %edx )
21372 adcl %ebx, %eax
21373 -DST( movl %ebx, 16(%edi) )
21374 +DST( movl %ebx, %es:16(%edi) )
21375 adcl %edx, %eax
21376 -DST( movl %edx, 20(%edi) )
21377 +DST( movl %edx, %es:20(%edi) )
21378
21379 SRC( movl 24(%esi), %ebx )
21380 SRC( movl 28(%esi), %edx )
21381 adcl %ebx, %eax
21382 -DST( movl %ebx, 24(%edi) )
21383 +DST( movl %ebx, %es:24(%edi) )
21384 adcl %edx, %eax
21385 -DST( movl %edx, 28(%edi) )
21386 +DST( movl %edx, %es:28(%edi) )
21387
21388 lea 32(%esi), %esi
21389 lea 32(%edi), %edi
21390 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21391 shrl $2, %edx # This clears CF
21392 SRC(3: movl (%esi), %ebx )
21393 adcl %ebx, %eax
21394 -DST( movl %ebx, (%edi) )
21395 +DST( movl %ebx, %es:(%edi) )
21396 lea 4(%esi), %esi
21397 lea 4(%edi), %edi
21398 dec %edx
21399 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21400 jb 5f
21401 SRC( movw (%esi), %cx )
21402 leal 2(%esi), %esi
21403 -DST( movw %cx, (%edi) )
21404 +DST( movw %cx, %es:(%edi) )
21405 leal 2(%edi), %edi
21406 je 6f
21407 shll $16,%ecx
21408 SRC(5: movb (%esi), %cl )
21409 -DST( movb %cl, (%edi) )
21410 +DST( movb %cl, %es:(%edi) )
21411 6: addl %ecx, %eax
21412 adcl $0, %eax
21413 7:
21414 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21415
21416 6001:
21417 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21418 - movl $-EFAULT, (%ebx)
21419 + movl $-EFAULT, %ss:(%ebx)
21420
21421 # zero the complete destination - computing the rest
21422 # is too much work
21423 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21424
21425 6002:
21426 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21427 - movl $-EFAULT,(%ebx)
21428 + movl $-EFAULT,%ss:(%ebx)
21429 jmp 5000b
21430
21431 .previous
21432
21433 + pushl_cfi %ss
21434 + popl_cfi %ds
21435 + pushl_cfi %ss
21436 + popl_cfi %es
21437 popl_cfi %ebx
21438 CFI_RESTORE ebx
21439 popl_cfi %esi
21440 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21441 popl_cfi %ecx # equivalent to addl $4,%esp
21442 ret
21443 CFI_ENDPROC
21444 -ENDPROC(csum_partial_copy_generic)
21445 +ENDPROC(csum_partial_copy_generic_to_user)
21446
21447 #else
21448
21449 /* Version for PentiumII/PPro */
21450
21451 #define ROUND1(x) \
21452 + nop; nop; nop; \
21453 SRC(movl x(%esi), %ebx ) ; \
21454 addl %ebx, %eax ; \
21455 - DST(movl %ebx, x(%edi) ) ;
21456 + DST(movl %ebx, %es:x(%edi)) ;
21457
21458 #define ROUND(x) \
21459 + nop; nop; nop; \
21460 SRC(movl x(%esi), %ebx ) ; \
21461 adcl %ebx, %eax ; \
21462 - DST(movl %ebx, x(%edi) ) ;
21463 + DST(movl %ebx, %es:x(%edi)) ;
21464
21465 #define ARGBASE 12
21466 -
21467 -ENTRY(csum_partial_copy_generic)
21468 +
21469 +ENTRY(csum_partial_copy_generic_to_user)
21470 CFI_STARTPROC
21471 +
21472 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21473 + pushl_cfi %gs
21474 + popl_cfi %es
21475 + jmp csum_partial_copy_generic
21476 +#endif
21477 +
21478 +ENTRY(csum_partial_copy_generic_from_user)
21479 +
21480 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21481 + pushl_cfi %gs
21482 + popl_cfi %ds
21483 +#endif
21484 +
21485 +ENTRY(csum_partial_copy_generic)
21486 pushl_cfi %ebx
21487 CFI_REL_OFFSET ebx, 0
21488 pushl_cfi %edi
21489 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21490 subl %ebx, %edi
21491 lea -1(%esi),%edx
21492 andl $-32,%edx
21493 - lea 3f(%ebx,%ebx), %ebx
21494 + lea 3f(%ebx,%ebx,2), %ebx
21495 testl %esi, %esi
21496 jmp *%ebx
21497 1: addl $64,%esi
21498 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21499 jb 5f
21500 SRC( movw (%esi), %dx )
21501 leal 2(%esi), %esi
21502 -DST( movw %dx, (%edi) )
21503 +DST( movw %dx, %es:(%edi) )
21504 leal 2(%edi), %edi
21505 je 6f
21506 shll $16,%edx
21507 5:
21508 SRC( movb (%esi), %dl )
21509 -DST( movb %dl, (%edi) )
21510 +DST( movb %dl, %es:(%edi) )
21511 6: addl %edx, %eax
21512 adcl $0, %eax
21513 7:
21514 .section .fixup, "ax"
21515 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21516 - movl $-EFAULT, (%ebx)
21517 + movl $-EFAULT, %ss:(%ebx)
21518 # zero the complete destination (computing the rest is too much work)
21519 movl ARGBASE+8(%esp),%edi # dst
21520 movl ARGBASE+12(%esp),%ecx # len
21521 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21522 rep; stosb
21523 jmp 7b
21524 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21525 - movl $-EFAULT, (%ebx)
21526 + movl $-EFAULT, %ss:(%ebx)
21527 jmp 7b
21528 .previous
21529
21530 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21531 + pushl_cfi %ss
21532 + popl_cfi %ds
21533 + pushl_cfi %ss
21534 + popl_cfi %es
21535 +#endif
21536 +
21537 popl_cfi %esi
21538 CFI_RESTORE esi
21539 popl_cfi %edi
21540 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21541 CFI_RESTORE ebx
21542 ret
21543 CFI_ENDPROC
21544 -ENDPROC(csum_partial_copy_generic)
21545 +ENDPROC(csum_partial_copy_generic_to_user)
21546
21547 #undef ROUND
21548 #undef ROUND1
21549 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21550 index f2145cf..cea889d 100644
21551 --- a/arch/x86/lib/clear_page_64.S
21552 +++ b/arch/x86/lib/clear_page_64.S
21553 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21554 movl $4096/8,%ecx
21555 xorl %eax,%eax
21556 rep stosq
21557 + pax_force_retaddr
21558 ret
21559 CFI_ENDPROC
21560 ENDPROC(clear_page_c)
21561 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21562 movl $4096,%ecx
21563 xorl %eax,%eax
21564 rep stosb
21565 + pax_force_retaddr
21566 ret
21567 CFI_ENDPROC
21568 ENDPROC(clear_page_c_e)
21569 @@ -43,6 +45,7 @@ ENTRY(clear_page)
21570 leaq 64(%rdi),%rdi
21571 jnz .Lloop
21572 nop
21573 + pax_force_retaddr
21574 ret
21575 CFI_ENDPROC
21576 .Lclear_page_end:
21577 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
21578
21579 #include <asm/cpufeature.h>
21580
21581 - .section .altinstr_replacement,"ax"
21582 + .section .altinstr_replacement,"a"
21583 1: .byte 0xeb /* jmp <disp8> */
21584 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21585 2: .byte 0xeb /* jmp <disp8> */
21586 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21587 index 1e572c5..2a162cd 100644
21588 --- a/arch/x86/lib/cmpxchg16b_emu.S
21589 +++ b/arch/x86/lib/cmpxchg16b_emu.S
21590 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21591
21592 popf
21593 mov $1, %al
21594 + pax_force_retaddr
21595 ret
21596
21597 not_same:
21598 popf
21599 xor %al,%al
21600 + pax_force_retaddr
21601 ret
21602
21603 CFI_ENDPROC
21604 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21605 index 01c805b..dccb07f 100644
21606 --- a/arch/x86/lib/copy_page_64.S
21607 +++ b/arch/x86/lib/copy_page_64.S
21608 @@ -9,6 +9,7 @@ copy_page_c:
21609 CFI_STARTPROC
21610 movl $4096/8,%ecx
21611 rep movsq
21612 + pax_force_retaddr
21613 ret
21614 CFI_ENDPROC
21615 ENDPROC(copy_page_c)
21616 @@ -39,7 +40,7 @@ ENTRY(copy_page)
21617 movq 16 (%rsi), %rdx
21618 movq 24 (%rsi), %r8
21619 movq 32 (%rsi), %r9
21620 - movq 40 (%rsi), %r10
21621 + movq 40 (%rsi), %r13
21622 movq 48 (%rsi), %r11
21623 movq 56 (%rsi), %r12
21624
21625 @@ -50,7 +51,7 @@ ENTRY(copy_page)
21626 movq %rdx, 16 (%rdi)
21627 movq %r8, 24 (%rdi)
21628 movq %r9, 32 (%rdi)
21629 - movq %r10, 40 (%rdi)
21630 + movq %r13, 40 (%rdi)
21631 movq %r11, 48 (%rdi)
21632 movq %r12, 56 (%rdi)
21633
21634 @@ -69,7 +70,7 @@ ENTRY(copy_page)
21635 movq 16 (%rsi), %rdx
21636 movq 24 (%rsi), %r8
21637 movq 32 (%rsi), %r9
21638 - movq 40 (%rsi), %r10
21639 + movq 40 (%rsi), %r13
21640 movq 48 (%rsi), %r11
21641 movq 56 (%rsi), %r12
21642
21643 @@ -78,7 +79,7 @@ ENTRY(copy_page)
21644 movq %rdx, 16 (%rdi)
21645 movq %r8, 24 (%rdi)
21646 movq %r9, 32 (%rdi)
21647 - movq %r10, 40 (%rdi)
21648 + movq %r13, 40 (%rdi)
21649 movq %r11, 48 (%rdi)
21650 movq %r12, 56 (%rdi)
21651
21652 @@ -95,6 +96,7 @@ ENTRY(copy_page)
21653 CFI_RESTORE r13
21654 addq $3*8,%rsp
21655 CFI_ADJUST_CFA_OFFSET -3*8
21656 + pax_force_retaddr
21657 ret
21658 .Lcopy_page_end:
21659 CFI_ENDPROC
21660 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
21661
21662 #include <asm/cpufeature.h>
21663
21664 - .section .altinstr_replacement,"ax"
21665 + .section .altinstr_replacement,"a"
21666 1: .byte 0xeb /* jmp <disp8> */
21667 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21668 2:
21669 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21670 index 0248402..821c786 100644
21671 --- a/arch/x86/lib/copy_user_64.S
21672 +++ b/arch/x86/lib/copy_user_64.S
21673 @@ -16,6 +16,7 @@
21674 #include <asm/thread_info.h>
21675 #include <asm/cpufeature.h>
21676 #include <asm/alternative-asm.h>
21677 +#include <asm/pgtable.h>
21678
21679 /*
21680 * By placing feature2 after feature1 in altinstructions section, we logically
21681 @@ -29,7 +30,7 @@
21682 .byte 0xe9 /* 32bit jump */
21683 .long \orig-1f /* by default jump to orig */
21684 1:
21685 - .section .altinstr_replacement,"ax"
21686 + .section .altinstr_replacement,"a"
21687 2: .byte 0xe9 /* near jump with 32bit immediate */
21688 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21689 3: .byte 0xe9 /* near jump with 32bit immediate */
21690 @@ -71,47 +72,20 @@
21691 #endif
21692 .endm
21693
21694 -/* Standard copy_to_user with segment limit checking */
21695 -ENTRY(_copy_to_user)
21696 - CFI_STARTPROC
21697 - GET_THREAD_INFO(%rax)
21698 - movq %rdi,%rcx
21699 - addq %rdx,%rcx
21700 - jc bad_to_user
21701 - cmpq TI_addr_limit(%rax),%rcx
21702 - ja bad_to_user
21703 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21704 - copy_user_generic_unrolled,copy_user_generic_string, \
21705 - copy_user_enhanced_fast_string
21706 - CFI_ENDPROC
21707 -ENDPROC(_copy_to_user)
21708 -
21709 -/* Standard copy_from_user with segment limit checking */
21710 -ENTRY(_copy_from_user)
21711 - CFI_STARTPROC
21712 - GET_THREAD_INFO(%rax)
21713 - movq %rsi,%rcx
21714 - addq %rdx,%rcx
21715 - jc bad_from_user
21716 - cmpq TI_addr_limit(%rax),%rcx
21717 - ja bad_from_user
21718 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21719 - copy_user_generic_unrolled,copy_user_generic_string, \
21720 - copy_user_enhanced_fast_string
21721 - CFI_ENDPROC
21722 -ENDPROC(_copy_from_user)
21723 -
21724 .section .fixup,"ax"
21725 /* must zero dest */
21726 ENTRY(bad_from_user)
21727 bad_from_user:
21728 CFI_STARTPROC
21729 + testl %edx,%edx
21730 + js bad_to_user
21731 movl %edx,%ecx
21732 xorl %eax,%eax
21733 rep
21734 stosb
21735 bad_to_user:
21736 movl %edx,%eax
21737 + pax_force_retaddr
21738 ret
21739 CFI_ENDPROC
21740 ENDPROC(bad_from_user)
21741 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21742 jz 17f
21743 1: movq (%rsi),%r8
21744 2: movq 1*8(%rsi),%r9
21745 -3: movq 2*8(%rsi),%r10
21746 +3: movq 2*8(%rsi),%rax
21747 4: movq 3*8(%rsi),%r11
21748 5: movq %r8,(%rdi)
21749 6: movq %r9,1*8(%rdi)
21750 -7: movq %r10,2*8(%rdi)
21751 +7: movq %rax,2*8(%rdi)
21752 8: movq %r11,3*8(%rdi)
21753 9: movq 4*8(%rsi),%r8
21754 10: movq 5*8(%rsi),%r9
21755 -11: movq 6*8(%rsi),%r10
21756 +11: movq 6*8(%rsi),%rax
21757 12: movq 7*8(%rsi),%r11
21758 13: movq %r8,4*8(%rdi)
21759 14: movq %r9,5*8(%rdi)
21760 -15: movq %r10,6*8(%rdi)
21761 +15: movq %rax,6*8(%rdi)
21762 16: movq %r11,7*8(%rdi)
21763 leaq 64(%rsi),%rsi
21764 leaq 64(%rdi),%rdi
21765 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21766 decl %ecx
21767 jnz 21b
21768 23: xor %eax,%eax
21769 + pax_force_retaddr
21770 ret
21771
21772 .section .fixup,"ax"
21773 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21774 3: rep
21775 movsb
21776 4: xorl %eax,%eax
21777 + pax_force_retaddr
21778 ret
21779
21780 .section .fixup,"ax"
21781 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21782 1: rep
21783 movsb
21784 2: xorl %eax,%eax
21785 + pax_force_retaddr
21786 ret
21787
21788 .section .fixup,"ax"
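
The added testl/js in the bad_from_user fixup above stops a negative length in %edx from being used as a huge unsigned count for the zero-fill rep stosb; with the sign bit set it falls through to bad_to_user and simply returns the length. A rough C rendering of that fixup's intent (a sketch, not a translation of the asm; the helper name is invented):

#include <stdio.h>
#include <string.h>

/* Sketch of the patched bad_from_user path: zero the destination only for a
 * non-negative length, then report how many bytes were not copied. */
static unsigned long bad_from_user_sketch(void *dst, int len)
{
	if (len >= 0)				/* the new testl %edx,%edx / js check */
		memset(dst, 0, (size_t)len);	/* the rep stosb zero fill */
	return (unsigned int)len;		/* movl %edx,%eax: return the length */
}

int main(void)
{
	char buf[16];
	printf("%lu\n", bad_from_user_sketch(buf, 16));
	printf("%lu\n", bad_from_user_sketch(buf, -1));	/* no zero fill happens */
	return 0;
}
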
21789 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21790 index cb0c112..e3a6895 100644
21791 --- a/arch/x86/lib/copy_user_nocache_64.S
21792 +++ b/arch/x86/lib/copy_user_nocache_64.S
21793 @@ -8,12 +8,14 @@
21794
21795 #include <linux/linkage.h>
21796 #include <asm/dwarf2.h>
21797 +#include <asm/alternative-asm.h>
21798
21799 #define FIX_ALIGNMENT 1
21800
21801 #include <asm/current.h>
21802 #include <asm/asm-offsets.h>
21803 #include <asm/thread_info.h>
21804 +#include <asm/pgtable.h>
21805
21806 .macro ALIGN_DESTINATION
21807 #ifdef FIX_ALIGNMENT
21808 @@ -50,6 +52,15 @@
21809 */
21810 ENTRY(__copy_user_nocache)
21811 CFI_STARTPROC
21812 +
21813 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21814 + mov $PAX_USER_SHADOW_BASE,%rcx
21815 + cmp %rcx,%rsi
21816 + jae 1f
21817 + add %rcx,%rsi
21818 +1:
21819 +#endif
21820 +
21821 cmpl $8,%edx
21822 jb 20f /* less then 8 bytes, go to byte copy loop */
21823 ALIGN_DESTINATION
21824 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21825 jz 17f
21826 1: movq (%rsi),%r8
21827 2: movq 1*8(%rsi),%r9
21828 -3: movq 2*8(%rsi),%r10
21829 +3: movq 2*8(%rsi),%rax
21830 4: movq 3*8(%rsi),%r11
21831 5: movnti %r8,(%rdi)
21832 6: movnti %r9,1*8(%rdi)
21833 -7: movnti %r10,2*8(%rdi)
21834 +7: movnti %rax,2*8(%rdi)
21835 8: movnti %r11,3*8(%rdi)
21836 9: movq 4*8(%rsi),%r8
21837 10: movq 5*8(%rsi),%r9
21838 -11: movq 6*8(%rsi),%r10
21839 +11: movq 6*8(%rsi),%rax
21840 12: movq 7*8(%rsi),%r11
21841 13: movnti %r8,4*8(%rdi)
21842 14: movnti %r9,5*8(%rdi)
21843 -15: movnti %r10,6*8(%rdi)
21844 +15: movnti %rax,6*8(%rdi)
21845 16: movnti %r11,7*8(%rdi)
21846 leaq 64(%rsi),%rsi
21847 leaq 64(%rdi),%rdi
21848 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21849 jnz 21b
21850 23: xorl %eax,%eax
21851 sfence
21852 + pax_force_retaddr
21853 ret
21854
21855 .section .fixup,"ax"
21856 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21857 index fb903b7..c92b7f7 100644
21858 --- a/arch/x86/lib/csum-copy_64.S
21859 +++ b/arch/x86/lib/csum-copy_64.S
21860 @@ -8,6 +8,7 @@
21861 #include <linux/linkage.h>
21862 #include <asm/dwarf2.h>
21863 #include <asm/errno.h>
21864 +#include <asm/alternative-asm.h>
21865
21866 /*
21867 * Checksum copy with exception handling.
21868 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21869 CFI_RESTORE rbp
21870 addq $7*8, %rsp
21871 CFI_ADJUST_CFA_OFFSET -7*8
21872 + pax_force_retaddr 0, 1
21873 ret
21874 CFI_RESTORE_STATE
21875
21876 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21877 index 459b58a..9570bc7 100644
21878 --- a/arch/x86/lib/csum-wrappers_64.c
21879 +++ b/arch/x86/lib/csum-wrappers_64.c
21880 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21881 len -= 2;
21882 }
21883 }
21884 - isum = csum_partial_copy_generic((__force const void *)src,
21885 +
21886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21887 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21888 + src += PAX_USER_SHADOW_BASE;
21889 +#endif
21890 +
21891 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21892 dst, len, isum, errp, NULL);
21893 if (unlikely(*errp))
21894 goto out_err;
21895 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21896 }
21897
21898 *errp = 0;
21899 - return csum_partial_copy_generic(src, (void __force *)dst,
21900 +
21901 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21902 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21903 + dst += PAX_USER_SHADOW_BASE;
21904 +#endif
21905 +
21906 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21907 len, isum, NULL, errp);
21908 }
21909 EXPORT_SYMBOL(csum_partial_copy_to_user);
21910 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21911 index 51f1504..ddac4c1 100644
21912 --- a/arch/x86/lib/getuser.S
21913 +++ b/arch/x86/lib/getuser.S
21914 @@ -33,15 +33,38 @@
21915 #include <asm/asm-offsets.h>
21916 #include <asm/thread_info.h>
21917 #include <asm/asm.h>
21918 +#include <asm/segment.h>
21919 +#include <asm/pgtable.h>
21920 +#include <asm/alternative-asm.h>
21921 +
21922 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21923 +#define __copyuser_seg gs;
21924 +#else
21925 +#define __copyuser_seg
21926 +#endif
21927
21928 .text
21929 ENTRY(__get_user_1)
21930 CFI_STARTPROC
21931 +
21932 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21933 GET_THREAD_INFO(%_ASM_DX)
21934 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21935 jae bad_get_user
21936 -1: movzb (%_ASM_AX),%edx
21937 +
21938 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21939 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21940 + cmp %_ASM_DX,%_ASM_AX
21941 + jae 1234f
21942 + add %_ASM_DX,%_ASM_AX
21943 +1234:
21944 +#endif
21945 +
21946 +#endif
21947 +
21948 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21949 xor %eax,%eax
21950 + pax_force_retaddr
21951 ret
21952 CFI_ENDPROC
21953 ENDPROC(__get_user_1)
21954 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21955 ENTRY(__get_user_2)
21956 CFI_STARTPROC
21957 add $1,%_ASM_AX
21958 +
21959 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21960 jc bad_get_user
21961 GET_THREAD_INFO(%_ASM_DX)
21962 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21963 jae bad_get_user
21964 -2: movzwl -1(%_ASM_AX),%edx
21965 +
21966 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21967 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21968 + cmp %_ASM_DX,%_ASM_AX
21969 + jae 1234f
21970 + add %_ASM_DX,%_ASM_AX
21971 +1234:
21972 +#endif
21973 +
21974 +#endif
21975 +
21976 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21977 xor %eax,%eax
21978 + pax_force_retaddr
21979 ret
21980 CFI_ENDPROC
21981 ENDPROC(__get_user_2)
21982 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21983 ENTRY(__get_user_4)
21984 CFI_STARTPROC
21985 add $3,%_ASM_AX
21986 +
21987 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21988 jc bad_get_user
21989 GET_THREAD_INFO(%_ASM_DX)
21990 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21991 jae bad_get_user
21992 -3: mov -3(%_ASM_AX),%edx
21993 +
21994 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21995 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21996 + cmp %_ASM_DX,%_ASM_AX
21997 + jae 1234f
21998 + add %_ASM_DX,%_ASM_AX
21999 +1234:
22000 +#endif
22001 +
22002 +#endif
22003 +
22004 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
22005 xor %eax,%eax
22006 + pax_force_retaddr
22007 ret
22008 CFI_ENDPROC
22009 ENDPROC(__get_user_4)
22010 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22011 GET_THREAD_INFO(%_ASM_DX)
22012 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22013 jae bad_get_user
22014 +
22015 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22016 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22017 + cmp %_ASM_DX,%_ASM_AX
22018 + jae 1234f
22019 + add %_ASM_DX,%_ASM_AX
22020 +1234:
22021 +#endif
22022 +
22023 4: movq -7(%_ASM_AX),%_ASM_DX
22024 xor %eax,%eax
22025 + pax_force_retaddr
22026 ret
22027 CFI_ENDPROC
22028 ENDPROC(__get_user_8)
22029 @@ -91,6 +152,7 @@ bad_get_user:
22030 CFI_STARTPROC
22031 xor %edx,%edx
22032 mov $(-EFAULT),%_ASM_AX
22033 + pax_force_retaddr
22034 ret
22035 CFI_ENDPROC
22036 END(bad_get_user)
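
Each __get_user_N entry above gains the same amd64 UDEREF preamble: if the pointer is still below PAX_USER_SHADOW_BASE it is shifted up into the kernel's shadow mapping of userland before being dereferenced. A minimal C sketch of that mov/cmp/jae/add sequence (the helper name and the base value below are placeholders; the real constant is configuration dependent):

#include <stdio.h>

/* Placeholder for the configuration-dependent PaX constant. */
#define PAX_USER_SHADOW_BASE_SKETCH 0x0000800000000000UL

/* Hypothetical helper mirroring the check added to __get_user_N. */
static unsigned long pax_shadow_uaddr(unsigned long uaddr)
{
	if (uaddr < PAX_USER_SHADOW_BASE_SKETCH)	/* still a raw userland pointer */
		uaddr += PAX_USER_SHADOW_BASE_SKETCH;	/* redirect into the shadow mapping */
	return uaddr;					/* already shadowed: left alone */
}

int main(void)
{
	printf("%#lx\n", pax_shadow_uaddr(0x400000UL));
	return 0;
}
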
22037 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
22038 index 5a1f9f3..ba9f577 100644
22039 --- a/arch/x86/lib/insn.c
22040 +++ b/arch/x86/lib/insn.c
22041 @@ -21,6 +21,11 @@
22042 #include <linux/string.h>
22043 #include <asm/inat.h>
22044 #include <asm/insn.h>
22045 +#ifdef __KERNEL__
22046 +#include <asm/pgtable_types.h>
22047 +#else
22048 +#define ktla_ktva(addr) addr
22049 +#endif
22050
22051 /* Verify next sizeof(t) bytes can be on the same instruction */
22052 #define validate_next(t, insn, n) \
22053 @@ -49,8 +54,8 @@
22054 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
22055 {
22056 memset(insn, 0, sizeof(*insn));
22057 - insn->kaddr = kaddr;
22058 - insn->next_byte = kaddr;
22059 + insn->kaddr = ktla_ktva(kaddr);
22060 + insn->next_byte = ktla_ktva(kaddr);
22061 insn->x86_64 = x86_64 ? 1 : 0;
22062 insn->opnd_bytes = 4;
22063 if (x86_64)
22064 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22065 index 05a95e7..326f2fa 100644
22066 --- a/arch/x86/lib/iomap_copy_64.S
22067 +++ b/arch/x86/lib/iomap_copy_64.S
22068 @@ -17,6 +17,7 @@
22069
22070 #include <linux/linkage.h>
22071 #include <asm/dwarf2.h>
22072 +#include <asm/alternative-asm.h>
22073
22074 /*
22075 * override generic version in lib/iomap_copy.c
22076 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22077 CFI_STARTPROC
22078 movl %edx,%ecx
22079 rep movsd
22080 + pax_force_retaddr
22081 ret
22082 CFI_ENDPROC
22083 ENDPROC(__iowrite32_copy)
22084 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22085 index efbf2a0..8893637 100644
22086 --- a/arch/x86/lib/memcpy_64.S
22087 +++ b/arch/x86/lib/memcpy_64.S
22088 @@ -34,6 +34,7 @@
22089 rep movsq
22090 movl %edx, %ecx
22091 rep movsb
22092 + pax_force_retaddr
22093 ret
22094 .Lmemcpy_e:
22095 .previous
22096 @@ -51,6 +52,7 @@
22097
22098 movl %edx, %ecx
22099 rep movsb
22100 + pax_force_retaddr
22101 ret
22102 .Lmemcpy_e_e:
22103 .previous
22104 @@ -81,13 +83,13 @@ ENTRY(memcpy)
22105 */
22106 movq 0*8(%rsi), %r8
22107 movq 1*8(%rsi), %r9
22108 - movq 2*8(%rsi), %r10
22109 + movq 2*8(%rsi), %rcx
22110 movq 3*8(%rsi), %r11
22111 leaq 4*8(%rsi), %rsi
22112
22113 movq %r8, 0*8(%rdi)
22114 movq %r9, 1*8(%rdi)
22115 - movq %r10, 2*8(%rdi)
22116 + movq %rcx, 2*8(%rdi)
22117 movq %r11, 3*8(%rdi)
22118 leaq 4*8(%rdi), %rdi
22119 jae .Lcopy_forward_loop
22120 @@ -110,12 +112,12 @@ ENTRY(memcpy)
22121 subq $0x20, %rdx
22122 movq -1*8(%rsi), %r8
22123 movq -2*8(%rsi), %r9
22124 - movq -3*8(%rsi), %r10
22125 + movq -3*8(%rsi), %rcx
22126 movq -4*8(%rsi), %r11
22127 leaq -4*8(%rsi), %rsi
22128 movq %r8, -1*8(%rdi)
22129 movq %r9, -2*8(%rdi)
22130 - movq %r10, -3*8(%rdi)
22131 + movq %rcx, -3*8(%rdi)
22132 movq %r11, -4*8(%rdi)
22133 leaq -4*8(%rdi), %rdi
22134 jae .Lcopy_backward_loop
22135 @@ -135,12 +137,13 @@ ENTRY(memcpy)
22136 */
22137 movq 0*8(%rsi), %r8
22138 movq 1*8(%rsi), %r9
22139 - movq -2*8(%rsi, %rdx), %r10
22140 + movq -2*8(%rsi, %rdx), %rcx
22141 movq -1*8(%rsi, %rdx), %r11
22142 movq %r8, 0*8(%rdi)
22143 movq %r9, 1*8(%rdi)
22144 - movq %r10, -2*8(%rdi, %rdx)
22145 + movq %rcx, -2*8(%rdi, %rdx)
22146 movq %r11, -1*8(%rdi, %rdx)
22147 + pax_force_retaddr
22148 retq
22149 .p2align 4
22150 .Lless_16bytes:
22151 @@ -153,6 +156,7 @@ ENTRY(memcpy)
22152 movq -1*8(%rsi, %rdx), %r9
22153 movq %r8, 0*8(%rdi)
22154 movq %r9, -1*8(%rdi, %rdx)
22155 + pax_force_retaddr
22156 retq
22157 .p2align 4
22158 .Lless_8bytes:
22159 @@ -166,6 +170,7 @@ ENTRY(memcpy)
22160 movl -4(%rsi, %rdx), %r8d
22161 movl %ecx, (%rdi)
22162 movl %r8d, -4(%rdi, %rdx)
22163 + pax_force_retaddr
22164 retq
22165 .p2align 4
22166 .Lless_3bytes:
22167 @@ -183,6 +188,7 @@ ENTRY(memcpy)
22168 jnz .Lloop_1
22169
22170 .Lend:
22171 + pax_force_retaddr
22172 retq
22173 CFI_ENDPROC
22174 ENDPROC(memcpy)
22175 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22176 index ee16461..c39c199 100644
22177 --- a/arch/x86/lib/memmove_64.S
22178 +++ b/arch/x86/lib/memmove_64.S
22179 @@ -61,13 +61,13 @@ ENTRY(memmove)
22180 5:
22181 sub $0x20, %rdx
22182 movq 0*8(%rsi), %r11
22183 - movq 1*8(%rsi), %r10
22184 + movq 1*8(%rsi), %rcx
22185 movq 2*8(%rsi), %r9
22186 movq 3*8(%rsi), %r8
22187 leaq 4*8(%rsi), %rsi
22188
22189 movq %r11, 0*8(%rdi)
22190 - movq %r10, 1*8(%rdi)
22191 + movq %rcx, 1*8(%rdi)
22192 movq %r9, 2*8(%rdi)
22193 movq %r8, 3*8(%rdi)
22194 leaq 4*8(%rdi), %rdi
22195 @@ -81,10 +81,10 @@ ENTRY(memmove)
22196 4:
22197 movq %rdx, %rcx
22198 movq -8(%rsi, %rdx), %r11
22199 - lea -8(%rdi, %rdx), %r10
22200 + lea -8(%rdi, %rdx), %r9
22201 shrq $3, %rcx
22202 rep movsq
22203 - movq %r11, (%r10)
22204 + movq %r11, (%r9)
22205 jmp 13f
22206 .Lmemmove_end_forward:
22207
22208 @@ -95,14 +95,14 @@ ENTRY(memmove)
22209 7:
22210 movq %rdx, %rcx
22211 movq (%rsi), %r11
22212 - movq %rdi, %r10
22213 + movq %rdi, %r9
22214 leaq -8(%rsi, %rdx), %rsi
22215 leaq -8(%rdi, %rdx), %rdi
22216 shrq $3, %rcx
22217 std
22218 rep movsq
22219 cld
22220 - movq %r11, (%r10)
22221 + movq %r11, (%r9)
22222 jmp 13f
22223
22224 /*
22225 @@ -127,13 +127,13 @@ ENTRY(memmove)
22226 8:
22227 subq $0x20, %rdx
22228 movq -1*8(%rsi), %r11
22229 - movq -2*8(%rsi), %r10
22230 + movq -2*8(%rsi), %rcx
22231 movq -3*8(%rsi), %r9
22232 movq -4*8(%rsi), %r8
22233 leaq -4*8(%rsi), %rsi
22234
22235 movq %r11, -1*8(%rdi)
22236 - movq %r10, -2*8(%rdi)
22237 + movq %rcx, -2*8(%rdi)
22238 movq %r9, -3*8(%rdi)
22239 movq %r8, -4*8(%rdi)
22240 leaq -4*8(%rdi), %rdi
22241 @@ -151,11 +151,11 @@ ENTRY(memmove)
22242 * Move data from 16 bytes to 31 bytes.
22243 */
22244 movq 0*8(%rsi), %r11
22245 - movq 1*8(%rsi), %r10
22246 + movq 1*8(%rsi), %rcx
22247 movq -2*8(%rsi, %rdx), %r9
22248 movq -1*8(%rsi, %rdx), %r8
22249 movq %r11, 0*8(%rdi)
22250 - movq %r10, 1*8(%rdi)
22251 + movq %rcx, 1*8(%rdi)
22252 movq %r9, -2*8(%rdi, %rdx)
22253 movq %r8, -1*8(%rdi, %rdx)
22254 jmp 13f
22255 @@ -167,9 +167,9 @@ ENTRY(memmove)
22256 * Move data from 8 bytes to 15 bytes.
22257 */
22258 movq 0*8(%rsi), %r11
22259 - movq -1*8(%rsi, %rdx), %r10
22260 + movq -1*8(%rsi, %rdx), %r9
22261 movq %r11, 0*8(%rdi)
22262 - movq %r10, -1*8(%rdi, %rdx)
22263 + movq %r9, -1*8(%rdi, %rdx)
22264 jmp 13f
22265 10:
22266 cmpq $4, %rdx
22267 @@ -178,9 +178,9 @@ ENTRY(memmove)
22268 * Move data from 4 bytes to 7 bytes.
22269 */
22270 movl (%rsi), %r11d
22271 - movl -4(%rsi, %rdx), %r10d
22272 + movl -4(%rsi, %rdx), %r9d
22273 movl %r11d, (%rdi)
22274 - movl %r10d, -4(%rdi, %rdx)
22275 + movl %r9d, -4(%rdi, %rdx)
22276 jmp 13f
22277 11:
22278 cmp $2, %rdx
22279 @@ -189,9 +189,9 @@ ENTRY(memmove)
22280 * Move data from 2 bytes to 3 bytes.
22281 */
22282 movw (%rsi), %r11w
22283 - movw -2(%rsi, %rdx), %r10w
22284 + movw -2(%rsi, %rdx), %r9w
22285 movw %r11w, (%rdi)
22286 - movw %r10w, -2(%rdi, %rdx)
22287 + movw %r9w, -2(%rdi, %rdx)
22288 jmp 13f
22289 12:
22290 cmp $1, %rdx
22291 @@ -202,6 +202,7 @@ ENTRY(memmove)
22292 movb (%rsi), %r11b
22293 movb %r11b, (%rdi)
22294 13:
22295 + pax_force_retaddr
22296 retq
22297 CFI_ENDPROC
22298
22299 @@ -210,6 +211,7 @@ ENTRY(memmove)
22300 /* Forward moving data. */
22301 movq %rdx, %rcx
22302 rep movsb
22303 + pax_force_retaddr
22304 retq
22305 .Lmemmove_end_forward_efs:
22306 .previous
22307 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22308 index 79bd454..dff325a 100644
22309 --- a/arch/x86/lib/memset_64.S
22310 +++ b/arch/x86/lib/memset_64.S
22311 @@ -31,6 +31,7 @@
22312 movl %r8d,%ecx
22313 rep stosb
22314 movq %r9,%rax
22315 + pax_force_retaddr
22316 ret
22317 .Lmemset_e:
22318 .previous
22319 @@ -53,6 +54,7 @@
22320 movl %edx,%ecx
22321 rep stosb
22322 movq %r9,%rax
22323 + pax_force_retaddr
22324 ret
22325 .Lmemset_e_e:
22326 .previous
22327 @@ -60,13 +62,13 @@
22328 ENTRY(memset)
22329 ENTRY(__memset)
22330 CFI_STARTPROC
22331 - movq %rdi,%r10
22332 movq %rdx,%r11
22333
22334 /* expand byte value */
22335 movzbl %sil,%ecx
22336 movabs $0x0101010101010101,%rax
22337 mul %rcx /* with rax, clobbers rdx */
22338 + movq %rdi,%rdx
22339
22340 /* align dst */
22341 movl %edi,%r9d
22342 @@ -120,7 +122,8 @@ ENTRY(__memset)
22343 jnz .Lloop_1
22344
22345 .Lende:
22346 - movq %r10,%rax
22347 + movq %rdx,%rax
22348 + pax_force_retaddr
22349 ret
22350
22351 CFI_RESTORE_STATE
22352 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22353 index c9f2d9b..e7fd2c0 100644
22354 --- a/arch/x86/lib/mmx_32.c
22355 +++ b/arch/x86/lib/mmx_32.c
22356 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22357 {
22358 void *p;
22359 int i;
22360 + unsigned long cr0;
22361
22362 if (unlikely(in_interrupt()))
22363 return __memcpy(to, from, len);
22364 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22365 kernel_fpu_begin();
22366
22367 __asm__ __volatile__ (
22368 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22369 - " prefetch 64(%0)\n"
22370 - " prefetch 128(%0)\n"
22371 - " prefetch 192(%0)\n"
22372 - " prefetch 256(%0)\n"
22373 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22374 + " prefetch 64(%1)\n"
22375 + " prefetch 128(%1)\n"
22376 + " prefetch 192(%1)\n"
22377 + " prefetch 256(%1)\n"
22378 "2: \n"
22379 ".section .fixup, \"ax\"\n"
22380 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22381 + "3: \n"
22382 +
22383 +#ifdef CONFIG_PAX_KERNEXEC
22384 + " movl %%cr0, %0\n"
22385 + " movl %0, %%eax\n"
22386 + " andl $0xFFFEFFFF, %%eax\n"
22387 + " movl %%eax, %%cr0\n"
22388 +#endif
22389 +
22390 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22391 +
22392 +#ifdef CONFIG_PAX_KERNEXEC
22393 + " movl %0, %%cr0\n"
22394 +#endif
22395 +
22396 " jmp 2b\n"
22397 ".previous\n"
22398 _ASM_EXTABLE(1b, 3b)
22399 - : : "r" (from));
22400 + : "=&r" (cr0) : "r" (from) : "ax");
22401
22402 for ( ; i > 5; i--) {
22403 __asm__ __volatile__ (
22404 - "1: prefetch 320(%0)\n"
22405 - "2: movq (%0), %%mm0\n"
22406 - " movq 8(%0), %%mm1\n"
22407 - " movq 16(%0), %%mm2\n"
22408 - " movq 24(%0), %%mm3\n"
22409 - " movq %%mm0, (%1)\n"
22410 - " movq %%mm1, 8(%1)\n"
22411 - " movq %%mm2, 16(%1)\n"
22412 - " movq %%mm3, 24(%1)\n"
22413 - " movq 32(%0), %%mm0\n"
22414 - " movq 40(%0), %%mm1\n"
22415 - " movq 48(%0), %%mm2\n"
22416 - " movq 56(%0), %%mm3\n"
22417 - " movq %%mm0, 32(%1)\n"
22418 - " movq %%mm1, 40(%1)\n"
22419 - " movq %%mm2, 48(%1)\n"
22420 - " movq %%mm3, 56(%1)\n"
22421 + "1: prefetch 320(%1)\n"
22422 + "2: movq (%1), %%mm0\n"
22423 + " movq 8(%1), %%mm1\n"
22424 + " movq 16(%1), %%mm2\n"
22425 + " movq 24(%1), %%mm3\n"
22426 + " movq %%mm0, (%2)\n"
22427 + " movq %%mm1, 8(%2)\n"
22428 + " movq %%mm2, 16(%2)\n"
22429 + " movq %%mm3, 24(%2)\n"
22430 + " movq 32(%1), %%mm0\n"
22431 + " movq 40(%1), %%mm1\n"
22432 + " movq 48(%1), %%mm2\n"
22433 + " movq 56(%1), %%mm3\n"
22434 + " movq %%mm0, 32(%2)\n"
22435 + " movq %%mm1, 40(%2)\n"
22436 + " movq %%mm2, 48(%2)\n"
22437 + " movq %%mm3, 56(%2)\n"
22438 ".section .fixup, \"ax\"\n"
22439 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22440 + "3:\n"
22441 +
22442 +#ifdef CONFIG_PAX_KERNEXEC
22443 + " movl %%cr0, %0\n"
22444 + " movl %0, %%eax\n"
22445 + " andl $0xFFFEFFFF, %%eax\n"
22446 + " movl %%eax, %%cr0\n"
22447 +#endif
22448 +
22449 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22450 +
22451 +#ifdef CONFIG_PAX_KERNEXEC
22452 + " movl %0, %%cr0\n"
22453 +#endif
22454 +
22455 " jmp 2b\n"
22456 ".previous\n"
22457 _ASM_EXTABLE(1b, 3b)
22458 - : : "r" (from), "r" (to) : "memory");
22459 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22460
22461 from += 64;
22462 to += 64;
22463 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22464 static void fast_copy_page(void *to, void *from)
22465 {
22466 int i;
22467 + unsigned long cr0;
22468
22469 kernel_fpu_begin();
22470
22471 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22472 * but that is for later. -AV
22473 */
22474 __asm__ __volatile__(
22475 - "1: prefetch (%0)\n"
22476 - " prefetch 64(%0)\n"
22477 - " prefetch 128(%0)\n"
22478 - " prefetch 192(%0)\n"
22479 - " prefetch 256(%0)\n"
22480 + "1: prefetch (%1)\n"
22481 + " prefetch 64(%1)\n"
22482 + " prefetch 128(%1)\n"
22483 + " prefetch 192(%1)\n"
22484 + " prefetch 256(%1)\n"
22485 "2: \n"
22486 ".section .fixup, \"ax\"\n"
22487 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22488 + "3: \n"
22489 +
22490 +#ifdef CONFIG_PAX_KERNEXEC
22491 + " movl %%cr0, %0\n"
22492 + " movl %0, %%eax\n"
22493 + " andl $0xFFFEFFFF, %%eax\n"
22494 + " movl %%eax, %%cr0\n"
22495 +#endif
22496 +
22497 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22498 +
22499 +#ifdef CONFIG_PAX_KERNEXEC
22500 + " movl %0, %%cr0\n"
22501 +#endif
22502 +
22503 " jmp 2b\n"
22504 ".previous\n"
22505 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22506 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22507
22508 for (i = 0; i < (4096-320)/64; i++) {
22509 __asm__ __volatile__ (
22510 - "1: prefetch 320(%0)\n"
22511 - "2: movq (%0), %%mm0\n"
22512 - " movntq %%mm0, (%1)\n"
22513 - " movq 8(%0), %%mm1\n"
22514 - " movntq %%mm1, 8(%1)\n"
22515 - " movq 16(%0), %%mm2\n"
22516 - " movntq %%mm2, 16(%1)\n"
22517 - " movq 24(%0), %%mm3\n"
22518 - " movntq %%mm3, 24(%1)\n"
22519 - " movq 32(%0), %%mm4\n"
22520 - " movntq %%mm4, 32(%1)\n"
22521 - " movq 40(%0), %%mm5\n"
22522 - " movntq %%mm5, 40(%1)\n"
22523 - " movq 48(%0), %%mm6\n"
22524 - " movntq %%mm6, 48(%1)\n"
22525 - " movq 56(%0), %%mm7\n"
22526 - " movntq %%mm7, 56(%1)\n"
22527 + "1: prefetch 320(%1)\n"
22528 + "2: movq (%1), %%mm0\n"
22529 + " movntq %%mm0, (%2)\n"
22530 + " movq 8(%1), %%mm1\n"
22531 + " movntq %%mm1, 8(%2)\n"
22532 + " movq 16(%1), %%mm2\n"
22533 + " movntq %%mm2, 16(%2)\n"
22534 + " movq 24(%1), %%mm3\n"
22535 + " movntq %%mm3, 24(%2)\n"
22536 + " movq 32(%1), %%mm4\n"
22537 + " movntq %%mm4, 32(%2)\n"
22538 + " movq 40(%1), %%mm5\n"
22539 + " movntq %%mm5, 40(%2)\n"
22540 + " movq 48(%1), %%mm6\n"
22541 + " movntq %%mm6, 48(%2)\n"
22542 + " movq 56(%1), %%mm7\n"
22543 + " movntq %%mm7, 56(%2)\n"
22544 ".section .fixup, \"ax\"\n"
22545 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22546 + "3:\n"
22547 +
22548 +#ifdef CONFIG_PAX_KERNEXEC
22549 + " movl %%cr0, %0\n"
22550 + " movl %0, %%eax\n"
22551 + " andl $0xFFFEFFFF, %%eax\n"
22552 + " movl %%eax, %%cr0\n"
22553 +#endif
22554 +
22555 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22556 +
22557 +#ifdef CONFIG_PAX_KERNEXEC
22558 + " movl %0, %%cr0\n"
22559 +#endif
22560 +
22561 " jmp 2b\n"
22562 ".previous\n"
22563 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22564 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22565
22566 from += 64;
22567 to += 64;
22568 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22569 static void fast_copy_page(void *to, void *from)
22570 {
22571 int i;
22572 + unsigned long cr0;
22573
22574 kernel_fpu_begin();
22575
22576 __asm__ __volatile__ (
22577 - "1: prefetch (%0)\n"
22578 - " prefetch 64(%0)\n"
22579 - " prefetch 128(%0)\n"
22580 - " prefetch 192(%0)\n"
22581 - " prefetch 256(%0)\n"
22582 + "1: prefetch (%1)\n"
22583 + " prefetch 64(%1)\n"
22584 + " prefetch 128(%1)\n"
22585 + " prefetch 192(%1)\n"
22586 + " prefetch 256(%1)\n"
22587 "2: \n"
22588 ".section .fixup, \"ax\"\n"
22589 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22590 + "3: \n"
22591 +
22592 +#ifdef CONFIG_PAX_KERNEXEC
22593 + " movl %%cr0, %0\n"
22594 + " movl %0, %%eax\n"
22595 + " andl $0xFFFEFFFF, %%eax\n"
22596 + " movl %%eax, %%cr0\n"
22597 +#endif
22598 +
22599 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22600 +
22601 +#ifdef CONFIG_PAX_KERNEXEC
22602 + " movl %0, %%cr0\n"
22603 +#endif
22604 +
22605 " jmp 2b\n"
22606 ".previous\n"
22607 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22608 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22609
22610 for (i = 0; i < 4096/64; i++) {
22611 __asm__ __volatile__ (
22612 - "1: prefetch 320(%0)\n"
22613 - "2: movq (%0), %%mm0\n"
22614 - " movq 8(%0), %%mm1\n"
22615 - " movq 16(%0), %%mm2\n"
22616 - " movq 24(%0), %%mm3\n"
22617 - " movq %%mm0, (%1)\n"
22618 - " movq %%mm1, 8(%1)\n"
22619 - " movq %%mm2, 16(%1)\n"
22620 - " movq %%mm3, 24(%1)\n"
22621 - " movq 32(%0), %%mm0\n"
22622 - " movq 40(%0), %%mm1\n"
22623 - " movq 48(%0), %%mm2\n"
22624 - " movq 56(%0), %%mm3\n"
22625 - " movq %%mm0, 32(%1)\n"
22626 - " movq %%mm1, 40(%1)\n"
22627 - " movq %%mm2, 48(%1)\n"
22628 - " movq %%mm3, 56(%1)\n"
22629 + "1: prefetch 320(%1)\n"
22630 + "2: movq (%1), %%mm0\n"
22631 + " movq 8(%1), %%mm1\n"
22632 + " movq 16(%1), %%mm2\n"
22633 + " movq 24(%1), %%mm3\n"
22634 + " movq %%mm0, (%2)\n"
22635 + " movq %%mm1, 8(%2)\n"
22636 + " movq %%mm2, 16(%2)\n"
22637 + " movq %%mm3, 24(%2)\n"
22638 + " movq 32(%1), %%mm0\n"
22639 + " movq 40(%1), %%mm1\n"
22640 + " movq 48(%1), %%mm2\n"
22641 + " movq 56(%1), %%mm3\n"
22642 + " movq %%mm0, 32(%2)\n"
22643 + " movq %%mm1, 40(%2)\n"
22644 + " movq %%mm2, 48(%2)\n"
22645 + " movq %%mm3, 56(%2)\n"
22646 ".section .fixup, \"ax\"\n"
22647 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22648 + "3:\n"
22649 +
22650 +#ifdef CONFIG_PAX_KERNEXEC
22651 + " movl %%cr0, %0\n"
22652 + " movl %0, %%eax\n"
22653 + " andl $0xFFFEFFFF, %%eax\n"
22654 + " movl %%eax, %%cr0\n"
22655 +#endif
22656 +
22657 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22658 +
22659 +#ifdef CONFIG_PAX_KERNEXEC
22660 + " movl %0, %%cr0\n"
22661 +#endif
22662 +
22663 " jmp 2b\n"
22664 ".previous\n"
22665 _ASM_EXTABLE(1b, 3b)
22666 - : : "r" (from), "r" (to) : "memory");
22667 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22668
22669 from += 64;
22670 to += 64;
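
All of the mmx_32.c fixup paths now wrap their self-patching movw (which rewrites the prefetch into a short jmp) in the same CR0 dance, because under KERNEXEC that code sits in read-only kernel text: save CR0, clear the write-protect bit with the 0xFFFEFFFF mask, patch, restore. The bit arithmetic alone, as a user-space sketch (the CR0 value is made up; real CR0 access needs ring 0):

#include <stdio.h>

/* CR0.WP is bit 16; the patch clears it with this mask before patching text. */
#define CR0_WP_CLEAR_MASK 0xFFFEFFFFUL

int main(void)
{
	unsigned long cr0 = 0x80050033UL;	/* made-up CR0 value with WP set */
	unsigned long saved = cr0;		/* what the asm keeps in operand %0 */

	cr0 &= CR0_WP_CLEAR_MASK;		/* drop WP: kernel text becomes writable */
	/* ...the movw $0x1AEB / $0x05EB that rewrites the prefetch goes here... */
	cr0 = saved;				/* restore the original CR0 */

	printf("saved=%#lx patched-with=%#lx restored=%#lx\n",
	       saved, saved & CR0_WP_CLEAR_MASK, cr0);
	return 0;
}
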
22671 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22672 index 69fa106..adda88b 100644
22673 --- a/arch/x86/lib/msr-reg.S
22674 +++ b/arch/x86/lib/msr-reg.S
22675 @@ -3,6 +3,7 @@
22676 #include <asm/dwarf2.h>
22677 #include <asm/asm.h>
22678 #include <asm/msr.h>
22679 +#include <asm/alternative-asm.h>
22680
22681 #ifdef CONFIG_X86_64
22682 /*
22683 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22684 CFI_STARTPROC
22685 pushq_cfi %rbx
22686 pushq_cfi %rbp
22687 - movq %rdi, %r10 /* Save pointer */
22688 + movq %rdi, %r9 /* Save pointer */
22689 xorl %r11d, %r11d /* Return value */
22690 movl (%rdi), %eax
22691 movl 4(%rdi), %ecx
22692 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22693 movl 28(%rdi), %edi
22694 CFI_REMEMBER_STATE
22695 1: \op
22696 -2: movl %eax, (%r10)
22697 +2: movl %eax, (%r9)
22698 movl %r11d, %eax /* Return value */
22699 - movl %ecx, 4(%r10)
22700 - movl %edx, 8(%r10)
22701 - movl %ebx, 12(%r10)
22702 - movl %ebp, 20(%r10)
22703 - movl %esi, 24(%r10)
22704 - movl %edi, 28(%r10)
22705 + movl %ecx, 4(%r9)
22706 + movl %edx, 8(%r9)
22707 + movl %ebx, 12(%r9)
22708 + movl %ebp, 20(%r9)
22709 + movl %esi, 24(%r9)
22710 + movl %edi, 28(%r9)
22711 popq_cfi %rbp
22712 popq_cfi %rbx
22713 + pax_force_retaddr
22714 ret
22715 3:
22716 CFI_RESTORE_STATE
22717 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22718 index 36b0d15..d381858 100644
22719 --- a/arch/x86/lib/putuser.S
22720 +++ b/arch/x86/lib/putuser.S
22721 @@ -15,7 +15,9 @@
22722 #include <asm/thread_info.h>
22723 #include <asm/errno.h>
22724 #include <asm/asm.h>
22725 -
22726 +#include <asm/segment.h>
22727 +#include <asm/pgtable.h>
22728 +#include <asm/alternative-asm.h>
22729
22730 /*
22731 * __put_user_X
22732 @@ -29,52 +31,119 @@
22733 * as they get called from within inline assembly.
22734 */
22735
22736 -#define ENTER CFI_STARTPROC ; \
22737 - GET_THREAD_INFO(%_ASM_BX)
22738 -#define EXIT ret ; \
22739 +#define ENTER CFI_STARTPROC
22740 +#define EXIT pax_force_retaddr; ret ; \
22741 CFI_ENDPROC
22742
22743 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22744 +#define _DEST %_ASM_CX,%_ASM_BX
22745 +#else
22746 +#define _DEST %_ASM_CX
22747 +#endif
22748 +
22749 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22750 +#define __copyuser_seg gs;
22751 +#else
22752 +#define __copyuser_seg
22753 +#endif
22754 +
22755 .text
22756 ENTRY(__put_user_1)
22757 ENTER
22758 +
22759 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22760 + GET_THREAD_INFO(%_ASM_BX)
22761 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22762 jae bad_put_user
22763 -1: movb %al,(%_ASM_CX)
22764 +
22765 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22766 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22767 + cmp %_ASM_BX,%_ASM_CX
22768 + jb 1234f
22769 + xor %ebx,%ebx
22770 +1234:
22771 +#endif
22772 +
22773 +#endif
22774 +
22775 +1: __copyuser_seg movb %al,(_DEST)
22776 xor %eax,%eax
22777 EXIT
22778 ENDPROC(__put_user_1)
22779
22780 ENTRY(__put_user_2)
22781 ENTER
22782 +
22783 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22784 + GET_THREAD_INFO(%_ASM_BX)
22785 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22786 sub $1,%_ASM_BX
22787 cmp %_ASM_BX,%_ASM_CX
22788 jae bad_put_user
22789 -2: movw %ax,(%_ASM_CX)
22790 +
22791 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22792 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22793 + cmp %_ASM_BX,%_ASM_CX
22794 + jb 1234f
22795 + xor %ebx,%ebx
22796 +1234:
22797 +#endif
22798 +
22799 +#endif
22800 +
22801 +2: __copyuser_seg movw %ax,(_DEST)
22802 xor %eax,%eax
22803 EXIT
22804 ENDPROC(__put_user_2)
22805
22806 ENTRY(__put_user_4)
22807 ENTER
22808 +
22809 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22810 + GET_THREAD_INFO(%_ASM_BX)
22811 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22812 sub $3,%_ASM_BX
22813 cmp %_ASM_BX,%_ASM_CX
22814 jae bad_put_user
22815 -3: movl %eax,(%_ASM_CX)
22816 +
22817 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22818 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22819 + cmp %_ASM_BX,%_ASM_CX
22820 + jb 1234f
22821 + xor %ebx,%ebx
22822 +1234:
22823 +#endif
22824 +
22825 +#endif
22826 +
22827 +3: __copyuser_seg movl %eax,(_DEST)
22828 xor %eax,%eax
22829 EXIT
22830 ENDPROC(__put_user_4)
22831
22832 ENTRY(__put_user_8)
22833 ENTER
22834 +
22835 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22836 + GET_THREAD_INFO(%_ASM_BX)
22837 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22838 sub $7,%_ASM_BX
22839 cmp %_ASM_BX,%_ASM_CX
22840 jae bad_put_user
22841 -4: mov %_ASM_AX,(%_ASM_CX)
22842 +
22843 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22844 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22845 + cmp %_ASM_BX,%_ASM_CX
22846 + jb 1234f
22847 + xor %ebx,%ebx
22848 +1234:
22849 +#endif
22850 +
22851 +#endif
22852 +
22853 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22854 #ifdef CONFIG_X86_32
22855 -5: movl %edx,4(%_ASM_CX)
22856 +5: __copyuser_seg movl %edx,4(_DEST)
22857 #endif
22858 xor %eax,%eax
22859 EXIT
22860 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22861 index 1cad221..de671ee 100644
22862 --- a/arch/x86/lib/rwlock.S
22863 +++ b/arch/x86/lib/rwlock.S
22864 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22865 FRAME
22866 0: LOCK_PREFIX
22867 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22868 +
22869 +#ifdef CONFIG_PAX_REFCOUNT
22870 + jno 1234f
22871 + LOCK_PREFIX
22872 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22873 + int $4
22874 +1234:
22875 + _ASM_EXTABLE(1234b, 1234b)
22876 +#endif
22877 +
22878 1: rep; nop
22879 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22880 jne 1b
22881 LOCK_PREFIX
22882 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22883 +
22884 +#ifdef CONFIG_PAX_REFCOUNT
22885 + jno 1234f
22886 + LOCK_PREFIX
22887 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22888 + int $4
22889 +1234:
22890 + _ASM_EXTABLE(1234b, 1234b)
22891 +#endif
22892 +
22893 jnz 0b
22894 ENDFRAME
22895 + pax_force_retaddr
22896 ret
22897 CFI_ENDPROC
22898 END(__write_lock_failed)
22899 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22900 FRAME
22901 0: LOCK_PREFIX
22902 READ_LOCK_SIZE(inc) (%__lock_ptr)
22903 +
22904 +#ifdef CONFIG_PAX_REFCOUNT
22905 + jno 1234f
22906 + LOCK_PREFIX
22907 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22908 + int $4
22909 +1234:
22910 + _ASM_EXTABLE(1234b, 1234b)
22911 +#endif
22912 +
22913 1: rep; nop
22914 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22915 js 1b
22916 LOCK_PREFIX
22917 READ_LOCK_SIZE(dec) (%__lock_ptr)
22918 +
22919 +#ifdef CONFIG_PAX_REFCOUNT
22920 + jno 1234f
22921 + LOCK_PREFIX
22922 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22923 + int $4
22924 +1234:
22925 + _ASM_EXTABLE(1234b, 1234b)
22926 +#endif
22927 +
22928 js 0b
22929 ENDFRAME
22930 + pax_force_retaddr
22931 ret
22932 CFI_ENDPROC
22933 END(__read_lock_failed)
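
The PAX_REFCOUNT blocks added to both lock slow paths follow one pattern: perform the locked add/inc, and if the signed result overflowed (jno not taken) undo it and raise int $4 so the overflow handler can react. The detection half expressed in C, using a GCC builtin rather than the patch's asm mechanism (sketch only):

#include <stdio.h>

/* Sketch of an overflow-checked increment; the patch does this with jno/int $4
 * around the locked instruction instead of a builtin. */
static int checked_inc(int *counter)
{
	int next;

	if (__builtin_add_overflow(*counter, 1, &next))
		return -1;		/* asm path: undo the increment, then int $4 */
	*counter = next;
	return 0;
}

int main(void)
{
	int c = 0x7fffffff;
	printf("%d\n", checked_inc(&c));	/* reports the overflow */
	return 0;
}
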
22934 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22935 index 5dff5f0..cadebf4 100644
22936 --- a/arch/x86/lib/rwsem.S
22937 +++ b/arch/x86/lib/rwsem.S
22938 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22939 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22940 CFI_RESTORE __ASM_REG(dx)
22941 restore_common_regs
22942 + pax_force_retaddr
22943 ret
22944 CFI_ENDPROC
22945 ENDPROC(call_rwsem_down_read_failed)
22946 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22947 movq %rax,%rdi
22948 call rwsem_down_write_failed
22949 restore_common_regs
22950 + pax_force_retaddr
22951 ret
22952 CFI_ENDPROC
22953 ENDPROC(call_rwsem_down_write_failed)
22954 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22955 movq %rax,%rdi
22956 call rwsem_wake
22957 restore_common_regs
22958 -1: ret
22959 +1: pax_force_retaddr
22960 + ret
22961 CFI_ENDPROC
22962 ENDPROC(call_rwsem_wake)
22963
22964 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22965 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22966 CFI_RESTORE __ASM_REG(dx)
22967 restore_common_regs
22968 + pax_force_retaddr
22969 ret
22970 CFI_ENDPROC
22971 ENDPROC(call_rwsem_downgrade_wake)
22972 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22973 index a63efd6..ccecad8 100644
22974 --- a/arch/x86/lib/thunk_64.S
22975 +++ b/arch/x86/lib/thunk_64.S
22976 @@ -8,6 +8,7 @@
22977 #include <linux/linkage.h>
22978 #include <asm/dwarf2.h>
22979 #include <asm/calling.h>
22980 +#include <asm/alternative-asm.h>
22981
22982 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22983 .macro THUNK name, func, put_ret_addr_in_rdi=0
22984 @@ -41,5 +42,6 @@
22985 SAVE_ARGS
22986 restore:
22987 RESTORE_ARGS
22988 + pax_force_retaddr
22989 ret
22990 CFI_ENDPROC
22991 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22992 index e218d5d..a99a1eb 100644
22993 --- a/arch/x86/lib/usercopy_32.c
22994 +++ b/arch/x86/lib/usercopy_32.c
22995 @@ -43,7 +43,7 @@ do { \
22996 __asm__ __volatile__( \
22997 " testl %1,%1\n" \
22998 " jz 2f\n" \
22999 - "0: lodsb\n" \
23000 + "0: "__copyuser_seg"lodsb\n" \
23001 " stosb\n" \
23002 " testb %%al,%%al\n" \
23003 " jz 1f\n" \
23004 @@ -128,10 +128,12 @@ do { \
23005 int __d0; \
23006 might_fault(); \
23007 __asm__ __volatile__( \
23008 + __COPYUSER_SET_ES \
23009 "0: rep; stosl\n" \
23010 " movl %2,%0\n" \
23011 "1: rep; stosb\n" \
23012 "2:\n" \
23013 + __COPYUSER_RESTORE_ES \
23014 ".section .fixup,\"ax\"\n" \
23015 "3: lea 0(%2,%0,4),%0\n" \
23016 " jmp 2b\n" \
23017 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23018 might_fault();
23019
23020 __asm__ __volatile__(
23021 + __COPYUSER_SET_ES
23022 " testl %0, %0\n"
23023 " jz 3f\n"
23024 " andl %0,%%ecx\n"
23025 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23026 " subl %%ecx,%0\n"
23027 " addl %0,%%eax\n"
23028 "1:\n"
23029 + __COPYUSER_RESTORE_ES
23030 ".section .fixup,\"ax\"\n"
23031 "2: xorl %%eax,%%eax\n"
23032 " jmp 1b\n"
23033 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23034
23035 #ifdef CONFIG_X86_INTEL_USERCOPY
23036 static unsigned long
23037 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
23038 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23039 {
23040 int d0, d1;
23041 __asm__ __volatile__(
23042 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23043 " .align 2,0x90\n"
23044 "3: movl 0(%4), %%eax\n"
23045 "4: movl 4(%4), %%edx\n"
23046 - "5: movl %%eax, 0(%3)\n"
23047 - "6: movl %%edx, 4(%3)\n"
23048 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23049 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23050 "7: movl 8(%4), %%eax\n"
23051 "8: movl 12(%4),%%edx\n"
23052 - "9: movl %%eax, 8(%3)\n"
23053 - "10: movl %%edx, 12(%3)\n"
23054 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23055 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23056 "11: movl 16(%4), %%eax\n"
23057 "12: movl 20(%4), %%edx\n"
23058 - "13: movl %%eax, 16(%3)\n"
23059 - "14: movl %%edx, 20(%3)\n"
23060 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23061 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23062 "15: movl 24(%4), %%eax\n"
23063 "16: movl 28(%4), %%edx\n"
23064 - "17: movl %%eax, 24(%3)\n"
23065 - "18: movl %%edx, 28(%3)\n"
23066 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23067 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23068 "19: movl 32(%4), %%eax\n"
23069 "20: movl 36(%4), %%edx\n"
23070 - "21: movl %%eax, 32(%3)\n"
23071 - "22: movl %%edx, 36(%3)\n"
23072 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23073 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23074 "23: movl 40(%4), %%eax\n"
23075 "24: movl 44(%4), %%edx\n"
23076 - "25: movl %%eax, 40(%3)\n"
23077 - "26: movl %%edx, 44(%3)\n"
23078 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23079 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23080 "27: movl 48(%4), %%eax\n"
23081 "28: movl 52(%4), %%edx\n"
23082 - "29: movl %%eax, 48(%3)\n"
23083 - "30: movl %%edx, 52(%3)\n"
23084 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23085 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23086 "31: movl 56(%4), %%eax\n"
23087 "32: movl 60(%4), %%edx\n"
23088 - "33: movl %%eax, 56(%3)\n"
23089 - "34: movl %%edx, 60(%3)\n"
23090 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23091 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23092 " addl $-64, %0\n"
23093 " addl $64, %4\n"
23094 " addl $64, %3\n"
23095 @@ -278,10 +282,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23096 " shrl $2, %0\n"
23097 " andl $3, %%eax\n"
23098 " cld\n"
23099 + __COPYUSER_SET_ES
23100 "99: rep; movsl\n"
23101 "36: movl %%eax, %0\n"
23102 "37: rep; movsb\n"
23103 "100:\n"
23104 + __COPYUSER_RESTORE_ES
23105 ".section .fixup,\"ax\"\n"
23106 "101: lea 0(%%eax,%0,4),%0\n"
23107 " jmp 100b\n"
23108 @@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23109 }
23110
23111 static unsigned long
23112 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23113 +{
23114 + int d0, d1;
23115 + __asm__ __volatile__(
23116 + " .align 2,0x90\n"
23117 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23118 + " cmpl $67, %0\n"
23119 + " jbe 3f\n"
23120 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23121 + " .align 2,0x90\n"
23122 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23123 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23124 + "5: movl %%eax, 0(%3)\n"
23125 + "6: movl %%edx, 4(%3)\n"
23126 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23127 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23128 + "9: movl %%eax, 8(%3)\n"
23129 + "10: movl %%edx, 12(%3)\n"
23130 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23131 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23132 + "13: movl %%eax, 16(%3)\n"
23133 + "14: movl %%edx, 20(%3)\n"
23134 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23135 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23136 + "17: movl %%eax, 24(%3)\n"
23137 + "18: movl %%edx, 28(%3)\n"
23138 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23139 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23140 + "21: movl %%eax, 32(%3)\n"
23141 + "22: movl %%edx, 36(%3)\n"
23142 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23143 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23144 + "25: movl %%eax, 40(%3)\n"
23145 + "26: movl %%edx, 44(%3)\n"
23146 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23147 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23148 + "29: movl %%eax, 48(%3)\n"
23149 + "30: movl %%edx, 52(%3)\n"
23150 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23151 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23152 + "33: movl %%eax, 56(%3)\n"
23153 + "34: movl %%edx, 60(%3)\n"
23154 + " addl $-64, %0\n"
23155 + " addl $64, %4\n"
23156 + " addl $64, %3\n"
23157 + " cmpl $63, %0\n"
23158 + " ja 1b\n"
23159 + "35: movl %0, %%eax\n"
23160 + " shrl $2, %0\n"
23161 + " andl $3, %%eax\n"
23162 + " cld\n"
23163 + "99: rep; "__copyuser_seg" movsl\n"
23164 + "36: movl %%eax, %0\n"
23165 + "37: rep; "__copyuser_seg" movsb\n"
23166 + "100:\n"
23167 + ".section .fixup,\"ax\"\n"
23168 + "101: lea 0(%%eax,%0,4),%0\n"
23169 + " jmp 100b\n"
23170 + ".previous\n"
23171 + ".section __ex_table,\"a\"\n"
23172 + " .align 4\n"
23173 + " .long 1b,100b\n"
23174 + " .long 2b,100b\n"
23175 + " .long 3b,100b\n"
23176 + " .long 4b,100b\n"
23177 + " .long 5b,100b\n"
23178 + " .long 6b,100b\n"
23179 + " .long 7b,100b\n"
23180 + " .long 8b,100b\n"
23181 + " .long 9b,100b\n"
23182 + " .long 10b,100b\n"
23183 + " .long 11b,100b\n"
23184 + " .long 12b,100b\n"
23185 + " .long 13b,100b\n"
23186 + " .long 14b,100b\n"
23187 + " .long 15b,100b\n"
23188 + " .long 16b,100b\n"
23189 + " .long 17b,100b\n"
23190 + " .long 18b,100b\n"
23191 + " .long 19b,100b\n"
23192 + " .long 20b,100b\n"
23193 + " .long 21b,100b\n"
23194 + " .long 22b,100b\n"
23195 + " .long 23b,100b\n"
23196 + " .long 24b,100b\n"
23197 + " .long 25b,100b\n"
23198 + " .long 26b,100b\n"
23199 + " .long 27b,100b\n"
23200 + " .long 28b,100b\n"
23201 + " .long 29b,100b\n"
23202 + " .long 30b,100b\n"
23203 + " .long 31b,100b\n"
23204 + " .long 32b,100b\n"
23205 + " .long 33b,100b\n"
23206 + " .long 34b,100b\n"
23207 + " .long 35b,100b\n"
23208 + " .long 36b,100b\n"
23209 + " .long 37b,100b\n"
23210 + " .long 99b,101b\n"
23211 + ".previous"
23212 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23213 + : "1"(to), "2"(from), "0"(size)
23214 + : "eax", "edx", "memory");
23215 + return size;
23216 +}
23217 +
23218 +static unsigned long
23219 +__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23220 +static unsigned long
23221 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23222 {
23223 int d0, d1;
23224 __asm__ __volatile__(
23225 " .align 2,0x90\n"
23226 - "0: movl 32(%4), %%eax\n"
23227 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23228 " cmpl $67, %0\n"
23229 " jbe 2f\n"
23230 - "1: movl 64(%4), %%eax\n"
23231 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23232 " .align 2,0x90\n"
23233 - "2: movl 0(%4), %%eax\n"
23234 - "21: movl 4(%4), %%edx\n"
23235 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23236 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23237 " movl %%eax, 0(%3)\n"
23238 " movl %%edx, 4(%3)\n"
23239 - "3: movl 8(%4), %%eax\n"
23240 - "31: movl 12(%4),%%edx\n"
23241 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23242 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23243 " movl %%eax, 8(%3)\n"
23244 " movl %%edx, 12(%3)\n"
23245 - "4: movl 16(%4), %%eax\n"
23246 - "41: movl 20(%4), %%edx\n"
23247 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23248 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23249 " movl %%eax, 16(%3)\n"
23250 " movl %%edx, 20(%3)\n"
23251 - "10: movl 24(%4), %%eax\n"
23252 - "51: movl 28(%4), %%edx\n"
23253 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23254 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23255 " movl %%eax, 24(%3)\n"
23256 " movl %%edx, 28(%3)\n"
23257 - "11: movl 32(%4), %%eax\n"
23258 - "61: movl 36(%4), %%edx\n"
23259 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23260 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23261 " movl %%eax, 32(%3)\n"
23262 " movl %%edx, 36(%3)\n"
23263 - "12: movl 40(%4), %%eax\n"
23264 - "71: movl 44(%4), %%edx\n"
23265 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23266 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23267 " movl %%eax, 40(%3)\n"
23268 " movl %%edx, 44(%3)\n"
23269 - "13: movl 48(%4), %%eax\n"
23270 - "81: movl 52(%4), %%edx\n"
23271 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23272 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23273 " movl %%eax, 48(%3)\n"
23274 " movl %%edx, 52(%3)\n"
23275 - "14: movl 56(%4), %%eax\n"
23276 - "91: movl 60(%4), %%edx\n"
23277 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23278 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23279 " movl %%eax, 56(%3)\n"
23280 " movl %%edx, 60(%3)\n"
23281 " addl $-64, %0\n"
23282 @@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23283 " shrl $2, %0\n"
23284 " andl $3, %%eax\n"
23285 " cld\n"
23286 - "6: rep; movsl\n"
23287 + "6: rep; "__copyuser_seg" movsl\n"
23288 " movl %%eax,%0\n"
23289 - "7: rep; movsb\n"
23290 + "7: rep; "__copyuser_seg" movsb\n"
23291 "8:\n"
23292 ".section .fixup,\"ax\"\n"
23293 "9: lea 0(%%eax,%0,4),%0\n"
23294 @@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23295 */
23296
23297 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23298 + const void __user *from, unsigned long size) __size_overflow(3);
23299 +static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23300 const void __user *from, unsigned long size)
23301 {
23302 int d0, d1;
23303
23304 __asm__ __volatile__(
23305 " .align 2,0x90\n"
23306 - "0: movl 32(%4), %%eax\n"
23307 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23308 " cmpl $67, %0\n"
23309 " jbe 2f\n"
23310 - "1: movl 64(%4), %%eax\n"
23311 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23312 " .align 2,0x90\n"
23313 - "2: movl 0(%4), %%eax\n"
23314 - "21: movl 4(%4), %%edx\n"
23315 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23316 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23317 " movnti %%eax, 0(%3)\n"
23318 " movnti %%edx, 4(%3)\n"
23319 - "3: movl 8(%4), %%eax\n"
23320 - "31: movl 12(%4),%%edx\n"
23321 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23322 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23323 " movnti %%eax, 8(%3)\n"
23324 " movnti %%edx, 12(%3)\n"
23325 - "4: movl 16(%4), %%eax\n"
23326 - "41: movl 20(%4), %%edx\n"
23327 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23328 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23329 " movnti %%eax, 16(%3)\n"
23330 " movnti %%edx, 20(%3)\n"
23331 - "10: movl 24(%4), %%eax\n"
23332 - "51: movl 28(%4), %%edx\n"
23333 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23334 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23335 " movnti %%eax, 24(%3)\n"
23336 " movnti %%edx, 28(%3)\n"
23337 - "11: movl 32(%4), %%eax\n"
23338 - "61: movl 36(%4), %%edx\n"
23339 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23340 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23341 " movnti %%eax, 32(%3)\n"
23342 " movnti %%edx, 36(%3)\n"
23343 - "12: movl 40(%4), %%eax\n"
23344 - "71: movl 44(%4), %%edx\n"
23345 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23346 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23347 " movnti %%eax, 40(%3)\n"
23348 " movnti %%edx, 44(%3)\n"
23349 - "13: movl 48(%4), %%eax\n"
23350 - "81: movl 52(%4), %%edx\n"
23351 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23352 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23353 " movnti %%eax, 48(%3)\n"
23354 " movnti %%edx, 52(%3)\n"
23355 - "14: movl 56(%4), %%eax\n"
23356 - "91: movl 60(%4), %%edx\n"
23357 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23358 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23359 " movnti %%eax, 56(%3)\n"
23360 " movnti %%edx, 60(%3)\n"
23361 " addl $-64, %0\n"
23362 @@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23363 " shrl $2, %0\n"
23364 " andl $3, %%eax\n"
23365 " cld\n"
23366 - "6: rep; movsl\n"
23367 + "6: rep; "__copyuser_seg" movsl\n"
23368 " movl %%eax,%0\n"
23369 - "7: rep; movsb\n"
23370 + "7: rep; "__copyuser_seg" movsb\n"
23371 "8:\n"
23372 ".section .fixup,\"ax\"\n"
23373 "9: lea 0(%%eax,%0,4),%0\n"
23374 @@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23375 }
23376
23377 static unsigned long __copy_user_intel_nocache(void *to,
23378 + const void __user *from, unsigned long size) __size_overflow(3);
23379 +static unsigned long __copy_user_intel_nocache(void *to,
23380 const void __user *from, unsigned long size)
23381 {
23382 int d0, d1;
23383
23384 __asm__ __volatile__(
23385 " .align 2,0x90\n"
23386 - "0: movl 32(%4), %%eax\n"
23387 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23388 " cmpl $67, %0\n"
23389 " jbe 2f\n"
23390 - "1: movl 64(%4), %%eax\n"
23391 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23392 " .align 2,0x90\n"
23393 - "2: movl 0(%4), %%eax\n"
23394 - "21: movl 4(%4), %%edx\n"
23395 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23396 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23397 " movnti %%eax, 0(%3)\n"
23398 " movnti %%edx, 4(%3)\n"
23399 - "3: movl 8(%4), %%eax\n"
23400 - "31: movl 12(%4),%%edx\n"
23401 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23402 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23403 " movnti %%eax, 8(%3)\n"
23404 " movnti %%edx, 12(%3)\n"
23405 - "4: movl 16(%4), %%eax\n"
23406 - "41: movl 20(%4), %%edx\n"
23407 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23408 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23409 " movnti %%eax, 16(%3)\n"
23410 " movnti %%edx, 20(%3)\n"
23411 - "10: movl 24(%4), %%eax\n"
23412 - "51: movl 28(%4), %%edx\n"
23413 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23414 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23415 " movnti %%eax, 24(%3)\n"
23416 " movnti %%edx, 28(%3)\n"
23417 - "11: movl 32(%4), %%eax\n"
23418 - "61: movl 36(%4), %%edx\n"
23419 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23420 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23421 " movnti %%eax, 32(%3)\n"
23422 " movnti %%edx, 36(%3)\n"
23423 - "12: movl 40(%4), %%eax\n"
23424 - "71: movl 44(%4), %%edx\n"
23425 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23426 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23427 " movnti %%eax, 40(%3)\n"
23428 " movnti %%edx, 44(%3)\n"
23429 - "13: movl 48(%4), %%eax\n"
23430 - "81: movl 52(%4), %%edx\n"
23431 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23432 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23433 " movnti %%eax, 48(%3)\n"
23434 " movnti %%edx, 52(%3)\n"
23435 - "14: movl 56(%4), %%eax\n"
23436 - "91: movl 60(%4), %%edx\n"
23437 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23438 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23439 " movnti %%eax, 56(%3)\n"
23440 " movnti %%edx, 60(%3)\n"
23441 " addl $-64, %0\n"
23442 @@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23443 " shrl $2, %0\n"
23444 " andl $3, %%eax\n"
23445 " cld\n"
23446 - "6: rep; movsl\n"
23447 + "6: rep; "__copyuser_seg" movsl\n"
23448 " movl %%eax,%0\n"
23449 - "7: rep; movsb\n"
23450 + "7: rep; "__copyuser_seg" movsb\n"
23451 "8:\n"
23452 ".section .fixup,\"ax\"\n"
23453 "9: lea 0(%%eax,%0,4),%0\n"
23454 @@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23455 */
23456 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23457 unsigned long size);
23458 -unsigned long __copy_user_intel(void __user *to, const void *from,
23459 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23460 + unsigned long size);
23461 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23462 unsigned long size);
23463 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23464 const void __user *from, unsigned long size);
23465 #endif /* CONFIG_X86_INTEL_USERCOPY */
23466
23467 /* Generic arbitrary sized copy. */
23468 -#define __copy_user(to, from, size) \
23469 +#define __copy_user(to, from, size, prefix, set, restore) \
23470 do { \
23471 int __d0, __d1, __d2; \
23472 __asm__ __volatile__( \
23473 + set \
23474 " cmp $7,%0\n" \
23475 " jbe 1f\n" \
23476 " movl %1,%0\n" \
23477 " negl %0\n" \
23478 " andl $7,%0\n" \
23479 " subl %0,%3\n" \
23480 - "4: rep; movsb\n" \
23481 + "4: rep; "prefix"movsb\n" \
23482 " movl %3,%0\n" \
23483 " shrl $2,%0\n" \
23484 " andl $3,%3\n" \
23485 " .align 2,0x90\n" \
23486 - "0: rep; movsl\n" \
23487 + "0: rep; "prefix"movsl\n" \
23488 " movl %3,%0\n" \
23489 - "1: rep; movsb\n" \
23490 + "1: rep; "prefix"movsb\n" \
23491 "2:\n" \
23492 + restore \
23493 ".section .fixup,\"ax\"\n" \
23494 "5: addl %3,%0\n" \
23495 " jmp 2b\n" \
23496 @@ -682,14 +805,14 @@ do { \
23497 " negl %0\n" \
23498 " andl $7,%0\n" \
23499 " subl %0,%3\n" \
23500 - "4: rep; movsb\n" \
23501 + "4: rep; "__copyuser_seg"movsb\n" \
23502 " movl %3,%0\n" \
23503 " shrl $2,%0\n" \
23504 " andl $3,%3\n" \
23505 " .align 2,0x90\n" \
23506 - "0: rep; movsl\n" \
23507 + "0: rep; "__copyuser_seg"movsl\n" \
23508 " movl %3,%0\n" \
23509 - "1: rep; movsb\n" \
23510 + "1: rep; "__copyuser_seg"movsb\n" \
23511 "2:\n" \
23512 ".section .fixup,\"ax\"\n" \
23513 "5: addl %3,%0\n" \
23514 @@ -775,9 +898,9 @@ survive:
23515 }
23516 #endif
23517 if (movsl_is_ok(to, from, n))
23518 - __copy_user(to, from, n);
23519 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23520 else
23521 - n = __copy_user_intel(to, from, n);
23522 + n = __generic_copy_to_user_intel(to, from, n);
23523 return n;
23524 }
23525 EXPORT_SYMBOL(__copy_to_user_ll);
23526 @@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23527 unsigned long n)
23528 {
23529 if (movsl_is_ok(to, from, n))
23530 - __copy_user(to, from, n);
23531 + __copy_user(to, from, n, __copyuser_seg, "", "");
23532 else
23533 - n = __copy_user_intel((void __user *)to,
23534 - (const void *)from, n);
23535 + n = __generic_copy_from_user_intel(to, from, n);
23536 return n;
23537 }
23538 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23539 @@ -827,65 +949,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23540 if (n > 64 && cpu_has_xmm2)
23541 n = __copy_user_intel_nocache(to, from, n);
23542 else
23543 - __copy_user(to, from, n);
23544 + __copy_user(to, from, n, __copyuser_seg, "", "");
23545 #else
23546 - __copy_user(to, from, n);
23547 + __copy_user(to, from, n, __copyuser_seg, "", "");
23548 #endif
23549 return n;
23550 }
23551 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23552
23553 -/**
23554 - * copy_to_user: - Copy a block of data into user space.
23555 - * @to: Destination address, in user space.
23556 - * @from: Source address, in kernel space.
23557 - * @n: Number of bytes to copy.
23558 - *
23559 - * Context: User context only. This function may sleep.
23560 - *
23561 - * Copy data from kernel space to user space.
23562 - *
23563 - * Returns number of bytes that could not be copied.
23564 - * On success, this will be zero.
23565 - */
23566 -unsigned long
23567 -copy_to_user(void __user *to, const void *from, unsigned long n)
23568 -{
23569 - if (access_ok(VERIFY_WRITE, to, n))
23570 - n = __copy_to_user(to, from, n);
23571 - return n;
23572 -}
23573 -EXPORT_SYMBOL(copy_to_user);
23574 -
23575 -/**
23576 - * copy_from_user: - Copy a block of data from user space.
23577 - * @to: Destination address, in kernel space.
23578 - * @from: Source address, in user space.
23579 - * @n: Number of bytes to copy.
23580 - *
23581 - * Context: User context only. This function may sleep.
23582 - *
23583 - * Copy data from user space to kernel space.
23584 - *
23585 - * Returns number of bytes that could not be copied.
23586 - * On success, this will be zero.
23587 - *
23588 - * If some data could not be copied, this function will pad the copied
23589 - * data to the requested size using zero bytes.
23590 - */
23591 -unsigned long
23592 -_copy_from_user(void *to, const void __user *from, unsigned long n)
23593 -{
23594 - if (access_ok(VERIFY_READ, from, n))
23595 - n = __copy_from_user(to, from, n);
23596 - else
23597 - memset(to, 0, n);
23598 - return n;
23599 -}
23600 -EXPORT_SYMBOL(_copy_from_user);
23601 -
23602 void copy_from_user_overflow(void)
23603 {
23604 WARN(1, "Buffer overflow detected!\n");
23605 }
23606 EXPORT_SYMBOL(copy_from_user_overflow);
23607 +
23608 +void copy_to_user_overflow(void)
23609 +{
23610 + WARN(1, "Buffer overflow detected!\n");
23611 +}
23612 +EXPORT_SYMBOL(copy_to_user_overflow);
23613 +
23614 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23615 +void __set_fs(mm_segment_t x)
23616 +{
23617 + switch (x.seg) {
23618 + case 0:
23619 + loadsegment(gs, 0);
23620 + break;
23621 + case TASK_SIZE_MAX:
23622 + loadsegment(gs, __USER_DS);
23623 + break;
23624 + case -1UL:
23625 + loadsegment(gs, __KERNEL_DS);
23626 + break;
23627 + default:
23628 + BUG();
23629 + }
23630 + return;
23631 +}
23632 +EXPORT_SYMBOL(__set_fs);
23633 +
23634 +void set_fs(mm_segment_t x)
23635 +{
23636 + current_thread_info()->addr_limit = x;
23637 + __set_fs(x);
23638 +}
23639 +EXPORT_SYMBOL(set_fs);
23640 +#endif
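
The new 32-bit __set_fs keeps %gs consistent with addr_limit so the __copyuser_seg-prefixed accesses above are bounded by the right segment: a zero limit loads the null selector, TASK_SIZE_MAX loads __USER_DS, KERNEL_DS (-1UL) loads __KERNEL_DS, and anything else is a BUG(). The mapping alone, as a standalone sketch (symbolic results instead of real selector values):

#include <stdio.h>

enum gs_seg { GS_NULL, GS_USER_DS, GS_KERNEL_DS, GS_BUG };

/* Mirror of the switch in the new __set_fs(): addr_limit picks the %gs selector. */
static enum gs_seg gs_for_addr_limit(unsigned long seg, unsigned long task_size_max)
{
	if (seg == 0)
		return GS_NULL;		/* no userland access through %gs */
	if (seg == task_size_max)
		return GS_USER_DS;	/* the normal USER_DS limit */
	if (seg == ~0UL)
		return GS_KERNEL_DS;	/* set_fs(KERNEL_DS): whole address space */
	return GS_BUG;			/* the patch BUG()s on anything else */
}

int main(void)
{
	printf("%d\n", gs_for_addr_limit(~0UL, 0xc0000000UL));	/* -> GS_KERNEL_DS */
	return 0;
}
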
23641 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23642 index b7c2849..8633ad8 100644
23643 --- a/arch/x86/lib/usercopy_64.c
23644 +++ b/arch/x86/lib/usercopy_64.c
23645 @@ -42,6 +42,12 @@ long
23646 __strncpy_from_user(char *dst, const char __user *src, long count)
23647 {
23648 long res;
23649 +
23650 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23651 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23652 + src += PAX_USER_SHADOW_BASE;
23653 +#endif
23654 +
23655 __do_strncpy_from_user(dst, src, count, res);
23656 return res;
23657 }
23658 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23659 {
23660 long __d0;
23661 might_fault();
23662 +
23663 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23664 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23665 + addr += PAX_USER_SHADOW_BASE;
23666 +#endif
23667 +
23668 /* no memory constraint because it doesn't change any memory gcc knows
23669 about */
23670 asm volatile(
23671 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23672 }
23673 EXPORT_SYMBOL(strlen_user);
23674
23675 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23676 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23677 {
23678 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23679 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23680 - }
23681 - return len;
23682 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23683 +
23684 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23685 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23686 + to += PAX_USER_SHADOW_BASE;
23687 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23688 + from += PAX_USER_SHADOW_BASE;
23689 +#endif
23690 +
23691 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23692 + }
23693 + return len;
23694 }
23695 EXPORT_SYMBOL(copy_in_user);
23696
23697 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23698 * it is not necessary to optimize tail handling.
23699 */
23700 unsigned long
23701 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23702 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23703 {
23704 char c;
23705 unsigned zero_len;
23706 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23707 index 1fb85db..8b3540b 100644
23708 --- a/arch/x86/mm/extable.c
23709 +++ b/arch/x86/mm/extable.c
23710 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23711 const struct exception_table_entry *fixup;
23712
23713 #ifdef CONFIG_PNPBIOS
23714 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23715 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23716 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23717 extern u32 pnp_bios_is_utter_crap;
23718 pnp_bios_is_utter_crap = 1;
23719 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23720 index f0b4caf..d92fd42 100644
23721 --- a/arch/x86/mm/fault.c
23722 +++ b/arch/x86/mm/fault.c
23723 @@ -13,11 +13,18 @@
23724 #include <linux/perf_event.h> /* perf_sw_event */
23725 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23726 #include <linux/prefetch.h> /* prefetchw */
23727 +#include <linux/unistd.h>
23728 +#include <linux/compiler.h>
23729
23730 #include <asm/traps.h> /* dotraplinkage, ... */
23731 #include <asm/pgalloc.h> /* pgd_*(), ... */
23732 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23733 #include <asm/fixmap.h> /* VSYSCALL_START */
23734 +#include <asm/tlbflush.h>
23735 +
23736 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23737 +#include <asm/stacktrace.h>
23738 +#endif
23739
23740 /*
23741 * Page fault error code bits:
23742 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23743 int ret = 0;
23744
23745 /* kprobe_running() needs smp_processor_id() */
23746 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23747 + if (kprobes_built_in() && !user_mode(regs)) {
23748 preempt_disable();
23749 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23750 ret = 1;
23751 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23752 return !instr_lo || (instr_lo>>1) == 1;
23753 case 0x00:
23754 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23755 - if (probe_kernel_address(instr, opcode))
23756 + if (user_mode(regs)) {
23757 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23758 + return 0;
23759 + } else if (probe_kernel_address(instr, opcode))
23760 return 0;
23761
23762 *prefetch = (instr_lo == 0xF) &&
23763 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23764 while (instr < max_instr) {
23765 unsigned char opcode;
23766
23767 - if (probe_kernel_address(instr, opcode))
23768 + if (user_mode(regs)) {
23769 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23770 + break;
23771 + } else if (probe_kernel_address(instr, opcode))
23772 break;
23773
23774 instr++;
23775 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23776 force_sig_info(si_signo, &info, tsk);
23777 }
23778
23779 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23780 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23781 +#endif
23782 +
23783 +#ifdef CONFIG_PAX_EMUTRAMP
23784 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23785 +#endif
23786 +
23787 +#ifdef CONFIG_PAX_PAGEEXEC
23788 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23789 +{
23790 + pgd_t *pgd;
23791 + pud_t *pud;
23792 + pmd_t *pmd;
23793 +
23794 + pgd = pgd_offset(mm, address);
23795 + if (!pgd_present(*pgd))
23796 + return NULL;
23797 + pud = pud_offset(pgd, address);
23798 + if (!pud_present(*pud))
23799 + return NULL;
23800 + pmd = pmd_offset(pud, address);
23801 + if (!pmd_present(*pmd))
23802 + return NULL;
23803 + return pmd;
23804 +}
23805 +#endif
23806 +
23807 DEFINE_SPINLOCK(pgd_lock);
23808 LIST_HEAD(pgd_list);
23809
23810 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23811 for (address = VMALLOC_START & PMD_MASK;
23812 address >= TASK_SIZE && address < FIXADDR_TOP;
23813 address += PMD_SIZE) {
23814 +
23815 +#ifdef CONFIG_PAX_PER_CPU_PGD
23816 + unsigned long cpu;
23817 +#else
23818 struct page *page;
23819 +#endif
23820
23821 spin_lock(&pgd_lock);
23822 +
23823 +#ifdef CONFIG_PAX_PER_CPU_PGD
23824 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23825 + pgd_t *pgd = get_cpu_pgd(cpu);
23826 + pmd_t *ret;
23827 +#else
23828 list_for_each_entry(page, &pgd_list, lru) {
23829 + pgd_t *pgd = page_address(page);
23830 spinlock_t *pgt_lock;
23831 pmd_t *ret;
23832
23833 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23834 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23835
23836 spin_lock(pgt_lock);
23837 - ret = vmalloc_sync_one(page_address(page), address);
23838 +#endif
23839 +
23840 + ret = vmalloc_sync_one(pgd, address);
23841 +
23842 +#ifndef CONFIG_PAX_PER_CPU_PGD
23843 spin_unlock(pgt_lock);
23844 +#endif
23845
23846 if (!ret)
23847 break;
23848 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23849 * an interrupt in the middle of a task switch..
23850 */
23851 pgd_paddr = read_cr3();
23852 +
23853 +#ifdef CONFIG_PAX_PER_CPU_PGD
23854 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23855 +#endif
23856 +
23857 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23858 if (!pmd_k)
23859 return -1;
23860 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23861 * happen within a race in page table update. In the later
23862 * case just flush:
23863 */
23864 +
23865 +#ifdef CONFIG_PAX_PER_CPU_PGD
23866 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23867 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23868 +#else
23869 pgd = pgd_offset(current->active_mm, address);
23870 +#endif
23871 +
23872 pgd_ref = pgd_offset_k(address);
23873 if (pgd_none(*pgd_ref))
23874 return -1;
23875 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23876 static int is_errata100(struct pt_regs *regs, unsigned long address)
23877 {
23878 #ifdef CONFIG_X86_64
23879 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23880 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23881 return 1;
23882 #endif
23883 return 0;
23884 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23885 }
23886
23887 static const char nx_warning[] = KERN_CRIT
23888 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23889 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23890
23891 static void
23892 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23893 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23894 if (!oops_may_print())
23895 return;
23896
23897 - if (error_code & PF_INSTR) {
23898 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23899 unsigned int level;
23900
23901 pte_t *pte = lookup_address(address, &level);
23902
23903 if (pte && pte_present(*pte) && !pte_exec(*pte))
23904 - printk(nx_warning, current_uid());
23905 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23906 }
23907
23908 +#ifdef CONFIG_PAX_KERNEXEC
23909 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23910 + if (current->signal->curr_ip)
23911 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23912 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23913 + else
23914 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23915 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23916 + }
23917 +#endif
23918 +
23919 printk(KERN_ALERT "BUG: unable to handle kernel ");
23920 if (address < PAGE_SIZE)
23921 printk(KERN_CONT "NULL pointer dereference");
23922 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23923 }
23924 #endif
23925
23926 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23927 + if (pax_is_fetch_fault(regs, error_code, address)) {
23928 +
23929 +#ifdef CONFIG_PAX_EMUTRAMP
23930 + switch (pax_handle_fetch_fault(regs)) {
23931 + case 2:
23932 + return;
23933 + }
23934 +#endif
23935 +
23936 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23937 + do_group_exit(SIGKILL);
23938 + }
23939 +#endif
23940 +
23941 if (unlikely(show_unhandled_signals))
23942 show_signal_msg(regs, error_code, address, tsk);
23943
23944 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23945 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23946 printk(KERN_ERR
23947 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23948 - tsk->comm, tsk->pid, address);
23949 + tsk->comm, task_pid_nr(tsk), address);
23950 code = BUS_MCEERR_AR;
23951 }
23952 #endif
23953 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23954 return 1;
23955 }
23956
23957 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23958 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23959 +{
23960 + pte_t *pte;
23961 + pmd_t *pmd;
23962 + spinlock_t *ptl;
23963 + unsigned char pte_mask;
23964 +
23965 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23966 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23967 + return 0;
23968 +
23969 + /* PaX: it's our fault, let's handle it if we can */
23970 +
23971 + /* PaX: take a look at read faults before acquiring any locks */
23972 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23973 + /* instruction fetch attempt from a protected page in user mode */
23974 + up_read(&mm->mmap_sem);
23975 +
23976 +#ifdef CONFIG_PAX_EMUTRAMP
23977 + switch (pax_handle_fetch_fault(regs)) {
23978 + case 2:
23979 + return 1;
23980 + }
23981 +#endif
23982 +
23983 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23984 + do_group_exit(SIGKILL);
23985 + }
23986 +
23987 + pmd = pax_get_pmd(mm, address);
23988 + if (unlikely(!pmd))
23989 + return 0;
23990 +
23991 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23992 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23993 + pte_unmap_unlock(pte, ptl);
23994 + return 0;
23995 + }
23996 +
23997 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23998 + /* write attempt to a protected page in user mode */
23999 + pte_unmap_unlock(pte, ptl);
24000 + return 0;
24001 + }
24002 +
24003 +#ifdef CONFIG_SMP
24004 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24005 +#else
24006 + if (likely(address > get_limit(regs->cs)))
24007 +#endif
24008 + {
24009 + set_pte(pte, pte_mkread(*pte));
24010 + __flush_tlb_one(address);
24011 + pte_unmap_unlock(pte, ptl);
24012 + up_read(&mm->mmap_sem);
24013 + return 1;
24014 + }
24015 +
24016 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24017 +
24018 + /*
24019 + * PaX: fill DTLB with user rights and retry
24020 + */
24021 + __asm__ __volatile__ (
24022 + "orb %2,(%1)\n"
24023 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24024 +/*
24025 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24026 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24027 + * page fault when examined during a TLB load attempt. this is true not only
24028 + * for PTEs holding a non-present entry but also present entries that will
24029 + * raise a page fault (such as those set up by PaX, or the copy-on-write
24030 + * mechanism). in effect it means that we do *not* need to flush the TLBs
24031 + * for our target pages since their PTEs are simply not in the TLBs at all.
24032 +
24033 + * the best thing in omitting it is that we gain around 15-20% speed in the
24034 + * fast path of the page fault handler and can get rid of tracing since we
24035 + * can no longer flush unintended entries.
24036 + */
24037 + "invlpg (%0)\n"
24038 +#endif
24039 + __copyuser_seg"testb $0,(%0)\n"
24040 + "xorb %3,(%1)\n"
24041 + :
24042 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24043 + : "memory", "cc");
24044 + pte_unmap_unlock(pte, ptl);
24045 + up_read(&mm->mmap_sem);
24046 + return 1;
24047 +}
24048 +#endif
24049 +
24050 /*
24051 * Handle a spurious fault caused by a stale TLB entry.
24052 *
24053 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
24054 static inline int
24055 access_error(unsigned long error_code, struct vm_area_struct *vma)
24056 {
24057 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24058 + return 1;
24059 +
24060 if (error_code & PF_WRITE) {
24061 /* write, present and write, not present: */
24062 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24063 @@ -1005,18 +1197,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24064 {
24065 struct vm_area_struct *vma;
24066 struct task_struct *tsk;
24067 - unsigned long address;
24068 struct mm_struct *mm;
24069 int fault;
24070 int write = error_code & PF_WRITE;
24071 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
24072 (write ? FAULT_FLAG_WRITE : 0);
24073
24074 - tsk = current;
24075 - mm = tsk->mm;
24076 -
24077 /* Get the faulting address: */
24078 - address = read_cr2();
24079 + unsigned long address = read_cr2();
24080 +
24081 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24082 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24083 + if (!search_exception_tables(regs->ip)) {
24084 + bad_area_nosemaphore(regs, error_code, address);
24085 + return;
24086 + }
24087 + if (address < PAX_USER_SHADOW_BASE) {
24088 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24089 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
24090 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24091 + } else
24092 + address -= PAX_USER_SHADOW_BASE;
24093 + }
24094 +#endif
24095 +
24096 + tsk = current;
24097 + mm = tsk->mm;
24098
24099 /*
24100 * Detect and handle instructions that would cause a page fault for
24101 @@ -1077,7 +1283,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24102 * User-mode registers count as a user access even for any
24103 * potential system fault or CPU buglet:
24104 */
24105 - if (user_mode_vm(regs)) {
24106 + if (user_mode(regs)) {
24107 local_irq_enable();
24108 error_code |= PF_USER;
24109 } else {
24110 @@ -1132,6 +1338,11 @@ retry:
24111 might_sleep();
24112 }
24113
24114 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24115 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24116 + return;
24117 +#endif
24118 +
24119 vma = find_vma(mm, address);
24120 if (unlikely(!vma)) {
24121 bad_area(regs, error_code, address);
24122 @@ -1143,18 +1354,24 @@ retry:
24123 bad_area(regs, error_code, address);
24124 return;
24125 }
24126 - if (error_code & PF_USER) {
24127 - /*
24128 - * Accessing the stack below %sp is always a bug.
24129 - * The large cushion allows instructions like enter
24130 - * and pusha to work. ("enter $65535, $31" pushes
24131 - * 32 pointers and then decrements %sp by 65535.)
24132 - */
24133 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24134 - bad_area(regs, error_code, address);
24135 - return;
24136 - }
24137 + /*
24138 + * Accessing the stack below %sp is always a bug.
24139 + * The large cushion allows instructions like enter
24140 + * and pusha to work. ("enter $65535, $31" pushes
24141 + * 32 pointers and then decrements %sp by 65535.)
24142 + */
24143 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24144 + bad_area(regs, error_code, address);
24145 + return;
24146 }
24147 +
24148 +#ifdef CONFIG_PAX_SEGMEXEC
24149 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24150 + bad_area(regs, error_code, address);
24151 + return;
24152 + }
24153 +#endif
24154 +
24155 if (unlikely(expand_stack(vma, address))) {
24156 bad_area(regs, error_code, address);
24157 return;
24158 @@ -1209,3 +1426,292 @@ good_area:
24159
24160 up_read(&mm->mmap_sem);
24161 }
24162 +
24163 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24164 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24165 +{
24166 + struct mm_struct *mm = current->mm;
24167 + unsigned long ip = regs->ip;
24168 +
24169 + if (v8086_mode(regs))
24170 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24171 +
24172 +#ifdef CONFIG_PAX_PAGEEXEC
24173 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24174 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24175 + return true;
24176 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24177 + return true;
24178 + return false;
24179 + }
24180 +#endif
24181 +
24182 +#ifdef CONFIG_PAX_SEGMEXEC
24183 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24184 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24185 + return true;
24186 + return false;
24187 + }
24188 +#endif
24189 +
24190 + return false;
24191 +}
24192 +#endif
24193 +
24194 +#ifdef CONFIG_PAX_EMUTRAMP
24195 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24196 +{
24197 + int err;
24198 +
24199 + do { /* PaX: libffi trampoline emulation */
24200 + unsigned char mov, jmp;
24201 + unsigned int addr1, addr2;
24202 +
24203 +#ifdef CONFIG_X86_64
24204 + if ((regs->ip + 9) >> 32)
24205 + break;
24206 +#endif
24207 +
24208 + err = get_user(mov, (unsigned char __user *)regs->ip);
24209 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24210 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24211 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24212 +
24213 + if (err)
24214 + break;
24215 +
24216 + if (mov == 0xB8 && jmp == 0xE9) {
24217 + regs->ax = addr1;
24218 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24219 + return 2;
24220 + }
24221 + } while (0);
24222 +
24223 + do { /* PaX: gcc trampoline emulation #1 */
24224 + unsigned char mov1, mov2;
24225 + unsigned short jmp;
24226 + unsigned int addr1, addr2;
24227 +
24228 +#ifdef CONFIG_X86_64
24229 + if ((regs->ip + 11) >> 32)
24230 + break;
24231 +#endif
24232 +
24233 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24234 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24235 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24236 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24237 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24238 +
24239 + if (err)
24240 + break;
24241 +
24242 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24243 + regs->cx = addr1;
24244 + regs->ax = addr2;
24245 + regs->ip = addr2;
24246 + return 2;
24247 + }
24248 + } while (0);
24249 +
24250 + do { /* PaX: gcc trampoline emulation #2 */
24251 + unsigned char mov, jmp;
24252 + unsigned int addr1, addr2;
24253 +
24254 +#ifdef CONFIG_X86_64
24255 + if ((regs->ip + 9) >> 32)
24256 + break;
24257 +#endif
24258 +
24259 + err = get_user(mov, (unsigned char __user *)regs->ip);
24260 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24261 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24262 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24263 +
24264 + if (err)
24265 + break;
24266 +
24267 + if (mov == 0xB9 && jmp == 0xE9) {
24268 + regs->cx = addr1;
24269 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24270 + return 2;
24271 + }
24272 + } while (0);
24273 +
24274 + return 1; /* PaX in action */
24275 +}
24276 +
24277 +#ifdef CONFIG_X86_64
24278 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24279 +{
24280 + int err;
24281 +
24282 + do { /* PaX: libffi trampoline emulation */
24283 + unsigned short mov1, mov2, jmp1;
24284 + unsigned char stcclc, jmp2;
24285 + unsigned long addr1, addr2;
24286 +
24287 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24288 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24289 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24290 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24291 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24292 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24293 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24294 +
24295 + if (err)
24296 + break;
24297 +
24298 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24299 + regs->r11 = addr1;
24300 + regs->r10 = addr2;
24301 + if (stcclc == 0xF8)
24302 + regs->flags &= ~X86_EFLAGS_CF;
24303 + else
24304 + regs->flags |= X86_EFLAGS_CF;
24305 + regs->ip = addr1;
24306 + return 2;
24307 + }
24308 + } while (0);
24309 +
24310 + do { /* PaX: gcc trampoline emulation #1 */
24311 + unsigned short mov1, mov2, jmp1;
24312 + unsigned char jmp2;
24313 + unsigned int addr1;
24314 + unsigned long addr2;
24315 +
24316 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24317 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24318 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24319 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24320 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24321 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24322 +
24323 + if (err)
24324 + break;
24325 +
24326 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24327 + regs->r11 = addr1;
24328 + regs->r10 = addr2;
24329 + regs->ip = addr1;
24330 + return 2;
24331 + }
24332 + } while (0);
24333 +
24334 + do { /* PaX: gcc trampoline emulation #2 */
24335 + unsigned short mov1, mov2, jmp1;
24336 + unsigned char jmp2;
24337 + unsigned long addr1, addr2;
24338 +
24339 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24340 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24341 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24342 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24343 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24344 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24345 +
24346 + if (err)
24347 + break;
24348 +
24349 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24350 + regs->r11 = addr1;
24351 + regs->r10 = addr2;
24352 + regs->ip = addr1;
24353 + return 2;
24354 + }
24355 + } while (0);
24356 +
24357 + return 1; /* PaX in action */
24358 +}
24359 +#endif
24360 +
24361 +/*
24362 + * PaX: decide what to do with offenders (regs->ip = fault address)
24363 + *
24364 + * returns 1 when task should be killed
24365 + * 2 when gcc trampoline was detected
24366 + */
24367 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24368 +{
24369 + if (v8086_mode(regs))
24370 + return 1;
24371 +
24372 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24373 + return 1;
24374 +
24375 +#ifdef CONFIG_X86_32
24376 + return pax_handle_fetch_fault_32(regs);
24377 +#else
24378 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24379 + return pax_handle_fetch_fault_32(regs);
24380 + else
24381 + return pax_handle_fetch_fault_64(regs);
24382 +#endif
24383 +}
24384 +#endif
24385 +
24386 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24387 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24388 +{
24389 + long i;
24390 +
24391 + printk(KERN_ERR "PAX: bytes at PC: ");
24392 + for (i = 0; i < 20; i++) {
24393 + unsigned char c;
24394 + if (get_user(c, (unsigned char __force_user *)pc+i))
24395 + printk(KERN_CONT "?? ");
24396 + else
24397 + printk(KERN_CONT "%02x ", c);
24398 + }
24399 + printk("\n");
24400 +
24401 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24402 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24403 + unsigned long c;
24404 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24405 +#ifdef CONFIG_X86_32
24406 + printk(KERN_CONT "???????? ");
24407 +#else
24408 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24409 + printk(KERN_CONT "???????? ???????? ");
24410 + else
24411 + printk(KERN_CONT "???????????????? ");
24412 +#endif
24413 + } else {
24414 +#ifdef CONFIG_X86_64
24415 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24416 + printk(KERN_CONT "%08x ", (unsigned int)c);
24417 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24418 + } else
24419 +#endif
24420 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24421 + }
24422 + }
24423 + printk("\n");
24424 +}
24425 +#endif
24426 +
24427 +/**
24428 + * probe_kernel_write(): safely attempt to write to a location
24429 + * @dst: address to write to
24430 + * @src: pointer to the data that shall be written
24431 + * @size: size of the data chunk
24432 + *
24433 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24434 + * happens, handle that and return -EFAULT.
24435 + */
24436 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24437 +{
24438 + long ret;
24439 + mm_segment_t old_fs = get_fs();
24440 +
24441 + set_fs(KERNEL_DS);
24442 + pagefault_disable();
24443 + pax_open_kernel();
24444 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24445 + pax_close_kernel();
24446 + pagefault_enable();
24447 + set_fs(old_fs);
24448 +
24449 + return ret ? -EFAULT : 0;
24450 +}
24451 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24452 index dd74e46..7d26398 100644
24453 --- a/arch/x86/mm/gup.c
24454 +++ b/arch/x86/mm/gup.c
24455 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24456 addr = start;
24457 len = (unsigned long) nr_pages << PAGE_SHIFT;
24458 end = start + len;
24459 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24460 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24461 (void __user *)start, len)))
24462 return 0;
24463
24464 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24465 index f4f29b1..5cac4fb 100644
24466 --- a/arch/x86/mm/highmem_32.c
24467 +++ b/arch/x86/mm/highmem_32.c
24468 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24469 idx = type + KM_TYPE_NR*smp_processor_id();
24470 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24471 BUG_ON(!pte_none(*(kmap_pte-idx)));
24472 +
24473 + pax_open_kernel();
24474 set_pte(kmap_pte-idx, mk_pte(page, prot));
24475 + pax_close_kernel();
24476 +
24477 arch_flush_lazy_mmu_mode();
24478
24479 return (void *)vaddr;
24480 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24481 index 8ecbb4b..a269cab 100644
24482 --- a/arch/x86/mm/hugetlbpage.c
24483 +++ b/arch/x86/mm/hugetlbpage.c
24484 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24485 struct hstate *h = hstate_file(file);
24486 struct mm_struct *mm = current->mm;
24487 struct vm_area_struct *vma;
24488 - unsigned long start_addr;
24489 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24490 +
24491 +#ifdef CONFIG_PAX_SEGMEXEC
24492 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24493 + pax_task_size = SEGMEXEC_TASK_SIZE;
24494 +#endif
24495 +
24496 + pax_task_size -= PAGE_SIZE;
24497
24498 if (len > mm->cached_hole_size) {
24499 - start_addr = mm->free_area_cache;
24500 + start_addr = mm->free_area_cache;
24501 } else {
24502 - start_addr = TASK_UNMAPPED_BASE;
24503 - mm->cached_hole_size = 0;
24504 + start_addr = mm->mmap_base;
24505 + mm->cached_hole_size = 0;
24506 }
24507
24508 full_search:
24509 @@ -280,26 +287,27 @@ full_search:
24510
24511 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24512 /* At this point: (!vma || addr < vma->vm_end). */
24513 - if (TASK_SIZE - len < addr) {
24514 + if (pax_task_size - len < addr) {
24515 /*
24516 * Start a new search - just in case we missed
24517 * some holes.
24518 */
24519 - if (start_addr != TASK_UNMAPPED_BASE) {
24520 - start_addr = TASK_UNMAPPED_BASE;
24521 + if (start_addr != mm->mmap_base) {
24522 + start_addr = mm->mmap_base;
24523 mm->cached_hole_size = 0;
24524 goto full_search;
24525 }
24526 return -ENOMEM;
24527 }
24528 - if (!vma || addr + len <= vma->vm_start) {
24529 - mm->free_area_cache = addr + len;
24530 - return addr;
24531 - }
24532 + if (check_heap_stack_gap(vma, addr, len))
24533 + break;
24534 if (addr + mm->cached_hole_size < vma->vm_start)
24535 mm->cached_hole_size = vma->vm_start - addr;
24536 addr = ALIGN(vma->vm_end, huge_page_size(h));
24537 }
24538 +
24539 + mm->free_area_cache = addr + len;
24540 + return addr;
24541 }
24542
24543 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24544 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24545 {
24546 struct hstate *h = hstate_file(file);
24547 struct mm_struct *mm = current->mm;
24548 - struct vm_area_struct *vma, *prev_vma;
24549 - unsigned long base = mm->mmap_base, addr = addr0;
24550 + struct vm_area_struct *vma;
24551 + unsigned long base = mm->mmap_base, addr;
24552 unsigned long largest_hole = mm->cached_hole_size;
24553 - int first_time = 1;
24554
24555 /* don't allow allocations above current base */
24556 if (mm->free_area_cache > base)
24557 @@ -321,14 +328,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24558 largest_hole = 0;
24559 mm->free_area_cache = base;
24560 }
24561 -try_again:
24562 +
24563 /* make sure it can fit in the remaining address space */
24564 if (mm->free_area_cache < len)
24565 goto fail;
24566
24567 /* either no address requested or can't fit in requested address hole */
24568 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24569 + addr = (mm->free_area_cache - len);
24570 do {
24571 + addr &= huge_page_mask(h);
24572 /*
24573 * Lookup failure means no vma is above this address,
24574 * i.e. return with success:
24575 @@ -341,46 +349,47 @@ try_again:
24576 * new region fits between prev_vma->vm_end and
24577 * vma->vm_start, use it:
24578 */
24579 - prev_vma = vma->vm_prev;
24580 - if (addr + len <= vma->vm_start &&
24581 - (!prev_vma || (addr >= prev_vma->vm_end))) {
24582 + if (check_heap_stack_gap(vma, addr, len)) {
24583 /* remember the address as a hint for next time */
24584 - mm->cached_hole_size = largest_hole;
24585 - return (mm->free_area_cache = addr);
24586 - } else {
24587 - /* pull free_area_cache down to the first hole */
24588 - if (mm->free_area_cache == vma->vm_end) {
24589 - mm->free_area_cache = vma->vm_start;
24590 - mm->cached_hole_size = largest_hole;
24591 - }
24592 + mm->cached_hole_size = largest_hole;
24593 + return (mm->free_area_cache = addr);
24594 + }
24595 + /* pull free_area_cache down to the first hole */
24596 + if (mm->free_area_cache == vma->vm_end) {
24597 + mm->free_area_cache = vma->vm_start;
24598 + mm->cached_hole_size = largest_hole;
24599 }
24600
24601 /* remember the largest hole we saw so far */
24602 if (addr + largest_hole < vma->vm_start)
24603 - largest_hole = vma->vm_start - addr;
24604 + largest_hole = vma->vm_start - addr;
24605
24606 /* try just below the current vma->vm_start */
24607 - addr = (vma->vm_start - len) & huge_page_mask(h);
24608 - } while (len <= vma->vm_start);
24609 + addr = skip_heap_stack_gap(vma, len);
24610 + } while (!IS_ERR_VALUE(addr));
24611
24612 fail:
24613 /*
24614 - * if hint left us with no space for the requested
24615 - * mapping then try again:
24616 - */
24617 - if (first_time) {
24618 - mm->free_area_cache = base;
24619 - largest_hole = 0;
24620 - first_time = 0;
24621 - goto try_again;
24622 - }
24623 - /*
24624 * A failed mmap() very likely causes application failure,
24625 * so fall back to the bottom-up function here. This scenario
24626 * can happen with large stack limits and large mmap()
24627 * allocations.
24628 */
24629 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24630 +
24631 +#ifdef CONFIG_PAX_SEGMEXEC
24632 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24633 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24634 + else
24635 +#endif
24636 +
24637 + mm->mmap_base = TASK_UNMAPPED_BASE;
24638 +
24639 +#ifdef CONFIG_PAX_RANDMMAP
24640 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24641 + mm->mmap_base += mm->delta_mmap;
24642 +#endif
24643 +
24644 + mm->free_area_cache = mm->mmap_base;
24645 mm->cached_hole_size = ~0UL;
24646 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24647 len, pgoff, flags);
24648 @@ -388,6 +397,7 @@ fail:
24649 /*
24650 * Restore the topdown base:
24651 */
24652 + mm->mmap_base = base;
24653 mm->free_area_cache = base;
24654 mm->cached_hole_size = ~0UL;
24655
24656 @@ -401,10 +411,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24657 struct hstate *h = hstate_file(file);
24658 struct mm_struct *mm = current->mm;
24659 struct vm_area_struct *vma;
24660 + unsigned long pax_task_size = TASK_SIZE;
24661
24662 if (len & ~huge_page_mask(h))
24663 return -EINVAL;
24664 - if (len > TASK_SIZE)
24665 +
24666 +#ifdef CONFIG_PAX_SEGMEXEC
24667 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24668 + pax_task_size = SEGMEXEC_TASK_SIZE;
24669 +#endif
24670 +
24671 + pax_task_size -= PAGE_SIZE;
24672 +
24673 + if (len > pax_task_size)
24674 return -ENOMEM;
24675
24676 if (flags & MAP_FIXED) {
24677 @@ -416,8 +435,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24678 if (addr) {
24679 addr = ALIGN(addr, huge_page_size(h));
24680 vma = find_vma(mm, addr);
24681 - if (TASK_SIZE - len >= addr &&
24682 - (!vma || addr + len <= vma->vm_start))
24683 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24684 return addr;
24685 }
24686 if (mm->get_unmapped_area == arch_get_unmapped_area)
24687 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24688 index 6cabf65..00139c4 100644
24689 --- a/arch/x86/mm/init.c
24690 +++ b/arch/x86/mm/init.c
24691 @@ -17,6 +17,8 @@
24692 #include <asm/tlb.h>
24693 #include <asm/proto.h>
24694 #include <asm/dma.h> /* for MAX_DMA_PFN */
24695 +#include <asm/desc.h>
24696 +#include <asm/bios_ebda.h>
24697
24698 unsigned long __initdata pgt_buf_start;
24699 unsigned long __meminitdata pgt_buf_end;
24700 @@ -33,7 +35,7 @@ int direct_gbpages
24701 static void __init find_early_table_space(unsigned long end, int use_pse,
24702 int use_gbpages)
24703 {
24704 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24705 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24706 phys_addr_t base;
24707
24708 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24709 @@ -312,10 +314,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24710 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24711 * mmio resources as well as potential bios/acpi data regions.
24712 */
24713 +
24714 +#ifdef CONFIG_GRKERNSEC_KMEM
24715 +static unsigned int ebda_start __read_only;
24716 +static unsigned int ebda_end __read_only;
24717 +#endif
24718 +
24719 int devmem_is_allowed(unsigned long pagenr)
24720 {
24721 +#ifdef CONFIG_GRKERNSEC_KMEM
24722 + /* allow BDA */
24723 + if (!pagenr)
24724 + return 1;
24725 + /* allow EBDA */
24726 + if (pagenr >= ebda_start && pagenr < ebda_end)
24727 + return 1;
24728 +#else
24729 + if (!pagenr)
24730 + return 1;
24731 +#ifdef CONFIG_VM86
24732 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24733 + return 1;
24734 +#endif
24735 +#endif
24736 +
24737 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24738 + return 1;
24739 +#ifdef CONFIG_GRKERNSEC_KMEM
24740 + /* throw out everything else below 1MB */
24741 if (pagenr <= 256)
24742 - return 1;
24743 + return 0;
24744 +#endif
24745 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24746 return 0;
24747 if (!page_is_ram(pagenr))
24748 @@ -372,8 +401,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24749 #endif
24750 }
24751
24752 +#ifdef CONFIG_GRKERNSEC_KMEM
24753 +static inline void gr_init_ebda(void)
24754 +{
24755 + unsigned int ebda_addr;
24756 + unsigned int ebda_size = 0;
24757 +
24758 + ebda_addr = get_bios_ebda();
24759 + if (ebda_addr) {
24760 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24761 + ebda_size <<= 10;
24762 + }
24763 + if (ebda_addr && ebda_size) {
24764 + ebda_start = ebda_addr >> PAGE_SHIFT;
24765 + ebda_end = min(PAGE_ALIGN(ebda_addr + ebda_size), 0xa0000) >> PAGE_SHIFT;
24766 + } else {
24767 + ebda_start = 0x9f000 >> PAGE_SHIFT;
24768 + ebda_end = 0xa0000 >> PAGE_SHIFT;
24769 + }
24770 +}
24771 +#else
24772 +static inline void gr_init_ebda(void) { }
24773 +#endif
24774 +
24775 void free_initmem(void)
24776 {
24777 +#ifdef CONFIG_PAX_KERNEXEC
24778 +#ifdef CONFIG_X86_32
24779 + /* PaX: limit KERNEL_CS to actual size */
24780 + unsigned long addr, limit;
24781 + struct desc_struct d;
24782 + int cpu;
24783 +#endif
24784 +#endif
24785 +
24786 + gr_init_ebda();
24787 +
24788 +#ifdef CONFIG_PAX_KERNEXEC
24789 +#ifdef CONFIG_X86_32
24790 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24791 + limit = (limit - 1UL) >> PAGE_SHIFT;
24792 +
24793 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24794 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24795 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24796 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24797 + }
24798 +
24799 + /* PaX: make KERNEL_CS read-only */
24800 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24801 + if (!paravirt_enabled())
24802 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24803 +/*
24804 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24805 + pgd = pgd_offset_k(addr);
24806 + pud = pud_offset(pgd, addr);
24807 + pmd = pmd_offset(pud, addr);
24808 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24809 + }
24810 +*/
24811 +#ifdef CONFIG_X86_PAE
24812 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24813 +/*
24814 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24815 + pgd = pgd_offset_k(addr);
24816 + pud = pud_offset(pgd, addr);
24817 + pmd = pmd_offset(pud, addr);
24818 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24819 + }
24820 +*/
24821 +#endif
24822 +
24823 +#ifdef CONFIG_MODULES
24824 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24825 +#endif
24826 +
24827 +#else
24828 + pgd_t *pgd;
24829 + pud_t *pud;
24830 + pmd_t *pmd;
24831 + unsigned long addr, end;
24832 +
24833 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24834 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24835 + pgd = pgd_offset_k(addr);
24836 + pud = pud_offset(pgd, addr);
24837 + pmd = pmd_offset(pud, addr);
24838 + if (!pmd_present(*pmd))
24839 + continue;
24840 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24841 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24842 + else
24843 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24844 + }
24845 +
24846 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24847 + end = addr + KERNEL_IMAGE_SIZE;
24848 + for (; addr < end; addr += PMD_SIZE) {
24849 + pgd = pgd_offset_k(addr);
24850 + pud = pud_offset(pgd, addr);
24851 + pmd = pmd_offset(pud, addr);
24852 + if (!pmd_present(*pmd))
24853 + continue;
24854 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24855 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24856 + }
24857 +#endif
24858 +
24859 + flush_tlb_all();
24860 +#endif
24861 +
24862 free_init_pages("unused kernel memory",
24863 (unsigned long)(&__init_begin),
24864 (unsigned long)(&__init_end));
24865 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24866 index 8663f6c..829ae76 100644
24867 --- a/arch/x86/mm/init_32.c
24868 +++ b/arch/x86/mm/init_32.c
24869 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
24870 }
24871
24872 /*
24873 - * Creates a middle page table and puts a pointer to it in the
24874 - * given global directory entry. This only returns the gd entry
24875 - * in non-PAE compilation mode, since the middle layer is folded.
24876 - */
24877 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24878 -{
24879 - pud_t *pud;
24880 - pmd_t *pmd_table;
24881 -
24882 -#ifdef CONFIG_X86_PAE
24883 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24884 - if (after_bootmem)
24885 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24886 - else
24887 - pmd_table = (pmd_t *)alloc_low_page();
24888 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24889 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24890 - pud = pud_offset(pgd, 0);
24891 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24892 -
24893 - return pmd_table;
24894 - }
24895 -#endif
24896 - pud = pud_offset(pgd, 0);
24897 - pmd_table = pmd_offset(pud, 0);
24898 -
24899 - return pmd_table;
24900 -}
24901 -
24902 -/*
24903 * Create a page table and place a pointer to it in a middle page
24904 * directory entry:
24905 */
24906 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24907 page_table = (pte_t *)alloc_low_page();
24908
24909 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24910 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24911 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24912 +#else
24913 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24914 +#endif
24915 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24916 }
24917
24918 return pte_offset_kernel(pmd, 0);
24919 }
24920
24921 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24922 +{
24923 + pud_t *pud;
24924 + pmd_t *pmd_table;
24925 +
24926 + pud = pud_offset(pgd, 0);
24927 + pmd_table = pmd_offset(pud, 0);
24928 +
24929 + return pmd_table;
24930 +}
24931 +
24932 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24933 {
24934 int pgd_idx = pgd_index(vaddr);
24935 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24936 int pgd_idx, pmd_idx;
24937 unsigned long vaddr;
24938 pgd_t *pgd;
24939 + pud_t *pud;
24940 pmd_t *pmd;
24941 pte_t *pte = NULL;
24942
24943 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24944 pgd = pgd_base + pgd_idx;
24945
24946 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24947 - pmd = one_md_table_init(pgd);
24948 - pmd = pmd + pmd_index(vaddr);
24949 + pud = pud_offset(pgd, vaddr);
24950 + pmd = pmd_offset(pud, vaddr);
24951 +
24952 +#ifdef CONFIG_X86_PAE
24953 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24954 +#endif
24955 +
24956 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24957 pmd++, pmd_idx++) {
24958 pte = page_table_kmap_check(one_page_table_init(pmd),
24959 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24960 }
24961 }
24962
24963 -static inline int is_kernel_text(unsigned long addr)
24964 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24965 {
24966 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24967 - return 1;
24968 - return 0;
24969 + if ((start > ktla_ktva((unsigned long)_etext) ||
24970 + end <= ktla_ktva((unsigned long)_stext)) &&
24971 + (start > ktla_ktva((unsigned long)_einittext) ||
24972 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24973 +
24974 +#ifdef CONFIG_ACPI_SLEEP
24975 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24976 +#endif
24977 +
24978 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24979 + return 0;
24980 + return 1;
24981 }
24982
24983 /*
24984 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
24985 unsigned long last_map_addr = end;
24986 unsigned long start_pfn, end_pfn;
24987 pgd_t *pgd_base = swapper_pg_dir;
24988 - int pgd_idx, pmd_idx, pte_ofs;
24989 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24990 unsigned long pfn;
24991 pgd_t *pgd;
24992 + pud_t *pud;
24993 pmd_t *pmd;
24994 pte_t *pte;
24995 unsigned pages_2m, pages_4k;
24996 @@ -281,8 +282,13 @@ repeat:
24997 pfn = start_pfn;
24998 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24999 pgd = pgd_base + pgd_idx;
25000 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25001 - pmd = one_md_table_init(pgd);
25002 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25003 + pud = pud_offset(pgd, 0);
25004 + pmd = pmd_offset(pud, 0);
25005 +
25006 +#ifdef CONFIG_X86_PAE
25007 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25008 +#endif
25009
25010 if (pfn >= end_pfn)
25011 continue;
25012 @@ -294,14 +300,13 @@ repeat:
25013 #endif
25014 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25015 pmd++, pmd_idx++) {
25016 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25017 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25018
25019 /*
25020 * Map with big pages if possible, otherwise
25021 * create normal page tables:
25022 */
25023 if (use_pse) {
25024 - unsigned int addr2;
25025 pgprot_t prot = PAGE_KERNEL_LARGE;
25026 /*
25027 * first pass will use the same initial
25028 @@ -311,11 +316,7 @@ repeat:
25029 __pgprot(PTE_IDENT_ATTR |
25030 _PAGE_PSE);
25031
25032 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25033 - PAGE_OFFSET + PAGE_SIZE-1;
25034 -
25035 - if (is_kernel_text(addr) ||
25036 - is_kernel_text(addr2))
25037 + if (is_kernel_text(address, address + PMD_SIZE))
25038 prot = PAGE_KERNEL_LARGE_EXEC;
25039
25040 pages_2m++;
25041 @@ -332,7 +333,7 @@ repeat:
25042 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25043 pte += pte_ofs;
25044 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25045 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25046 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25047 pgprot_t prot = PAGE_KERNEL;
25048 /*
25049 * first pass will use the same initial
25050 @@ -340,7 +341,7 @@ repeat:
25051 */
25052 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25053
25054 - if (is_kernel_text(addr))
25055 + if (is_kernel_text(address, address + PAGE_SIZE))
25056 prot = PAGE_KERNEL_EXEC;
25057
25058 pages_4k++;
25059 @@ -466,7 +467,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25060
25061 pud = pud_offset(pgd, va);
25062 pmd = pmd_offset(pud, va);
25063 - if (!pmd_present(*pmd))
25064 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
25065 break;
25066
25067 pte = pte_offset_kernel(pmd, va);
25068 @@ -518,12 +519,10 @@ void __init early_ioremap_page_table_range_init(void)
25069
25070 static void __init pagetable_init(void)
25071 {
25072 - pgd_t *pgd_base = swapper_pg_dir;
25073 -
25074 - permanent_kmaps_init(pgd_base);
25075 + permanent_kmaps_init(swapper_pg_dir);
25076 }
25077
25078 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25079 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25080 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25081
25082 /* user-defined highmem size */
25083 @@ -735,6 +734,12 @@ void __init mem_init(void)
25084
25085 pci_iommu_alloc();
25086
25087 +#ifdef CONFIG_PAX_PER_CPU_PGD
25088 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25089 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25090 + KERNEL_PGD_PTRS);
25091 +#endif
25092 +
25093 #ifdef CONFIG_FLATMEM
25094 BUG_ON(!mem_map);
25095 #endif
25096 @@ -761,7 +766,7 @@ void __init mem_init(void)
25097 reservedpages++;
25098
25099 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25100 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25101 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25102 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25103
25104 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25105 @@ -802,10 +807,10 @@ void __init mem_init(void)
25106 ((unsigned long)&__init_end -
25107 (unsigned long)&__init_begin) >> 10,
25108
25109 - (unsigned long)&_etext, (unsigned long)&_edata,
25110 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25111 + (unsigned long)&_sdata, (unsigned long)&_edata,
25112 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25113
25114 - (unsigned long)&_text, (unsigned long)&_etext,
25115 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25116 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25117
25118 /*
25119 @@ -883,6 +888,7 @@ void set_kernel_text_rw(void)
25120 if (!kernel_set_to_readonly)
25121 return;
25122
25123 + start = ktla_ktva(start);
25124 pr_debug("Set kernel text: %lx - %lx for read write\n",
25125 start, start+size);
25126
25127 @@ -897,6 +903,7 @@ void set_kernel_text_ro(void)
25128 if (!kernel_set_to_readonly)
25129 return;
25130
25131 + start = ktla_ktva(start);
25132 pr_debug("Set kernel text: %lx - %lx for read only\n",
25133 start, start+size);
25134
25135 @@ -925,6 +932,7 @@ void mark_rodata_ro(void)
25136 unsigned long start = PFN_ALIGN(_text);
25137 unsigned long size = PFN_ALIGN(_etext) - start;
25138
25139 + start = ktla_ktva(start);
25140 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25141 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25142 size >> 10);
25143 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25144 index 436a030..4f97ffc 100644
25145 --- a/arch/x86/mm/init_64.c
25146 +++ b/arch/x86/mm/init_64.c
25147 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
25148 * around without checking the pgd every time.
25149 */
25150
25151 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
25152 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
25153 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25154
25155 int force_personality32;
25156 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25157
25158 for (address = start; address <= end; address += PGDIR_SIZE) {
25159 const pgd_t *pgd_ref = pgd_offset_k(address);
25160 +
25161 +#ifdef CONFIG_PAX_PER_CPU_PGD
25162 + unsigned long cpu;
25163 +#else
25164 struct page *page;
25165 +#endif
25166
25167 if (pgd_none(*pgd_ref))
25168 continue;
25169
25170 spin_lock(&pgd_lock);
25171 +
25172 +#ifdef CONFIG_PAX_PER_CPU_PGD
25173 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25174 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
25175 +#else
25176 list_for_each_entry(page, &pgd_list, lru) {
25177 pgd_t *pgd;
25178 spinlock_t *pgt_lock;
25179 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25180 /* the pgt_lock only for Xen */
25181 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25182 spin_lock(pgt_lock);
25183 +#endif
25184
25185 if (pgd_none(*pgd))
25186 set_pgd(pgd, *pgd_ref);
25187 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25188 BUG_ON(pgd_page_vaddr(*pgd)
25189 != pgd_page_vaddr(*pgd_ref));
25190
25191 +#ifndef CONFIG_PAX_PER_CPU_PGD
25192 spin_unlock(pgt_lock);
25193 +#endif
25194 +
25195 }
25196 spin_unlock(&pgd_lock);
25197 }
25198 @@ -162,7 +176,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25199 {
25200 if (pgd_none(*pgd)) {
25201 pud_t *pud = (pud_t *)spp_getpage();
25202 - pgd_populate(&init_mm, pgd, pud);
25203 + pgd_populate_kernel(&init_mm, pgd, pud);
25204 if (pud != pud_offset(pgd, 0))
25205 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25206 pud, pud_offset(pgd, 0));
25207 @@ -174,7 +188,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25208 {
25209 if (pud_none(*pud)) {
25210 pmd_t *pmd = (pmd_t *) spp_getpage();
25211 - pud_populate(&init_mm, pud, pmd);
25212 + pud_populate_kernel(&init_mm, pud, pmd);
25213 if (pmd != pmd_offset(pud, 0))
25214 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25215 pmd, pmd_offset(pud, 0));
25216 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25217 pmd = fill_pmd(pud, vaddr);
25218 pte = fill_pte(pmd, vaddr);
25219
25220 + pax_open_kernel();
25221 set_pte(pte, new_pte);
25222 + pax_close_kernel();
25223
25224 /*
25225 * It's enough to flush this one mapping.
25226 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25227 pgd = pgd_offset_k((unsigned long)__va(phys));
25228 if (pgd_none(*pgd)) {
25229 pud = (pud_t *) spp_getpage();
25230 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25231 - _PAGE_USER));
25232 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25233 }
25234 pud = pud_offset(pgd, (unsigned long)__va(phys));
25235 if (pud_none(*pud)) {
25236 pmd = (pmd_t *) spp_getpage();
25237 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25238 - _PAGE_USER));
25239 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25240 }
25241 pmd = pmd_offset(pud, phys);
25242 BUG_ON(!pmd_none(*pmd));
25243 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25244 if (pfn >= pgt_buf_top)
25245 panic("alloc_low_page: ran out of memory");
25246
25247 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25248 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25249 clear_page(adr);
25250 *phys = pfn * PAGE_SIZE;
25251 return adr;
25252 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
25253
25254 phys = __pa(virt);
25255 left = phys & (PAGE_SIZE - 1);
25256 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25257 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25258 adr = (void *)(((unsigned long)adr) | left);
25259
25260 return adr;
25261 @@ -546,7 +560,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25262 unmap_low_page(pmd);
25263
25264 spin_lock(&init_mm.page_table_lock);
25265 - pud_populate(&init_mm, pud, __va(pmd_phys));
25266 + pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25267 spin_unlock(&init_mm.page_table_lock);
25268 }
25269 __flush_tlb_all();
25270 @@ -592,7 +606,7 @@ kernel_physical_mapping_init(unsigned long start,
25271 unmap_low_page(pud);
25272
25273 spin_lock(&init_mm.page_table_lock);
25274 - pgd_populate(&init_mm, pgd, __va(pud_phys));
25275 + pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25276 spin_unlock(&init_mm.page_table_lock);
25277 pgd_changed = true;
25278 }
25279 @@ -684,6 +698,12 @@ void __init mem_init(void)
25280
25281 pci_iommu_alloc();
25282
25283 +#ifdef CONFIG_PAX_PER_CPU_PGD
25284 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25285 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25286 + KERNEL_PGD_PTRS);
25287 +#endif
25288 +
25289 /* clear_bss() already clear the empty_zero_page */
25290
25291 reservedpages = 0;
25292 @@ -844,8 +864,8 @@ int kern_addr_valid(unsigned long addr)
25293 static struct vm_area_struct gate_vma = {
25294 .vm_start = VSYSCALL_START,
25295 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25296 - .vm_page_prot = PAGE_READONLY_EXEC,
25297 - .vm_flags = VM_READ | VM_EXEC
25298 + .vm_page_prot = PAGE_READONLY,
25299 + .vm_flags = VM_READ
25300 };
25301
25302 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25303 @@ -879,7 +899,7 @@ int in_gate_area_no_mm(unsigned long addr)
25304
25305 const char *arch_vma_name(struct vm_area_struct *vma)
25306 {
25307 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25308 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25309 return "[vdso]";
25310 if (vma == &gate_vma)
25311 return "[vsyscall]";
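
A note on the pax_open_kernel()/pax_close_kernel() pairs that the init_64.c hunks above (and many later hunks) wrap around page-table updates: under KERNEXEC those structures are kept read-only, and the helpers briefly lift that protection for the current CPU. The patch defines the real helpers elsewhere; the sketch below only illustrates the common CR0.WP-toggling approach, with every name prefixed example_ to mark it as an assumption rather than the patch's code.

/*
 * Illustrative sketch only -- not the patch's definition.  Clearing
 * CR0.WP lets ring 0 write through read-only mappings; the window is
 * kept on one CPU by disabling preemption around it.
 */
#include <linux/compiler.h>
#include <linux/preempt.h>

#define EXAMPLE_CR0_WP	(1UL << 16)	/* CR0.WP: honor page-level write protection in ring 0 */

static inline unsigned long example_read_cr0(void)
{
	unsigned long val;

	asm volatile("mov %%cr0,%0" : "=r" (val));
	return val;
}

static inline void example_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0" : : "r" (val) : "memory");
}

static inline void example_open_kernel(void)
{
	preempt_disable();
	barrier();
	example_write_cr0(example_read_cr0() & ~EXAMPLE_CR0_WP);
}

static inline void example_close_kernel(void)
{
	example_write_cr0(example_read_cr0() | EXAMPLE_CR0_WP);
	barrier();
	preempt_enable();
}
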
25312 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25313 index 7b179b4..6bd1777 100644
25314 --- a/arch/x86/mm/iomap_32.c
25315 +++ b/arch/x86/mm/iomap_32.c
25316 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25317 type = kmap_atomic_idx_push();
25318 idx = type + KM_TYPE_NR * smp_processor_id();
25319 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25320 +
25321 + pax_open_kernel();
25322 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25323 + pax_close_kernel();
25324 +
25325 arch_flush_lazy_mmu_mode();
25326
25327 return (void *)vaddr;
25328 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25329 index be1ef57..55f0160 100644
25330 --- a/arch/x86/mm/ioremap.c
25331 +++ b/arch/x86/mm/ioremap.c
25332 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25333 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25334 int is_ram = page_is_ram(pfn);
25335
25336 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25337 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25338 return NULL;
25339 WARN_ON_ONCE(is_ram);
25340 }
25341 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25342
25343 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25344 if (page_is_ram(start >> PAGE_SHIFT))
25345 +#ifdef CONFIG_HIGHMEM
25346 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25347 +#endif
25348 return __va(phys);
25349
25350 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25351 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25352 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25353
25354 static __initdata int after_paging_init;
25355 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25356 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25357
25358 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25359 {
25360 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25361 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25362
25363 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25364 - memset(bm_pte, 0, sizeof(bm_pte));
25365 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25366 + pmd_populate_user(&init_mm, pmd, bm_pte);
25367
25368 /*
25369 * The boot-ioremap range spans multiple pmds, for which
25370 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25371 index d87dd6d..bf3fa66 100644
25372 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25373 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25374 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25375 * memory (e.g. tracked pages)? For now, we need this to avoid
25376 * invoking kmemcheck for PnP BIOS calls.
25377 */
25378 - if (regs->flags & X86_VM_MASK)
25379 + if (v8086_mode(regs))
25380 return false;
25381 - if (regs->cs != __KERNEL_CS)
25382 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25383 return false;
25384
25385 pte = kmemcheck_pte_lookup(address);
25386 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25387 index 845df68..1d8d29f 100644
25388 --- a/arch/x86/mm/mmap.c
25389 +++ b/arch/x86/mm/mmap.c
25390 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25391 * Leave an at least ~128 MB hole with possible stack randomization.
25392 */
25393 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25394 -#define MAX_GAP (TASK_SIZE/6*5)
25395 +#define MAX_GAP (pax_task_size/6*5)
25396
25397 static int mmap_is_legacy(void)
25398 {
25399 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25400 return rnd << PAGE_SHIFT;
25401 }
25402
25403 -static unsigned long mmap_base(void)
25404 +static unsigned long mmap_base(struct mm_struct *mm)
25405 {
25406 unsigned long gap = rlimit(RLIMIT_STACK);
25407 + unsigned long pax_task_size = TASK_SIZE;
25408 +
25409 +#ifdef CONFIG_PAX_SEGMEXEC
25410 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25411 + pax_task_size = SEGMEXEC_TASK_SIZE;
25412 +#endif
25413
25414 if (gap < MIN_GAP)
25415 gap = MIN_GAP;
25416 else if (gap > MAX_GAP)
25417 gap = MAX_GAP;
25418
25419 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25420 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25421 }
25422
25423 /*
25424 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25425 * does, but not when emulating X86_32
25426 */
25427 -static unsigned long mmap_legacy_base(void)
25428 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25429 {
25430 - if (mmap_is_ia32())
25431 + if (mmap_is_ia32()) {
25432 +
25433 +#ifdef CONFIG_PAX_SEGMEXEC
25434 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25435 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25436 + else
25437 +#endif
25438 +
25439 return TASK_UNMAPPED_BASE;
25440 - else
25441 + } else
25442 return TASK_UNMAPPED_BASE + mmap_rnd();
25443 }
25444
25445 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25446 void arch_pick_mmap_layout(struct mm_struct *mm)
25447 {
25448 if (mmap_is_legacy()) {
25449 - mm->mmap_base = mmap_legacy_base();
25450 + mm->mmap_base = mmap_legacy_base(mm);
25451 +
25452 +#ifdef CONFIG_PAX_RANDMMAP
25453 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25454 + mm->mmap_base += mm->delta_mmap;
25455 +#endif
25456 +
25457 mm->get_unmapped_area = arch_get_unmapped_area;
25458 mm->unmap_area = arch_unmap_area;
25459 } else {
25460 - mm->mmap_base = mmap_base();
25461 + mm->mmap_base = mmap_base(mm);
25462 +
25463 +#ifdef CONFIG_PAX_RANDMMAP
25464 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25465 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25466 +#endif
25467 +
25468 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25469 mm->unmap_area = arch_unmap_area_topdown;
25470 }
25471 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25472 index dc0b727..dc9d71a 100644
25473 --- a/arch/x86/mm/mmio-mod.c
25474 +++ b/arch/x86/mm/mmio-mod.c
25475 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25476 break;
25477 default:
25478 {
25479 - unsigned char *ip = (unsigned char *)instptr;
25480 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25481 my_trace->opcode = MMIO_UNKNOWN_OP;
25482 my_trace->width = 0;
25483 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25484 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25485 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25486 void __iomem *addr)
25487 {
25488 - static atomic_t next_id;
25489 + static atomic_unchecked_t next_id;
25490 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25491 /* These are page-unaligned. */
25492 struct mmiotrace_map map = {
25493 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25494 .private = trace
25495 },
25496 .phys = offset,
25497 - .id = atomic_inc_return(&next_id)
25498 + .id = atomic_inc_return_unchecked(&next_id)
25499 };
25500 map.map_id = trace->id;
25501
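
The next_id counter converted above is a pure ID generator, not a reference count, so the patch switches it to the atomic_unchecked_t flavour, which mirrors the atomic_t API but is exempt from PaX REFCOUNT overflow checking. For comparison, the same logic with the stock API (illustrative names; the _unchecked calls in the hunk are drop-in equivalents):

#include <linux/atomic.h>

static atomic_t example_next_id = ATOMIC_INIT(0);

static int example_alloc_trace_id(void)
{
	/* may wrap silently -- harmless for a trace ID, which is
	 * exactly why the unchecked variant is appropriate here */
	return atomic_inc_return(&example_next_id);
}
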
25502 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25503 index b008656..773eac2 100644
25504 --- a/arch/x86/mm/pageattr-test.c
25505 +++ b/arch/x86/mm/pageattr-test.c
25506 @@ -36,7 +36,7 @@ enum {
25507
25508 static int pte_testbit(pte_t pte)
25509 {
25510 - return pte_flags(pte) & _PAGE_UNUSED1;
25511 + return pte_flags(pte) & _PAGE_CPA_TEST;
25512 }
25513
25514 struct split_state {
25515 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25516 index e1ebde3..b1e1db38 100644
25517 --- a/arch/x86/mm/pageattr.c
25518 +++ b/arch/x86/mm/pageattr.c
25519 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25520 */
25521 #ifdef CONFIG_PCI_BIOS
25522 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25523 - pgprot_val(forbidden) |= _PAGE_NX;
25524 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25525 #endif
25526
25527 /*
25528 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25529 * Does not cover __inittext since that is gone later on. On
25530 * 64bit we do not enforce !NX on the low mapping
25531 */
25532 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25533 - pgprot_val(forbidden) |= _PAGE_NX;
25534 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25535 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25536
25537 +#ifdef CONFIG_DEBUG_RODATA
25538 /*
25539 * The .rodata section needs to be read-only. Using the pfn
25540 * catches all aliases.
25541 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25542 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25543 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25544 pgprot_val(forbidden) |= _PAGE_RW;
25545 +#endif
25546
25547 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25548 /*
25549 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25550 }
25551 #endif
25552
25553 +#ifdef CONFIG_PAX_KERNEXEC
25554 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25555 + pgprot_val(forbidden) |= _PAGE_RW;
25556 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25557 + }
25558 +#endif
25559 +
25560 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25561
25562 return prot;
25563 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25564 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25565 {
25566 /* change init_mm */
25567 + pax_open_kernel();
25568 set_pte_atomic(kpte, pte);
25569 +
25570 #ifdef CONFIG_X86_32
25571 if (!SHARED_KERNEL_PMD) {
25572 +
25573 +#ifdef CONFIG_PAX_PER_CPU_PGD
25574 + unsigned long cpu;
25575 +#else
25576 struct page *page;
25577 +#endif
25578
25579 +#ifdef CONFIG_PAX_PER_CPU_PGD
25580 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25581 + pgd_t *pgd = get_cpu_pgd(cpu);
25582 +#else
25583 list_for_each_entry(page, &pgd_list, lru) {
25584 - pgd_t *pgd;
25585 + pgd_t *pgd = (pgd_t *)page_address(page);
25586 +#endif
25587 +
25588 pud_t *pud;
25589 pmd_t *pmd;
25590
25591 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25592 + pgd += pgd_index(address);
25593 pud = pud_offset(pgd, address);
25594 pmd = pmd_offset(pud, address);
25595 set_pte_atomic((pte_t *)pmd, pte);
25596 }
25597 }
25598 #endif
25599 + pax_close_kernel();
25600 }
25601
25602 static int
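
Several pageattr.c hunks above mask the requested _PAGE_NX bit with __supported_pte_mask before adding it to the forbidden set, so NX is only enforced where the hardware and configuration actually support it. A minimal illustration of the same filtering idea (sketch only; the example_ name is not from the patch):

#include <asm/pgtable_types.h>

/* Requested protection bits are filtered through __supported_pte_mask,
 * so _PAGE_NX silently drops out when NX is unavailable. */
static inline pgprot_t example_filter_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & __supported_pte_mask);
}
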
25603 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25604 index f6ff57b..481690f 100644
25605 --- a/arch/x86/mm/pat.c
25606 +++ b/arch/x86/mm/pat.c
25607 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25608
25609 if (!entry) {
25610 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25611 - current->comm, current->pid, start, end);
25612 + current->comm, task_pid_nr(current), start, end);
25613 return -EINVAL;
25614 }
25615
25616 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25617 while (cursor < to) {
25618 if (!devmem_is_allowed(pfn)) {
25619 printk(KERN_INFO
25620 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25621 - current->comm, from, to);
25622 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25623 + current->comm, from, to, cursor);
25624 return 0;
25625 }
25626 cursor += PAGE_SIZE;
25627 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25628 printk(KERN_INFO
25629 "%s:%d ioremap_change_attr failed %s "
25630 "for %Lx-%Lx\n",
25631 - current->comm, current->pid,
25632 + current->comm, task_pid_nr(current),
25633 cattr_name(flags),
25634 base, (unsigned long long)(base + size));
25635 return -EINVAL;
25636 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25637 if (want_flags != flags) {
25638 printk(KERN_WARNING
25639 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25640 - current->comm, current->pid,
25641 + current->comm, task_pid_nr(current),
25642 cattr_name(want_flags),
25643 (unsigned long long)paddr,
25644 (unsigned long long)(paddr + size),
25645 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25646 free_memtype(paddr, paddr + size);
25647 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25648 " for %Lx-%Lx, got %s\n",
25649 - current->comm, current->pid,
25650 + current->comm, task_pid_nr(current),
25651 cattr_name(want_flags),
25652 (unsigned long long)paddr,
25653 (unsigned long long)(paddr + size),
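
The pat.c hunks above replace direct current->pid reads in diagnostics with task_pid_nr(current), which goes through the task's struct pid accessor. A hypothetical message using the same pattern (illustrative only):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>

static void example_report_range(u64 start, u64 end)
{
	/* task_pid_nr() reports the global PID via the pid accessor
	 * instead of touching the pid field directly */
	printk(KERN_INFO "%s:%d rejected range %Lx-%Lx\n",
	       current->comm, task_pid_nr(current), start, end);
}
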
25654 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25655 index 9f0614d..92ae64a 100644
25656 --- a/arch/x86/mm/pf_in.c
25657 +++ b/arch/x86/mm/pf_in.c
25658 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25659 int i;
25660 enum reason_type rv = OTHERS;
25661
25662 - p = (unsigned char *)ins_addr;
25663 + p = (unsigned char *)ktla_ktva(ins_addr);
25664 p += skip_prefix(p, &prf);
25665 p += get_opcode(p, &opcode);
25666
25667 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25668 struct prefix_bits prf;
25669 int i;
25670
25671 - p = (unsigned char *)ins_addr;
25672 + p = (unsigned char *)ktla_ktva(ins_addr);
25673 p += skip_prefix(p, &prf);
25674 p += get_opcode(p, &opcode);
25675
25676 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25677 struct prefix_bits prf;
25678 int i;
25679
25680 - p = (unsigned char *)ins_addr;
25681 + p = (unsigned char *)ktla_ktva(ins_addr);
25682 p += skip_prefix(p, &prf);
25683 p += get_opcode(p, &opcode);
25684
25685 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25686 struct prefix_bits prf;
25687 int i;
25688
25689 - p = (unsigned char *)ins_addr;
25690 + p = (unsigned char *)ktla_ktva(ins_addr);
25691 p += skip_prefix(p, &prf);
25692 p += get_opcode(p, &opcode);
25693 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25694 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25695 struct prefix_bits prf;
25696 int i;
25697
25698 - p = (unsigned char *)ins_addr;
25699 + p = (unsigned char *)ktla_ktva(ins_addr);
25700 p += skip_prefix(p, &prf);
25701 p += get_opcode(p, &opcode);
25702 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25703 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25704 index 8573b83..7d9628f 100644
25705 --- a/arch/x86/mm/pgtable.c
25706 +++ b/arch/x86/mm/pgtable.c
25707 @@ -84,10 +84,60 @@ static inline void pgd_list_del(pgd_t *pgd)
25708 list_del(&page->lru);
25709 }
25710
25711 -#define UNSHARED_PTRS_PER_PGD \
25712 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25713 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25714 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25715
25716 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25717 +{
25718 + while (count--)
25719 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25720 +}
25721 +#endif
25722
25723 +#ifdef CONFIG_PAX_PER_CPU_PGD
25724 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25725 +{
25726 + while (count--) {
25727 + pgd_t pgd;
25728 +
25729 +#ifdef CONFIG_X86_64
25730 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25731 +#else
25732 + pgd = *src++;
25733 +#endif
25734 +
25735 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25736 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25737 +#endif
25738 +
25739 + *dst++ = pgd;
25740 + }
25741 +
25742 +}
25743 +#endif
25744 +
25745 +#ifdef CONFIG_X86_64
25746 +#define pxd_t pud_t
25747 +#define pyd_t pgd_t
25748 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25749 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25750 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25751 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
25752 +#define PYD_SIZE PGDIR_SIZE
25753 +#else
25754 +#define pxd_t pmd_t
25755 +#define pyd_t pud_t
25756 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25757 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25758 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25759 +#define pyd_offset(mm, address) pud_offset((mm), (address))
25760 +#define PYD_SIZE PUD_SIZE
25761 +#endif
25762 +
25763 +#ifdef CONFIG_PAX_PER_CPU_PGD
25764 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25765 +static inline void pgd_dtor(pgd_t *pgd) {}
25766 +#else
25767 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25768 {
25769 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25770 @@ -128,6 +178,7 @@ static void pgd_dtor(pgd_t *pgd)
25771 pgd_list_del(pgd);
25772 spin_unlock(&pgd_lock);
25773 }
25774 +#endif
25775
25776 /*
25777 * List of all pgd's needed for non-PAE so it can invalidate entries
25778 @@ -140,7 +191,7 @@ static void pgd_dtor(pgd_t *pgd)
25779 * -- wli
25780 */
25781
25782 -#ifdef CONFIG_X86_PAE
25783 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25784 /*
25785 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25786 * updating the top-level pagetable entries to guarantee the
25787 @@ -152,7 +203,7 @@ static void pgd_dtor(pgd_t *pgd)
25788 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25789 * and initialize the kernel pmds here.
25790 */
25791 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25792 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25793
25794 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25795 {
25796 @@ -170,36 +221,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25797 */
25798 flush_tlb_mm(mm);
25799 }
25800 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25801 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25802 #else /* !CONFIG_X86_PAE */
25803
25804 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25805 -#define PREALLOCATED_PMDS 0
25806 +#define PREALLOCATED_PXDS 0
25807
25808 #endif /* CONFIG_X86_PAE */
25809
25810 -static void free_pmds(pmd_t *pmds[])
25811 +static void free_pxds(pxd_t *pxds[])
25812 {
25813 int i;
25814
25815 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25816 - if (pmds[i])
25817 - free_page((unsigned long)pmds[i]);
25818 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25819 + if (pxds[i])
25820 + free_page((unsigned long)pxds[i]);
25821 }
25822
25823 -static int preallocate_pmds(pmd_t *pmds[])
25824 +static int preallocate_pxds(pxd_t *pxds[])
25825 {
25826 int i;
25827 bool failed = false;
25828
25829 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25830 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25831 - if (pmd == NULL)
25832 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25833 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25834 + if (pxd == NULL)
25835 failed = true;
25836 - pmds[i] = pmd;
25837 + pxds[i] = pxd;
25838 }
25839
25840 if (failed) {
25841 - free_pmds(pmds);
25842 + free_pxds(pxds);
25843 return -ENOMEM;
25844 }
25845
25846 @@ -212,51 +265,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25847 * preallocate which never got a corresponding vma will need to be
25848 * freed manually.
25849 */
25850 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25851 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25852 {
25853 int i;
25854
25855 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25856 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25857 pgd_t pgd = pgdp[i];
25858
25859 if (pgd_val(pgd) != 0) {
25860 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25861 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25862
25863 - pgdp[i] = native_make_pgd(0);
25864 + set_pgd(pgdp + i, native_make_pgd(0));
25865
25866 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25867 - pmd_free(mm, pmd);
25868 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25869 + pxd_free(mm, pxd);
25870 }
25871 }
25872 }
25873
25874 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25875 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25876 {
25877 - pud_t *pud;
25878 + pyd_t *pyd;
25879 unsigned long addr;
25880 int i;
25881
25882 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25883 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25884 return;
25885
25886 - pud = pud_offset(pgd, 0);
25887 +#ifdef CONFIG_X86_64
25888 + pyd = pyd_offset(mm, 0L);
25889 +#else
25890 + pyd = pyd_offset(pgd, 0L);
25891 +#endif
25892
25893 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25894 - i++, pud++, addr += PUD_SIZE) {
25895 - pmd_t *pmd = pmds[i];
25896 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25897 + i++, pyd++, addr += PYD_SIZE) {
25898 + pxd_t *pxd = pxds[i];
25899
25900 if (i >= KERNEL_PGD_BOUNDARY)
25901 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25902 - sizeof(pmd_t) * PTRS_PER_PMD);
25903 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25904 + sizeof(pxd_t) * PTRS_PER_PMD);
25905
25906 - pud_populate(mm, pud, pmd);
25907 + pyd_populate(mm, pyd, pxd);
25908 }
25909 }
25910
25911 pgd_t *pgd_alloc(struct mm_struct *mm)
25912 {
25913 pgd_t *pgd;
25914 - pmd_t *pmds[PREALLOCATED_PMDS];
25915 + pxd_t *pxds[PREALLOCATED_PXDS];
25916
25917 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25918
25919 @@ -265,11 +322,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25920
25921 mm->pgd = pgd;
25922
25923 - if (preallocate_pmds(pmds) != 0)
25924 + if (preallocate_pxds(pxds) != 0)
25925 goto out_free_pgd;
25926
25927 if (paravirt_pgd_alloc(mm) != 0)
25928 - goto out_free_pmds;
25929 + goto out_free_pxds;
25930
25931 /*
25932 * Make sure that pre-populating the pmds is atomic with
25933 @@ -279,14 +336,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25934 spin_lock(&pgd_lock);
25935
25936 pgd_ctor(mm, pgd);
25937 - pgd_prepopulate_pmd(mm, pgd, pmds);
25938 + pgd_prepopulate_pxd(mm, pgd, pxds);
25939
25940 spin_unlock(&pgd_lock);
25941
25942 return pgd;
25943
25944 -out_free_pmds:
25945 - free_pmds(pmds);
25946 +out_free_pxds:
25947 + free_pxds(pxds);
25948 out_free_pgd:
25949 free_page((unsigned long)pgd);
25950 out:
25951 @@ -295,7 +352,7 @@ out:
25952
25953 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25954 {
25955 - pgd_mop_up_pmds(mm, pgd);
25956 + pgd_mop_up_pxds(mm, pgd);
25957 pgd_dtor(pgd);
25958 paravirt_pgd_free(mm, pgd);
25959 free_page((unsigned long)pgd);
25960 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25961 index cac7184..09a39fa 100644
25962 --- a/arch/x86/mm/pgtable_32.c
25963 +++ b/arch/x86/mm/pgtable_32.c
25964 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25965 return;
25966 }
25967 pte = pte_offset_kernel(pmd, vaddr);
25968 +
25969 + pax_open_kernel();
25970 if (pte_val(pteval))
25971 set_pte_at(&init_mm, vaddr, pte, pteval);
25972 else
25973 pte_clear(&init_mm, vaddr, pte);
25974 + pax_close_kernel();
25975
25976 /*
25977 * It's enough to flush this one mapping.
25978 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25979 index 410531d..0f16030 100644
25980 --- a/arch/x86/mm/setup_nx.c
25981 +++ b/arch/x86/mm/setup_nx.c
25982 @@ -5,8 +5,10 @@
25983 #include <asm/pgtable.h>
25984 #include <asm/proto.h>
25985
25986 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25987 static int disable_nx __cpuinitdata;
25988
25989 +#ifndef CONFIG_PAX_PAGEEXEC
25990 /*
25991 * noexec = on|off
25992 *
25993 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25994 return 0;
25995 }
25996 early_param("noexec", noexec_setup);
25997 +#endif
25998 +
25999 +#endif
26000
26001 void __cpuinit x86_configure_nx(void)
26002 {
26003 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26004 if (cpu_has_nx && !disable_nx)
26005 __supported_pte_mask |= _PAGE_NX;
26006 else
26007 +#endif
26008 __supported_pte_mask &= ~_PAGE_NX;
26009 }
26010
26011 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26012 index d6c0418..06a0ad5 100644
26013 --- a/arch/x86/mm/tlb.c
26014 +++ b/arch/x86/mm/tlb.c
26015 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
26016 BUG();
26017 cpumask_clear_cpu(cpu,
26018 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26019 +
26020 +#ifndef CONFIG_PAX_PER_CPU_PGD
26021 load_cr3(swapper_pg_dir);
26022 +#endif
26023 +
26024 }
26025 EXPORT_SYMBOL_GPL(leave_mm);
26026
26027 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
26028 index 6687022..ceabcfa 100644
26029 --- a/arch/x86/net/bpf_jit.S
26030 +++ b/arch/x86/net/bpf_jit.S
26031 @@ -9,6 +9,7 @@
26032 */
26033 #include <linux/linkage.h>
26034 #include <asm/dwarf2.h>
26035 +#include <asm/alternative-asm.h>
26036
26037 /*
26038 * Calling convention :
26039 @@ -35,6 +36,7 @@ sk_load_word:
26040 jle bpf_slow_path_word
26041 mov (SKBDATA,%rsi),%eax
26042 bswap %eax /* ntohl() */
26043 + pax_force_retaddr
26044 ret
26045
26046
26047 @@ -53,6 +55,7 @@ sk_load_half:
26048 jle bpf_slow_path_half
26049 movzwl (SKBDATA,%rsi),%eax
26050 rol $8,%ax # ntohs()
26051 + pax_force_retaddr
26052 ret
26053
26054 sk_load_byte_ind:
26055 @@ -66,6 +69,7 @@ sk_load_byte:
26056 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
26057 jle bpf_slow_path_byte
26058 movzbl (SKBDATA,%rsi),%eax
26059 + pax_force_retaddr
26060 ret
26061
26062 /**
26063 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
26064 movzbl (SKBDATA,%rsi),%ebx
26065 and $15,%bl
26066 shl $2,%bl
26067 + pax_force_retaddr
26068 ret
26069 CFI_ENDPROC
26070 ENDPROC(sk_load_byte_msh)
26071 @@ -91,6 +96,7 @@ bpf_error:
26072 xor %eax,%eax
26073 mov -8(%rbp),%rbx
26074 leaveq
26075 + pax_force_retaddr
26076 ret
26077
26078 /* rsi contains offset and can be scratched */
26079 @@ -113,6 +119,7 @@ bpf_slow_path_word:
26080 js bpf_error
26081 mov -12(%rbp),%eax
26082 bswap %eax
26083 + pax_force_retaddr
26084 ret
26085
26086 bpf_slow_path_half:
26087 @@ -121,12 +128,14 @@ bpf_slow_path_half:
26088 mov -12(%rbp),%ax
26089 rol $8,%ax
26090 movzwl %ax,%eax
26091 + pax_force_retaddr
26092 ret
26093
26094 bpf_slow_path_byte:
26095 bpf_slow_path_common(1)
26096 js bpf_error
26097 movzbl -12(%rbp),%eax
26098 + pax_force_retaddr
26099 ret
26100
26101 bpf_slow_path_byte_msh:
26102 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
26103 and $15,%al
26104 shl $2,%al
26105 xchg %eax,%ebx
26106 + pax_force_retaddr
26107 ret
26108 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
26109 index 5a5b6e4..201d42e 100644
26110 --- a/arch/x86/net/bpf_jit_comp.c
26111 +++ b/arch/x86/net/bpf_jit_comp.c
26112 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
26113 set_fs(old_fs);
26114 }
26115
26116 +struct bpf_jit_work {
26117 + struct work_struct work;
26118 + void *image;
26119 +};
26120
26121 void bpf_jit_compile(struct sk_filter *fp)
26122 {
26123 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
26124 if (addrs == NULL)
26125 return;
26126
26127 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
26128 + if (!fp->work)
26129 + goto out;
26130 +
26131 /* Before first pass, make a rough estimation of addrs[]
26132 * each bpf instruction is translated to less than 64 bytes
26133 */
26134 @@ -477,7 +485,7 @@ void bpf_jit_compile(struct sk_filter *fp)
26135 common_load: seen |= SEEN_DATAREF;
26136 if ((int)K < 0) {
26137 /* Abort the JIT because __load_pointer() is needed. */
26138 - goto out;
26139 + goto error;
26140 }
26141 t_offset = func - (image + addrs[i]);
26142 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
26143 @@ -492,7 +500,7 @@ common_load: seen |= SEEN_DATAREF;
26144 case BPF_S_LDX_B_MSH:
26145 if ((int)K < 0) {
26146 /* Abort the JIT because __load_pointer() is needed. */
26147 - goto out;
26148 + goto error;
26149 }
26150 seen |= SEEN_DATAREF | SEEN_XREG;
26151 t_offset = sk_load_byte_msh - (image + addrs[i]);
26152 @@ -582,17 +590,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26153 break;
26154 default:
26155 /* hmm, too complex filter, give up with jit compiler */
26156 - goto out;
26157 + goto error;
26158 }
26159 ilen = prog - temp;
26160 if (image) {
26161 if (unlikely(proglen + ilen > oldproglen)) {
26162 pr_err("bpb_jit_compile fatal error\n");
26163 - kfree(addrs);
26164 - module_free(NULL, image);
26165 - return;
26166 + module_free_exec(NULL, image);
26167 + goto error;
26168 }
26169 + pax_open_kernel();
26170 memcpy(image + proglen, temp, ilen);
26171 + pax_close_kernel();
26172 }
26173 proglen += ilen;
26174 addrs[i] = proglen;
26175 @@ -613,11 +622,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26176 break;
26177 }
26178 if (proglen == oldproglen) {
26179 - image = module_alloc(max_t(unsigned int,
26180 - proglen,
26181 - sizeof(struct work_struct)));
26182 + image = module_alloc_exec(proglen);
26183 if (!image)
26184 - goto out;
26185 + goto error;
26186 }
26187 oldproglen = proglen;
26188 }
26189 @@ -633,7 +640,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26190 bpf_flush_icache(image, image + proglen);
26191
26192 fp->bpf_func = (void *)image;
26193 - }
26194 + } else
26195 +error:
26196 + kfree(fp->work);
26197 +
26198 out:
26199 kfree(addrs);
26200 return;
26201 @@ -641,18 +651,20 @@ out:
26202
26203 static void jit_free_defer(struct work_struct *arg)
26204 {
26205 - module_free(NULL, arg);
26206 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26207 + kfree(arg);
26208 }
26209
26210 /* run from softirq, we must use a work_struct to call
26211 - * module_free() from process context
26212 + * module_free_exec() from process context
26213 */
26214 void bpf_jit_free(struct sk_filter *fp)
26215 {
26216 if (fp->bpf_func != sk_run_filter) {
26217 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
26218 + struct work_struct *work = &fp->work->work;
26219
26220 INIT_WORK(work, jit_free_defer);
26221 + fp->work->image = fp->bpf_func;
26222 schedule_work(work);
26223 }
26224 }
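
The bpf_jit_comp.c hunk above stops reusing the JIT image itself as a work_struct: the free path now queues a small carrier (struct bpf_jit_work) holding both the work item and the image pointer, so module_free_exec() runs from process context. That is the standard embedded-work pattern sketched below (illustrative names; the hunk recovers the carrier by casting because the work item is its first member, container_of being the more general form):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_deferred_free {
	struct work_struct work;
	void *payload;
};

static void example_deferred_free_fn(struct work_struct *w)
{
	struct example_deferred_free *df =
		container_of(w, struct example_deferred_free, work);

	kfree(df->payload);	/* stands in for module_free_exec() */
	kfree(df);
}

static void example_schedule_free(struct example_deferred_free *df, void *payload)
{
	df->payload = payload;
	INIT_WORK(&df->work, example_deferred_free_fn);
	schedule_work(&df->work);	/* safe to call from softirq context */
}
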
26225 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26226 index bff89df..377758a 100644
26227 --- a/arch/x86/oprofile/backtrace.c
26228 +++ b/arch/x86/oprofile/backtrace.c
26229 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26230 struct stack_frame_ia32 *fp;
26231 unsigned long bytes;
26232
26233 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26234 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26235 if (bytes != sizeof(bufhead))
26236 return NULL;
26237
26238 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26239 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26240
26241 oprofile_add_trace(bufhead[0].return_address);
26242
26243 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26244 struct stack_frame bufhead[2];
26245 unsigned long bytes;
26246
26247 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26248 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26249 if (bytes != sizeof(bufhead))
26250 return NULL;
26251
26252 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26253 {
26254 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26255
26256 - if (!user_mode_vm(regs)) {
26257 + if (!user_mode(regs)) {
26258 unsigned long stack = kernel_stack_pointer(regs);
26259 if (depth)
26260 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26261 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26262 index cb29191..036766d 100644
26263 --- a/arch/x86/pci/mrst.c
26264 +++ b/arch/x86/pci/mrst.c
26265 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
26266 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
26267 pci_mmcfg_late_init();
26268 pcibios_enable_irq = mrst_pci_irq_enable;
26269 - pci_root_ops = pci_mrst_ops;
26270 + pax_open_kernel();
26271 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26272 + pax_close_kernel();
26273 /* Continue with standard init */
26274 return 1;
26275 }
26276 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26277 index da8fe05..7ee6704 100644
26278 --- a/arch/x86/pci/pcbios.c
26279 +++ b/arch/x86/pci/pcbios.c
26280 @@ -79,50 +79,93 @@ union bios32 {
26281 static struct {
26282 unsigned long address;
26283 unsigned short segment;
26284 -} bios32_indirect = { 0, __KERNEL_CS };
26285 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26286
26287 /*
26288 * Returns the entry point for the given service, NULL on error
26289 */
26290
26291 -static unsigned long bios32_service(unsigned long service)
26292 +static unsigned long __devinit bios32_service(unsigned long service)
26293 {
26294 unsigned char return_code; /* %al */
26295 unsigned long address; /* %ebx */
26296 unsigned long length; /* %ecx */
26297 unsigned long entry; /* %edx */
26298 unsigned long flags;
26299 + struct desc_struct d, *gdt;
26300
26301 local_irq_save(flags);
26302 - __asm__("lcall *(%%edi); cld"
26303 +
26304 + gdt = get_cpu_gdt_table(smp_processor_id());
26305 +
26306 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26307 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26308 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26309 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26310 +
26311 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26312 : "=a" (return_code),
26313 "=b" (address),
26314 "=c" (length),
26315 "=d" (entry)
26316 : "0" (service),
26317 "1" (0),
26318 - "D" (&bios32_indirect));
26319 + "D" (&bios32_indirect),
26320 + "r"(__PCIBIOS_DS)
26321 + : "memory");
26322 +
26323 + pax_open_kernel();
26324 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26325 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26326 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26327 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26328 + pax_close_kernel();
26329 +
26330 local_irq_restore(flags);
26331
26332 switch (return_code) {
26333 - case 0:
26334 - return address + entry;
26335 - case 0x80: /* Not present */
26336 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26337 - return 0;
26338 - default: /* Shouldn't happen */
26339 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26340 - service, return_code);
26341 + case 0: {
26342 + int cpu;
26343 + unsigned char flags;
26344 +
26345 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26346 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26347 + printk(KERN_WARNING "bios32_service: not valid\n");
26348 return 0;
26349 + }
26350 + address = address + PAGE_OFFSET;
26351 + length += 16UL; /* some BIOSs underreport this... */
26352 + flags = 4;
26353 + if (length >= 64*1024*1024) {
26354 + length >>= PAGE_SHIFT;
26355 + flags |= 8;
26356 + }
26357 +
26358 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26359 + gdt = get_cpu_gdt_table(cpu);
26360 + pack_descriptor(&d, address, length, 0x9b, flags);
26361 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26362 + pack_descriptor(&d, address, length, 0x93, flags);
26363 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26364 + }
26365 + return entry;
26366 + }
26367 + case 0x80: /* Not present */
26368 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26369 + return 0;
26370 + default: /* Shouldn't happen */
26371 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26372 + service, return_code);
26373 + return 0;
26374 }
26375 }
26376
26377 static struct {
26378 unsigned long address;
26379 unsigned short segment;
26380 -} pci_indirect = { 0, __KERNEL_CS };
26381 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26382
26383 -static int pci_bios_present;
26384 +static int pci_bios_present __read_only;
26385
26386 static int __devinit check_pcibios(void)
26387 {
26388 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26389 unsigned long flags, pcibios_entry;
26390
26391 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26392 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26393 + pci_indirect.address = pcibios_entry;
26394
26395 local_irq_save(flags);
26396 - __asm__(
26397 - "lcall *(%%edi); cld\n\t"
26398 + __asm__("movw %w6, %%ds\n\t"
26399 + "lcall *%%ss:(%%edi); cld\n\t"
26400 + "push %%ss\n\t"
26401 + "pop %%ds\n\t"
26402 "jc 1f\n\t"
26403 "xor %%ah, %%ah\n"
26404 "1:"
26405 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26406 "=b" (ebx),
26407 "=c" (ecx)
26408 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26409 - "D" (&pci_indirect)
26410 + "D" (&pci_indirect),
26411 + "r" (__PCIBIOS_DS)
26412 : "memory");
26413 local_irq_restore(flags);
26414
26415 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26416
26417 switch (len) {
26418 case 1:
26419 - __asm__("lcall *(%%esi); cld\n\t"
26420 + __asm__("movw %w6, %%ds\n\t"
26421 + "lcall *%%ss:(%%esi); cld\n\t"
26422 + "push %%ss\n\t"
26423 + "pop %%ds\n\t"
26424 "jc 1f\n\t"
26425 "xor %%ah, %%ah\n"
26426 "1:"
26427 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26428 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26429 "b" (bx),
26430 "D" ((long)reg),
26431 - "S" (&pci_indirect));
26432 + "S" (&pci_indirect),
26433 + "r" (__PCIBIOS_DS));
26434 /*
26435 * Zero-extend the result beyond 8 bits, do not trust the
26436 * BIOS having done it:
26437 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26438 *value &= 0xff;
26439 break;
26440 case 2:
26441 - __asm__("lcall *(%%esi); cld\n\t"
26442 + __asm__("movw %w6, %%ds\n\t"
26443 + "lcall *%%ss:(%%esi); cld\n\t"
26444 + "push %%ss\n\t"
26445 + "pop %%ds\n\t"
26446 "jc 1f\n\t"
26447 "xor %%ah, %%ah\n"
26448 "1:"
26449 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26450 : "1" (PCIBIOS_READ_CONFIG_WORD),
26451 "b" (bx),
26452 "D" ((long)reg),
26453 - "S" (&pci_indirect));
26454 + "S" (&pci_indirect),
26455 + "r" (__PCIBIOS_DS));
26456 /*
26457 * Zero-extend the result beyond 16 bits, do not trust the
26458 * BIOS having done it:
26459 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26460 *value &= 0xffff;
26461 break;
26462 case 4:
26463 - __asm__("lcall *(%%esi); cld\n\t"
26464 + __asm__("movw %w6, %%ds\n\t"
26465 + "lcall *%%ss:(%%esi); cld\n\t"
26466 + "push %%ss\n\t"
26467 + "pop %%ds\n\t"
26468 "jc 1f\n\t"
26469 "xor %%ah, %%ah\n"
26470 "1:"
26471 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26472 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26473 "b" (bx),
26474 "D" ((long)reg),
26475 - "S" (&pci_indirect));
26476 + "S" (&pci_indirect),
26477 + "r" (__PCIBIOS_DS));
26478 break;
26479 }
26480
26481 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26482
26483 switch (len) {
26484 case 1:
26485 - __asm__("lcall *(%%esi); cld\n\t"
26486 + __asm__("movw %w6, %%ds\n\t"
26487 + "lcall *%%ss:(%%esi); cld\n\t"
26488 + "push %%ss\n\t"
26489 + "pop %%ds\n\t"
26490 "jc 1f\n\t"
26491 "xor %%ah, %%ah\n"
26492 "1:"
26493 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26494 "c" (value),
26495 "b" (bx),
26496 "D" ((long)reg),
26497 - "S" (&pci_indirect));
26498 + "S" (&pci_indirect),
26499 + "r" (__PCIBIOS_DS));
26500 break;
26501 case 2:
26502 - __asm__("lcall *(%%esi); cld\n\t"
26503 + __asm__("movw %w6, %%ds\n\t"
26504 + "lcall *%%ss:(%%esi); cld\n\t"
26505 + "push %%ss\n\t"
26506 + "pop %%ds\n\t"
26507 "jc 1f\n\t"
26508 "xor %%ah, %%ah\n"
26509 "1:"
26510 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26511 "c" (value),
26512 "b" (bx),
26513 "D" ((long)reg),
26514 - "S" (&pci_indirect));
26515 + "S" (&pci_indirect),
26516 + "r" (__PCIBIOS_DS));
26517 break;
26518 case 4:
26519 - __asm__("lcall *(%%esi); cld\n\t"
26520 + __asm__("movw %w6, %%ds\n\t"
26521 + "lcall *%%ss:(%%esi); cld\n\t"
26522 + "push %%ss\n\t"
26523 + "pop %%ds\n\t"
26524 "jc 1f\n\t"
26525 "xor %%ah, %%ah\n"
26526 "1:"
26527 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26528 "c" (value),
26529 "b" (bx),
26530 "D" ((long)reg),
26531 - "S" (&pci_indirect));
26532 + "S" (&pci_indirect),
26533 + "r" (__PCIBIOS_DS));
26534 break;
26535 }
26536
26537 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26538
26539 DBG("PCI: Fetching IRQ routing table... ");
26540 __asm__("push %%es\n\t"
26541 + "movw %w8, %%ds\n\t"
26542 "push %%ds\n\t"
26543 "pop %%es\n\t"
26544 - "lcall *(%%esi); cld\n\t"
26545 + "lcall *%%ss:(%%esi); cld\n\t"
26546 "pop %%es\n\t"
26547 + "push %%ss\n\t"
26548 + "pop %%ds\n"
26549 "jc 1f\n\t"
26550 "xor %%ah, %%ah\n"
26551 "1:"
26552 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26553 "1" (0),
26554 "D" ((long) &opt),
26555 "S" (&pci_indirect),
26556 - "m" (opt)
26557 + "m" (opt),
26558 + "r" (__PCIBIOS_DS)
26559 : "memory");
26560 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26561 if (ret & 0xff00)
26562 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26563 {
26564 int ret;
26565
26566 - __asm__("lcall *(%%esi); cld\n\t"
26567 + __asm__("movw %w5, %%ds\n\t"
26568 + "lcall *%%ss:(%%esi); cld\n\t"
26569 + "push %%ss\n\t"
26570 + "pop %%ds\n"
26571 "jc 1f\n\t"
26572 "xor %%ah, %%ah\n"
26573 "1:"
26574 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26575 : "0" (PCIBIOS_SET_PCI_HW_INT),
26576 "b" ((dev->bus->number << 8) | dev->devfn),
26577 "c" ((irq << 8) | (pin + 10)),
26578 - "S" (&pci_indirect));
26579 + "S" (&pci_indirect),
26580 + "r" (__PCIBIOS_DS));
26581 return !(ret & 0xff00);
26582 }
26583 EXPORT_SYMBOL(pcibios_set_irq_routing);
26584 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26585 index 40e4469..1ab536e 100644
26586 --- a/arch/x86/platform/efi/efi_32.c
26587 +++ b/arch/x86/platform/efi/efi_32.c
26588 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26589 {
26590 struct desc_ptr gdt_descr;
26591
26592 +#ifdef CONFIG_PAX_KERNEXEC
26593 + struct desc_struct d;
26594 +#endif
26595 +
26596 local_irq_save(efi_rt_eflags);
26597
26598 load_cr3(initial_page_table);
26599 __flush_tlb_all();
26600
26601 +#ifdef CONFIG_PAX_KERNEXEC
26602 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26603 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26604 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26605 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26606 +#endif
26607 +
26608 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26609 gdt_descr.size = GDT_SIZE - 1;
26610 load_gdt(&gdt_descr);
26611 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26612 {
26613 struct desc_ptr gdt_descr;
26614
26615 +#ifdef CONFIG_PAX_KERNEXEC
26616 + struct desc_struct d;
26617 +
26618 + memset(&d, 0, sizeof d);
26619 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26620 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26621 +#endif
26622 +
26623 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26624 gdt_descr.size = GDT_SIZE - 1;
26625 load_gdt(&gdt_descr);
26626 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26627 index fbe66e6..c5c0dd2 100644
26628 --- a/arch/x86/platform/efi/efi_stub_32.S
26629 +++ b/arch/x86/platform/efi/efi_stub_32.S
26630 @@ -6,7 +6,9 @@
26631 */
26632
26633 #include <linux/linkage.h>
26634 +#include <linux/init.h>
26635 #include <asm/page_types.h>
26636 +#include <asm/segment.h>
26637
26638 /*
26639 * efi_call_phys(void *, ...) is a function with variable parameters.
26640 @@ -20,7 +22,7 @@
26641 * service functions will comply with gcc calling convention, too.
26642 */
26643
26644 -.text
26645 +__INIT
26646 ENTRY(efi_call_phys)
26647 /*
26648 * 0. The function can only be called in Linux kernel. So CS has been
26649 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26650 * The mapping of lower virtual memory has been created in prelog and
26651 * epilog.
26652 */
26653 - movl $1f, %edx
26654 - subl $__PAGE_OFFSET, %edx
26655 - jmp *%edx
26656 + movl $(__KERNEXEC_EFI_DS), %edx
26657 + mov %edx, %ds
26658 + mov %edx, %es
26659 + mov %edx, %ss
26660 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26661 1:
26662
26663 /*
26664 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26665 * parameter 2, ..., param n. To make things easy, we save the return
26666 * address of efi_call_phys in a global variable.
26667 */
26668 - popl %edx
26669 - movl %edx, saved_return_addr
26670 - /* get the function pointer into ECX*/
26671 - popl %ecx
26672 - movl %ecx, efi_rt_function_ptr
26673 - movl $2f, %edx
26674 - subl $__PAGE_OFFSET, %edx
26675 - pushl %edx
26676 + popl (saved_return_addr)
26677 + popl (efi_rt_function_ptr)
26678
26679 /*
26680 * 3. Clear PG bit in %CR0.
26681 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26682 /*
26683 * 5. Call the physical function.
26684 */
26685 - jmp *%ecx
26686 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
26687
26688 -2:
26689 /*
26690 * 6. After EFI runtime service returns, control will return to
26691 * following instruction. We'd better readjust stack pointer first.
26692 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26693 movl %cr0, %edx
26694 orl $0x80000000, %edx
26695 movl %edx, %cr0
26696 - jmp 1f
26697 -1:
26698 +
26699 /*
26700 * 8. Now restore the virtual mode from flat mode by
26701 * adding EIP with PAGE_OFFSET.
26702 */
26703 - movl $1f, %edx
26704 - jmp *%edx
26705 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26706 1:
26707 + movl $(__KERNEL_DS), %edx
26708 + mov %edx, %ds
26709 + mov %edx, %es
26710 + mov %edx, %ss
26711
26712 /*
26713 * 9. Balance the stack. And because EAX contain the return value,
26714 * we'd better not clobber it.
26715 */
26716 - leal efi_rt_function_ptr, %edx
26717 - movl (%edx), %ecx
26718 - pushl %ecx
26719 + pushl (efi_rt_function_ptr)
26720
26721 /*
26722 - * 10. Push the saved return address onto the stack and return.
26723 + * 10. Return to the saved return address.
26724 */
26725 - leal saved_return_addr, %edx
26726 - movl (%edx), %ecx
26727 - pushl %ecx
26728 - ret
26729 + jmpl *(saved_return_addr)
26730 ENDPROC(efi_call_phys)
26731 .previous
26732
26733 -.data
26734 +__INITDATA
26735 saved_return_addr:
26736 .long 0
26737 efi_rt_function_ptr:
26738 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26739 index 4c07cca..2c8427d 100644
26740 --- a/arch/x86/platform/efi/efi_stub_64.S
26741 +++ b/arch/x86/platform/efi/efi_stub_64.S
26742 @@ -7,6 +7,7 @@
26743 */
26744
26745 #include <linux/linkage.h>
26746 +#include <asm/alternative-asm.h>
26747
26748 #define SAVE_XMM \
26749 mov %rsp, %rax; \
26750 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
26751 call *%rdi
26752 addq $32, %rsp
26753 RESTORE_XMM
26754 + pax_force_retaddr 0, 1
26755 ret
26756 ENDPROC(efi_call0)
26757
26758 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
26759 call *%rdi
26760 addq $32, %rsp
26761 RESTORE_XMM
26762 + pax_force_retaddr 0, 1
26763 ret
26764 ENDPROC(efi_call1)
26765
26766 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
26767 call *%rdi
26768 addq $32, %rsp
26769 RESTORE_XMM
26770 + pax_force_retaddr 0, 1
26771 ret
26772 ENDPROC(efi_call2)
26773
26774 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
26775 call *%rdi
26776 addq $32, %rsp
26777 RESTORE_XMM
26778 + pax_force_retaddr 0, 1
26779 ret
26780 ENDPROC(efi_call3)
26781
26782 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
26783 call *%rdi
26784 addq $32, %rsp
26785 RESTORE_XMM
26786 + pax_force_retaddr 0, 1
26787 ret
26788 ENDPROC(efi_call4)
26789
26790 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
26791 call *%rdi
26792 addq $48, %rsp
26793 RESTORE_XMM
26794 + pax_force_retaddr 0, 1
26795 ret
26796 ENDPROC(efi_call5)
26797
26798 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
26799 call *%rdi
26800 addq $48, %rsp
26801 RESTORE_XMM
26802 + pax_force_retaddr 0, 1
26803 ret
26804 ENDPROC(efi_call6)
26805 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26806 index 475e2cd..1b8e708 100644
26807 --- a/arch/x86/platform/mrst/mrst.c
26808 +++ b/arch/x86/platform/mrst/mrst.c
26809 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26810 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26811 int sfi_mrtc_num;
26812
26813 -static void mrst_power_off(void)
26814 +static __noreturn void mrst_power_off(void)
26815 {
26816 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
26817 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
26818 + BUG();
26819 }
26820
26821 -static void mrst_reboot(void)
26822 +static __noreturn void mrst_reboot(void)
26823 {
26824 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
26825 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
26826 else
26827 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26828 + BUG();
26829 }
26830
26831 /* parse all the mtimer info to a static mtimer array */
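
The mrst.c hunk above marks the power-off and reboot handlers __noreturn and appends BUG(), so the annotation stays truthful even if the SCU IPC request unexpectedly falls through. The shape of that pattern, with a stubbed-out firmware call (hypothetical names):

#include <linux/bug.h>
#include <linux/compiler.h>

static void example_issue_shutdown(void)
{
	/* stand-in for the platform's IPC shutdown request */
}

static __noreturn void example_power_off(void)
{
	example_issue_shutdown();
	BUG();	/* never let a "no return" handler actually return */
}
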
26832 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26833 index f10c0af..3ec1f95 100644
26834 --- a/arch/x86/power/cpu.c
26835 +++ b/arch/x86/power/cpu.c
26836 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
26837 static void fix_processor_context(void)
26838 {
26839 int cpu = smp_processor_id();
26840 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26841 + struct tss_struct *t = init_tss + cpu;
26842
26843 set_tss_desc(cpu, t); /*
26844 * This just modifies memory; should not be
26845 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
26846 */
26847
26848 #ifdef CONFIG_X86_64
26849 + pax_open_kernel();
26850 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26851 + pax_close_kernel();
26852
26853 syscall_init(); /* This sets MSR_*STAR and related */
26854 #endif
26855 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26856 index 5d17950..2253fc9 100644
26857 --- a/arch/x86/vdso/Makefile
26858 +++ b/arch/x86/vdso/Makefile
26859 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
26860 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26861 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26862
26863 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26864 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26865 GCOV_PROFILE := n
26866
26867 #
26868 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26869 index 468d591..8e80a0a 100644
26870 --- a/arch/x86/vdso/vdso32-setup.c
26871 +++ b/arch/x86/vdso/vdso32-setup.c
26872 @@ -25,6 +25,7 @@
26873 #include <asm/tlbflush.h>
26874 #include <asm/vdso.h>
26875 #include <asm/proto.h>
26876 +#include <asm/mman.h>
26877
26878 enum {
26879 VDSO_DISABLED = 0,
26880 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26881 void enable_sep_cpu(void)
26882 {
26883 int cpu = get_cpu();
26884 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26885 + struct tss_struct *tss = init_tss + cpu;
26886
26887 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26888 put_cpu();
26889 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26890 gate_vma.vm_start = FIXADDR_USER_START;
26891 gate_vma.vm_end = FIXADDR_USER_END;
26892 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26893 - gate_vma.vm_page_prot = __P101;
26894 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26895 /*
26896 * Make sure the vDSO gets into every core dump.
26897 * Dumping its contents makes post-mortem fully interpretable later
26898 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26899 if (compat)
26900 addr = VDSO_HIGH_BASE;
26901 else {
26902 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26903 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26904 if (IS_ERR_VALUE(addr)) {
26905 ret = addr;
26906 goto up_fail;
26907 }
26908 }
26909
26910 - current->mm->context.vdso = (void *)addr;
26911 + current->mm->context.vdso = addr;
26912
26913 if (compat_uses_vma || !compat) {
26914 /*
26915 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26916 }
26917
26918 current_thread_info()->sysenter_return =
26919 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26920 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26921
26922 up_fail:
26923 if (ret)
26924 - current->mm->context.vdso = NULL;
26925 + current->mm->context.vdso = 0;
26926
26927 up_write(&mm->mmap_sem);
26928
26929 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
26930
26931 const char *arch_vma_name(struct vm_area_struct *vma)
26932 {
26933 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26934 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26935 return "[vdso]";
26936 +
26937 +#ifdef CONFIG_PAX_SEGMEXEC
26938 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26939 + return "[vdso]";
26940 +#endif
26941 +
26942 return NULL;
26943 }
26944
26945 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26946 * Check to see if the corresponding task was created in compat vdso
26947 * mode.
26948 */
26949 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26950 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26951 return &gate_vma;
26952 return NULL;
26953 }
26954 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26955 index 153407c..611cba9 100644
26956 --- a/arch/x86/vdso/vma.c
26957 +++ b/arch/x86/vdso/vma.c
26958 @@ -16,8 +16,6 @@
26959 #include <asm/vdso.h>
26960 #include <asm/page.h>
26961
26962 -unsigned int __read_mostly vdso_enabled = 1;
26963 -
26964 extern char vdso_start[], vdso_end[];
26965 extern unsigned short vdso_sync_cpuid;
26966
26967 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26968 * unaligned here as a result of stack start randomization.
26969 */
26970 addr = PAGE_ALIGN(addr);
26971 - addr = align_addr(addr, NULL, ALIGN_VDSO);
26972
26973 return addr;
26974 }
26975 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26976 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26977 {
26978 struct mm_struct *mm = current->mm;
26979 - unsigned long addr;
26980 + unsigned long addr = 0;
26981 int ret;
26982
26983 - if (!vdso_enabled)
26984 - return 0;
26985 -
26986 down_write(&mm->mmap_sem);
26987 +
26988 +#ifdef CONFIG_PAX_RANDMMAP
26989 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26990 +#endif
26991 +
26992 addr = vdso_addr(mm->start_stack, vdso_size);
26993 + addr = align_addr(addr, NULL, ALIGN_VDSO);
26994 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26995 if (IS_ERR_VALUE(addr)) {
26996 ret = addr;
26997 goto up_fail;
26998 }
26999
27000 - current->mm->context.vdso = (void *)addr;
27001 + mm->context.vdso = addr;
27002
27003 ret = install_special_mapping(mm, addr, vdso_size,
27004 VM_READ|VM_EXEC|
27005 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
27006 VM_ALWAYSDUMP,
27007 vdso_pages);
27008 - if (ret) {
27009 - current->mm->context.vdso = NULL;
27010 - goto up_fail;
27011 - }
27012 +
27013 + if (ret)
27014 + mm->context.vdso = 0;
27015
27016 up_fail:
27017 up_write(&mm->mmap_sem);
27018 return ret;
27019 }
27020 -
27021 -static __init int vdso_setup(char *s)
27022 -{
27023 - vdso_enabled = simple_strtoul(s, NULL, 0);
27024 - return 0;
27025 -}
27026 -__setup("vdso=", vdso_setup);
27027 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27028 index 4e517d4..68a48f5 100644
27029 --- a/arch/x86/xen/enlighten.c
27030 +++ b/arch/x86/xen/enlighten.c
27031 @@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27032
27033 struct shared_info xen_dummy_shared_info;
27034
27035 -void *xen_initial_gdt;
27036 -
27037 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27038 __read_mostly int xen_have_vector_callback;
27039 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
27040 @@ -1030,30 +1028,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27041 #endif
27042 };
27043
27044 -static void xen_reboot(int reason)
27045 +static __noreturn void xen_reboot(int reason)
27046 {
27047 struct sched_shutdown r = { .reason = reason };
27048
27049 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27050 - BUG();
27051 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27052 + BUG();
27053 }
27054
27055 -static void xen_restart(char *msg)
27056 +static __noreturn void xen_restart(char *msg)
27057 {
27058 xen_reboot(SHUTDOWN_reboot);
27059 }
27060
27061 -static void xen_emergency_restart(void)
27062 +static __noreturn void xen_emergency_restart(void)
27063 {
27064 xen_reboot(SHUTDOWN_reboot);
27065 }
27066
27067 -static void xen_machine_halt(void)
27068 +static __noreturn void xen_machine_halt(void)
27069 {
27070 xen_reboot(SHUTDOWN_poweroff);
27071 }
27072
27073 -static void xen_machine_power_off(void)
27074 +static __noreturn void xen_machine_power_off(void)
27075 {
27076 if (pm_power_off)
27077 pm_power_off();
27078 @@ -1156,7 +1154,17 @@ asmlinkage void __init xen_start_kernel(void)
27079 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27080
27081 /* Work out if we support NX */
27082 - x86_configure_nx();
27083 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27084 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27085 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27086 + unsigned l, h;
27087 +
27088 + __supported_pte_mask |= _PAGE_NX;
27089 + rdmsr(MSR_EFER, l, h);
27090 + l |= EFER_NX;
27091 + wrmsr(MSR_EFER, l, h);
27092 + }
27093 +#endif
27094
27095 xen_setup_features();
27096
27097 @@ -1187,13 +1195,6 @@ asmlinkage void __init xen_start_kernel(void)
27098
27099 machine_ops = xen_machine_ops;
27100
27101 - /*
27102 - * The only reliable way to retain the initial address of the
27103 - * percpu gdt_page is to remember it here, so we can go and
27104 - * mark it RW later, when the initial percpu area is freed.
27105 - */
27106 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27107 -
27108 xen_smp_init();
27109
27110 #ifdef CONFIG_ACPI_NUMA
27111 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27112 index dc19347..1b07a2c 100644
27113 --- a/arch/x86/xen/mmu.c
27114 +++ b/arch/x86/xen/mmu.c
27115 @@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27116 convert_pfn_mfn(init_level4_pgt);
27117 convert_pfn_mfn(level3_ident_pgt);
27118 convert_pfn_mfn(level3_kernel_pgt);
27119 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27120 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27121 + convert_pfn_mfn(level3_vmemmap_pgt);
27122
27123 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27124 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27125 @@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27126 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27127 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27128 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27129 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27130 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27131 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27132 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27133 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27134 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27135 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27136
27137 @@ -1963,6 +1970,7 @@ static void __init xen_post_allocator_init(void)
27138 pv_mmu_ops.set_pud = xen_set_pud;
27139 #if PAGETABLE_LEVELS == 4
27140 pv_mmu_ops.set_pgd = xen_set_pgd;
27141 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27142 #endif
27143
27144 /* This will work as long as patching hasn't happened yet
27145 @@ -2044,6 +2052,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27146 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27147 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27148 .set_pgd = xen_set_pgd_hyper,
27149 + .set_pgd_batched = xen_set_pgd_hyper,
27150
27151 .alloc_pud = xen_alloc_pmd_init,
27152 .release_pud = xen_release_pmd_init,
27153 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27154 index f2ce60a..14e08dc 100644
27155 --- a/arch/x86/xen/smp.c
27156 +++ b/arch/x86/xen/smp.c
27157 @@ -209,11 +209,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27158 {
27159 BUG_ON(smp_processor_id() != 0);
27160 native_smp_prepare_boot_cpu();
27161 -
27162 - /* We've switched to the "real" per-cpu gdt, so make sure the
27163 - old memory can be recycled */
27164 - make_lowmem_page_readwrite(xen_initial_gdt);
27165 -
27166 xen_filter_cpu_maps();
27167 xen_setup_vcpu_info_placement();
27168 }
27169 @@ -290,12 +285,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27170 gdt = get_cpu_gdt_table(cpu);
27171
27172 ctxt->flags = VGCF_IN_KERNEL;
27173 - ctxt->user_regs.ds = __USER_DS;
27174 - ctxt->user_regs.es = __USER_DS;
27175 + ctxt->user_regs.ds = __KERNEL_DS;
27176 + ctxt->user_regs.es = __KERNEL_DS;
27177 ctxt->user_regs.ss = __KERNEL_DS;
27178 #ifdef CONFIG_X86_32
27179 ctxt->user_regs.fs = __KERNEL_PERCPU;
27180 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27181 + savesegment(gs, ctxt->user_regs.gs);
27182 #else
27183 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27184 #endif
27185 @@ -346,13 +341,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27186 int rc;
27187
27188 per_cpu(current_task, cpu) = idle;
27189 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27190 #ifdef CONFIG_X86_32
27191 irq_ctx_init(cpu);
27192 #else
27193 clear_tsk_thread_flag(idle, TIF_FORK);
27194 - per_cpu(kernel_stack, cpu) =
27195 - (unsigned long)task_stack_page(idle) -
27196 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27197 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27198 #endif
27199 xen_setup_runstate_info(cpu);
27200 xen_setup_timer(cpu);
27201 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27202 index b040b0e..8cc4fe0 100644
27203 --- a/arch/x86/xen/xen-asm_32.S
27204 +++ b/arch/x86/xen/xen-asm_32.S
27205 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27206 ESP_OFFSET=4 # bytes pushed onto stack
27207
27208 /*
27209 - * Store vcpu_info pointer for easy access. Do it this way to
27210 - * avoid having to reload %fs
27211 + * Store vcpu_info pointer for easy access.
27212 */
27213 #ifdef CONFIG_SMP
27214 - GET_THREAD_INFO(%eax)
27215 - movl TI_cpu(%eax), %eax
27216 - movl __per_cpu_offset(,%eax,4), %eax
27217 - mov xen_vcpu(%eax), %eax
27218 + push %fs
27219 + mov $(__KERNEL_PERCPU), %eax
27220 + mov %eax, %fs
27221 + mov PER_CPU_VAR(xen_vcpu), %eax
27222 + pop %fs
27223 #else
27224 movl xen_vcpu, %eax
27225 #endif
27226 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27227 index aaa7291..3f77960 100644
27228 --- a/arch/x86/xen/xen-head.S
27229 +++ b/arch/x86/xen/xen-head.S
27230 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27231 #ifdef CONFIG_X86_32
27232 mov %esi,xen_start_info
27233 mov $init_thread_union+THREAD_SIZE,%esp
27234 +#ifdef CONFIG_SMP
27235 + movl $cpu_gdt_table,%edi
27236 + movl $__per_cpu_load,%eax
27237 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27238 + rorl $16,%eax
27239 + movb %al,__KERNEL_PERCPU + 4(%edi)
27240 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27241 + movl $__per_cpu_end - 1,%eax
27242 + subl $__per_cpu_start,%eax
27243 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27244 +#endif
27245 #else
27246 mov %rsi,xen_start_info
27247 mov $init_thread_union+THREAD_SIZE,%rsp
27248 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27249 index b095739..8c17bcd 100644
27250 --- a/arch/x86/xen/xen-ops.h
27251 +++ b/arch/x86/xen/xen-ops.h
27252 @@ -10,8 +10,6 @@
27253 extern const char xen_hypervisor_callback[];
27254 extern const char xen_failsafe_callback[];
27255
27256 -extern void *xen_initial_gdt;
27257 -
27258 struct trap_info;
27259 void xen_copy_trap_info(struct trap_info *traps);
27260
27261 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27262 index 525bd3d..ef888b1 100644
27263 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
27264 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27265 @@ -119,9 +119,9 @@
27266 ----------------------------------------------------------------------*/
27267
27268 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27269 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27270 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27271 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27272 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27273
27274 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27275 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27276 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27277 index 2f33760..835e50a 100644
27278 --- a/arch/xtensa/variants/fsf/include/variant/core.h
27279 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
27280 @@ -11,6 +11,7 @@
27281 #ifndef _XTENSA_CORE_H
27282 #define _XTENSA_CORE_H
27283
27284 +#include <linux/const.h>
27285
27286 /****************************************************************************
27287 Parameters Useful for Any Code, USER or PRIVILEGED
27288 @@ -112,9 +113,9 @@
27289 ----------------------------------------------------------------------*/
27290
27291 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27292 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27293 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27294 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27295 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27296
27297 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27298 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27299 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27300 index af00795..2bb8105 100644
27301 --- a/arch/xtensa/variants/s6000/include/variant/core.h
27302 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
27303 @@ -11,6 +11,7 @@
27304 #ifndef _XTENSA_CORE_CONFIGURATION_H
27305 #define _XTENSA_CORE_CONFIGURATION_H
27306
27307 +#include <linux/const.h>
27308
27309 /****************************************************************************
27310 Parameters Useful for Any Code, USER or PRIVILEGED
27311 @@ -118,9 +119,9 @@
27312 ----------------------------------------------------------------------*/
27313
27314 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27315 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27316 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27317 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27318 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27319
27320 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27321 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27322 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27323 index 58916af..9cb880b 100644
27324 --- a/block/blk-iopoll.c
27325 +++ b/block/blk-iopoll.c
27326 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27327 }
27328 EXPORT_SYMBOL(blk_iopoll_complete);
27329
27330 -static void blk_iopoll_softirq(struct softirq_action *h)
27331 +static void blk_iopoll_softirq(void)
27332 {
27333 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27334 int rearm = 0, budget = blk_iopoll_budget;
27335 diff --git a/block/blk-map.c b/block/blk-map.c
27336 index 623e1cd..ca1e109 100644
27337 --- a/block/blk-map.c
27338 +++ b/block/blk-map.c
27339 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27340 if (!len || !kbuf)
27341 return -EINVAL;
27342
27343 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27344 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27345 if (do_copy)
27346 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27347 else
27348 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27349 index 1366a89..e17f54b 100644
27350 --- a/block/blk-softirq.c
27351 +++ b/block/blk-softirq.c
27352 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27353 * Softirq action handler - move entries to local list and loop over them
27354 * while passing them to the queue registered handler.
27355 */
27356 -static void blk_done_softirq(struct softirq_action *h)
27357 +static void blk_done_softirq(void)
27358 {
27359 struct list_head *cpu_list, local_list;
27360
27361 diff --git a/block/bsg.c b/block/bsg.c
27362 index ff64ae3..593560c 100644
27363 --- a/block/bsg.c
27364 +++ b/block/bsg.c
27365 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27366 struct sg_io_v4 *hdr, struct bsg_device *bd,
27367 fmode_t has_write_perm)
27368 {
27369 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27370 + unsigned char *cmdptr;
27371 +
27372 if (hdr->request_len > BLK_MAX_CDB) {
27373 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27374 if (!rq->cmd)
27375 return -ENOMEM;
27376 - }
27377 + cmdptr = rq->cmd;
27378 + } else
27379 + cmdptr = tmpcmd;
27380
27381 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27382 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27383 hdr->request_len))
27384 return -EFAULT;
27385
27386 + if (cmdptr != rq->cmd)
27387 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27388 +
27389 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27390 if (blk_verify_command(rq->cmd, has_write_perm))
27391 return -EPERM;
27392 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27393 index 7c668c8..db3521c 100644
27394 --- a/block/compat_ioctl.c
27395 +++ b/block/compat_ioctl.c
27396 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27397 err |= __get_user(f->spec1, &uf->spec1);
27398 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27399 err |= __get_user(name, &uf->name);
27400 - f->name = compat_ptr(name);
27401 + f->name = (void __force_kernel *)compat_ptr(name);
27402 if (err) {
27403 err = -EFAULT;
27404 goto out;
27405 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27406 index 6296b40..417c00f 100644
27407 --- a/block/partitions/efi.c
27408 +++ b/block/partitions/efi.c
27409 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27410 if (!gpt)
27411 return NULL;
27412
27413 + if (!le32_to_cpu(gpt->num_partition_entries))
27414 + return NULL;
27415 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27416 + if (!pte)
27417 + return NULL;
27418 +
27419 count = le32_to_cpu(gpt->num_partition_entries) *
27420 le32_to_cpu(gpt->sizeof_partition_entry);
27421 - if (!count)
27422 - return NULL;
27423 - pte = kzalloc(count, GFP_KERNEL);
27424 - if (!pte)
27425 - return NULL;
27426 -
27427 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27428 (u8 *) pte,
27429 count) < count) {
27430 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27431 index 260fa80..e8f3caf 100644
27432 --- a/block/scsi_ioctl.c
27433 +++ b/block/scsi_ioctl.c
27434 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27435 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27436 struct sg_io_hdr *hdr, fmode_t mode)
27437 {
27438 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27439 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27440 + unsigned char *cmdptr;
27441 +
27442 + if (rq->cmd != rq->__cmd)
27443 + cmdptr = rq->cmd;
27444 + else
27445 + cmdptr = tmpcmd;
27446 +
27447 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27448 return -EFAULT;
27449 +
27450 + if (cmdptr != rq->cmd)
27451 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27452 +
27453 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27454 return -EPERM;
27455
27456 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27457 int err;
27458 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27459 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27460 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27461 + unsigned char *cmdptr;
27462
27463 if (!sic)
27464 return -EINVAL;
27465 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27466 */
27467 err = -EFAULT;
27468 rq->cmd_len = cmdlen;
27469 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27470 +
27471 + if (rq->cmd != rq->__cmd)
27472 + cmdptr = rq->cmd;
27473 + else
27474 + cmdptr = tmpcmd;
27475 +
27476 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27477 goto error;
27478
27479 + if (rq->cmd != cmdptr)
27480 + memcpy(rq->cmd, cmdptr, cmdlen);
27481 +
27482 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27483 goto error;
27484
27485 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27486 index 671d4d6..5f24030 100644
27487 --- a/crypto/cryptd.c
27488 +++ b/crypto/cryptd.c
27489 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27490
27491 struct cryptd_blkcipher_request_ctx {
27492 crypto_completion_t complete;
27493 -};
27494 +} __no_const;
27495
27496 struct cryptd_hash_ctx {
27497 struct crypto_shash *child;
27498 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27499
27500 struct cryptd_aead_request_ctx {
27501 crypto_completion_t complete;
27502 -};
27503 +} __no_const;
27504
27505 static void cryptd_queue_worker(struct work_struct *work);
27506
27507 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27508 index 5d41894..22021e4 100644
27509 --- a/drivers/acpi/apei/cper.c
27510 +++ b/drivers/acpi/apei/cper.c
27511 @@ -38,12 +38,12 @@
27512 */
27513 u64 cper_next_record_id(void)
27514 {
27515 - static atomic64_t seq;
27516 + static atomic64_unchecked_t seq;
27517
27518 - if (!atomic64_read(&seq))
27519 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
27520 + if (!atomic64_read_unchecked(&seq))
27521 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27522
27523 - return atomic64_inc_return(&seq);
27524 + return atomic64_inc_return_unchecked(&seq);
27525 }
27526 EXPORT_SYMBOL_GPL(cper_next_record_id);
27527
27528 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27529 index b258cab..3fb7da7 100644
27530 --- a/drivers/acpi/ec_sys.c
27531 +++ b/drivers/acpi/ec_sys.c
27532 @@ -12,6 +12,7 @@
27533 #include <linux/acpi.h>
27534 #include <linux/debugfs.h>
27535 #include <linux/module.h>
27536 +#include <linux/uaccess.h>
27537 #include "internal.h"
27538
27539 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27540 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27541 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27542 */
27543 unsigned int size = EC_SPACE_SIZE;
27544 - u8 *data = (u8 *) buf;
27545 + u8 data;
27546 loff_t init_off = *off;
27547 int err = 0;
27548
27549 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27550 size = count;
27551
27552 while (size) {
27553 - err = ec_read(*off, &data[*off - init_off]);
27554 + err = ec_read(*off, &data);
27555 if (err)
27556 return err;
27557 + if (put_user(data, &buf[*off - init_off]))
27558 + return -EFAULT;
27559 *off += 1;
27560 size--;
27561 }
27562 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27563
27564 unsigned int size = count;
27565 loff_t init_off = *off;
27566 - u8 *data = (u8 *) buf;
27567 int err = 0;
27568
27569 if (*off >= EC_SPACE_SIZE)
27570 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27571 }
27572
27573 while (size) {
27574 - u8 byte_write = data[*off - init_off];
27575 + u8 byte_write;
27576 + if (get_user(byte_write, &buf[*off - init_off]))
27577 + return -EFAULT;
27578 err = ec_write(*off, byte_write);
27579 if (err)
27580 return err;
27581 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27582 index 251c7b62..000462d 100644
27583 --- a/drivers/acpi/proc.c
27584 +++ b/drivers/acpi/proc.c
27585 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27586 size_t count, loff_t * ppos)
27587 {
27588 struct list_head *node, *next;
27589 - char strbuf[5];
27590 - char str[5] = "";
27591 - unsigned int len = count;
27592 + char strbuf[5] = {0};
27593
27594 - if (len > 4)
27595 - len = 4;
27596 - if (len < 0)
27597 + if (count > 4)
27598 + count = 4;
27599 + if (copy_from_user(strbuf, buffer, count))
27600 return -EFAULT;
27601 -
27602 - if (copy_from_user(strbuf, buffer, len))
27603 - return -EFAULT;
27604 - strbuf[len] = '\0';
27605 - sscanf(strbuf, "%s", str);
27606 + strbuf[count] = '\0';
27607
27608 mutex_lock(&acpi_device_lock);
27609 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27610 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27611 if (!dev->wakeup.flags.valid)
27612 continue;
27613
27614 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27615 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27616 if (device_can_wakeup(&dev->dev)) {
27617 bool enable = !device_may_wakeup(&dev->dev);
27618 device_set_wakeup_enable(&dev->dev, enable);
27619 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27620 index 8ae05ce..7dbbed9 100644
27621 --- a/drivers/acpi/processor_driver.c
27622 +++ b/drivers/acpi/processor_driver.c
27623 @@ -555,7 +555,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27624 return 0;
27625 #endif
27626
27627 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27628 + BUG_ON(pr->id >= nr_cpu_ids);
27629
27630 /*
27631 * Buggy BIOS check
27632 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27633 index c06e0ec..a2c06ba 100644
27634 --- a/drivers/ata/libata-core.c
27635 +++ b/drivers/ata/libata-core.c
27636 @@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27637 struct ata_port *ap;
27638 unsigned int tag;
27639
27640 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27641 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27642 ap = qc->ap;
27643
27644 qc->flags = 0;
27645 @@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27646 struct ata_port *ap;
27647 struct ata_link *link;
27648
27649 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27650 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27651 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27652 ap = qc->ap;
27653 link = qc->dev->link;
27654 @@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27655 return;
27656
27657 spin_lock(&lock);
27658 + pax_open_kernel();
27659
27660 for (cur = ops->inherits; cur; cur = cur->inherits) {
27661 void **inherit = (void **)cur;
27662 @@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27663 if (IS_ERR(*pp))
27664 *pp = NULL;
27665
27666 - ops->inherits = NULL;
27667 + *(struct ata_port_operations **)&ops->inherits = NULL;
27668
27669 + pax_close_kernel();
27670 spin_unlock(&lock);
27671 }
27672
27673 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27674 index 048589f..4002b98 100644
27675 --- a/drivers/ata/pata_arasan_cf.c
27676 +++ b/drivers/ata/pata_arasan_cf.c
27677 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27678 /* Handle platform specific quirks */
27679 if (pdata->quirk) {
27680 if (pdata->quirk & CF_BROKEN_PIO) {
27681 - ap->ops->set_piomode = NULL;
27682 + pax_open_kernel();
27683 + *(void **)&ap->ops->set_piomode = NULL;
27684 + pax_close_kernel();
27685 ap->pio_mask = 0;
27686 }
27687 if (pdata->quirk & CF_BROKEN_MWDMA)
27688 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27689 index f9b983a..887b9d8 100644
27690 --- a/drivers/atm/adummy.c
27691 +++ b/drivers/atm/adummy.c
27692 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27693 vcc->pop(vcc, skb);
27694 else
27695 dev_kfree_skb_any(skb);
27696 - atomic_inc(&vcc->stats->tx);
27697 + atomic_inc_unchecked(&vcc->stats->tx);
27698
27699 return 0;
27700 }
27701 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27702 index f8f41e0..1f987dd 100644
27703 --- a/drivers/atm/ambassador.c
27704 +++ b/drivers/atm/ambassador.c
27705 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27706 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27707
27708 // VC layer stats
27709 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27710 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27711
27712 // free the descriptor
27713 kfree (tx_descr);
27714 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27715 dump_skb ("<<<", vc, skb);
27716
27717 // VC layer stats
27718 - atomic_inc(&atm_vcc->stats->rx);
27719 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27720 __net_timestamp(skb);
27721 // end of our responsibility
27722 atm_vcc->push (atm_vcc, skb);
27723 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27724 } else {
27725 PRINTK (KERN_INFO, "dropped over-size frame");
27726 // should we count this?
27727 - atomic_inc(&atm_vcc->stats->rx_drop);
27728 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27729 }
27730
27731 } else {
27732 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27733 }
27734
27735 if (check_area (skb->data, skb->len)) {
27736 - atomic_inc(&atm_vcc->stats->tx_err);
27737 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27738 return -ENOMEM; // ?
27739 }
27740
27741 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27742 index b22d71c..d6e1049 100644
27743 --- a/drivers/atm/atmtcp.c
27744 +++ b/drivers/atm/atmtcp.c
27745 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27746 if (vcc->pop) vcc->pop(vcc,skb);
27747 else dev_kfree_skb(skb);
27748 if (dev_data) return 0;
27749 - atomic_inc(&vcc->stats->tx_err);
27750 + atomic_inc_unchecked(&vcc->stats->tx_err);
27751 return -ENOLINK;
27752 }
27753 size = skb->len+sizeof(struct atmtcp_hdr);
27754 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27755 if (!new_skb) {
27756 if (vcc->pop) vcc->pop(vcc,skb);
27757 else dev_kfree_skb(skb);
27758 - atomic_inc(&vcc->stats->tx_err);
27759 + atomic_inc_unchecked(&vcc->stats->tx_err);
27760 return -ENOBUFS;
27761 }
27762 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27763 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27764 if (vcc->pop) vcc->pop(vcc,skb);
27765 else dev_kfree_skb(skb);
27766 out_vcc->push(out_vcc,new_skb);
27767 - atomic_inc(&vcc->stats->tx);
27768 - atomic_inc(&out_vcc->stats->rx);
27769 + atomic_inc_unchecked(&vcc->stats->tx);
27770 + atomic_inc_unchecked(&out_vcc->stats->rx);
27771 return 0;
27772 }
27773
27774 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27775 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27776 read_unlock(&vcc_sklist_lock);
27777 if (!out_vcc) {
27778 - atomic_inc(&vcc->stats->tx_err);
27779 + atomic_inc_unchecked(&vcc->stats->tx_err);
27780 goto done;
27781 }
27782 skb_pull(skb,sizeof(struct atmtcp_hdr));
27783 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27784 __net_timestamp(new_skb);
27785 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27786 out_vcc->push(out_vcc,new_skb);
27787 - atomic_inc(&vcc->stats->tx);
27788 - atomic_inc(&out_vcc->stats->rx);
27789 + atomic_inc_unchecked(&vcc->stats->tx);
27790 + atomic_inc_unchecked(&out_vcc->stats->rx);
27791 done:
27792 if (vcc->pop) vcc->pop(vcc,skb);
27793 else dev_kfree_skb(skb);
27794 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27795 index 956e9ac..133516d 100644
27796 --- a/drivers/atm/eni.c
27797 +++ b/drivers/atm/eni.c
27798 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27799 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27800 vcc->dev->number);
27801 length = 0;
27802 - atomic_inc(&vcc->stats->rx_err);
27803 + atomic_inc_unchecked(&vcc->stats->rx_err);
27804 }
27805 else {
27806 length = ATM_CELL_SIZE-1; /* no HEC */
27807 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27808 size);
27809 }
27810 eff = length = 0;
27811 - atomic_inc(&vcc->stats->rx_err);
27812 + atomic_inc_unchecked(&vcc->stats->rx_err);
27813 }
27814 else {
27815 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27816 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27817 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27818 vcc->dev->number,vcc->vci,length,size << 2,descr);
27819 length = eff = 0;
27820 - atomic_inc(&vcc->stats->rx_err);
27821 + atomic_inc_unchecked(&vcc->stats->rx_err);
27822 }
27823 }
27824 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27825 @@ -771,7 +771,7 @@ rx_dequeued++;
27826 vcc->push(vcc,skb);
27827 pushed++;
27828 }
27829 - atomic_inc(&vcc->stats->rx);
27830 + atomic_inc_unchecked(&vcc->stats->rx);
27831 }
27832 wake_up(&eni_dev->rx_wait);
27833 }
27834 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
27835 PCI_DMA_TODEVICE);
27836 if (vcc->pop) vcc->pop(vcc,skb);
27837 else dev_kfree_skb_irq(skb);
27838 - atomic_inc(&vcc->stats->tx);
27839 + atomic_inc_unchecked(&vcc->stats->tx);
27840 wake_up(&eni_dev->tx_wait);
27841 dma_complete++;
27842 }
27843 @@ -1569,7 +1569,7 @@ tx_complete++;
27844 /*--------------------------------- entries ---------------------------------*/
27845
27846
27847 -static const char *media_name[] __devinitdata = {
27848 +static const char *media_name[] __devinitconst = {
27849 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27850 "UTP", "05?", "06?", "07?", /* 4- 7 */
27851 "TAXI","09?", "10?", "11?", /* 8-11 */
27852 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27853 index 5072f8a..fa52520d 100644
27854 --- a/drivers/atm/firestream.c
27855 +++ b/drivers/atm/firestream.c
27856 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27857 }
27858 }
27859
27860 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27861 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27862
27863 fs_dprintk (FS_DEBUG_TXMEM, "i");
27864 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27865 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27866 #endif
27867 skb_put (skb, qe->p1 & 0xffff);
27868 ATM_SKB(skb)->vcc = atm_vcc;
27869 - atomic_inc(&atm_vcc->stats->rx);
27870 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27871 __net_timestamp(skb);
27872 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27873 atm_vcc->push (atm_vcc, skb);
27874 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27875 kfree (pe);
27876 }
27877 if (atm_vcc)
27878 - atomic_inc(&atm_vcc->stats->rx_drop);
27879 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27880 break;
27881 case 0x1f: /* Reassembly abort: no buffers. */
27882 /* Silently increment error counter. */
27883 if (atm_vcc)
27884 - atomic_inc(&atm_vcc->stats->rx_drop);
27885 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27886 break;
27887 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27888 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27889 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27890 index 361f5ae..7fc552d 100644
27891 --- a/drivers/atm/fore200e.c
27892 +++ b/drivers/atm/fore200e.c
27893 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27894 #endif
27895 /* check error condition */
27896 if (*entry->status & STATUS_ERROR)
27897 - atomic_inc(&vcc->stats->tx_err);
27898 + atomic_inc_unchecked(&vcc->stats->tx_err);
27899 else
27900 - atomic_inc(&vcc->stats->tx);
27901 + atomic_inc_unchecked(&vcc->stats->tx);
27902 }
27903 }
27904
27905 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27906 if (skb == NULL) {
27907 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27908
27909 - atomic_inc(&vcc->stats->rx_drop);
27910 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27911 return -ENOMEM;
27912 }
27913
27914 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27915
27916 dev_kfree_skb_any(skb);
27917
27918 - atomic_inc(&vcc->stats->rx_drop);
27919 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27920 return -ENOMEM;
27921 }
27922
27923 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27924
27925 vcc->push(vcc, skb);
27926 - atomic_inc(&vcc->stats->rx);
27927 + atomic_inc_unchecked(&vcc->stats->rx);
27928
27929 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27930
27931 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27932 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27933 fore200e->atm_dev->number,
27934 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27935 - atomic_inc(&vcc->stats->rx_err);
27936 + atomic_inc_unchecked(&vcc->stats->rx_err);
27937 }
27938 }
27939
27940 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27941 goto retry_here;
27942 }
27943
27944 - atomic_inc(&vcc->stats->tx_err);
27945 + atomic_inc_unchecked(&vcc->stats->tx_err);
27946
27947 fore200e->tx_sat++;
27948 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27949 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27950 index b182c2f..1c6fa8a 100644
27951 --- a/drivers/atm/he.c
27952 +++ b/drivers/atm/he.c
27953 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27954
27955 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27956 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27957 - atomic_inc(&vcc->stats->rx_drop);
27958 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27959 goto return_host_buffers;
27960 }
27961
27962 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27963 RBRQ_LEN_ERR(he_dev->rbrq_head)
27964 ? "LEN_ERR" : "",
27965 vcc->vpi, vcc->vci);
27966 - atomic_inc(&vcc->stats->rx_err);
27967 + atomic_inc_unchecked(&vcc->stats->rx_err);
27968 goto return_host_buffers;
27969 }
27970
27971 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27972 vcc->push(vcc, skb);
27973 spin_lock(&he_dev->global_lock);
27974
27975 - atomic_inc(&vcc->stats->rx);
27976 + atomic_inc_unchecked(&vcc->stats->rx);
27977
27978 return_host_buffers:
27979 ++pdus_assembled;
27980 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27981 tpd->vcc->pop(tpd->vcc, tpd->skb);
27982 else
27983 dev_kfree_skb_any(tpd->skb);
27984 - atomic_inc(&tpd->vcc->stats->tx_err);
27985 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27986 }
27987 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27988 return;
27989 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27990 vcc->pop(vcc, skb);
27991 else
27992 dev_kfree_skb_any(skb);
27993 - atomic_inc(&vcc->stats->tx_err);
27994 + atomic_inc_unchecked(&vcc->stats->tx_err);
27995 return -EINVAL;
27996 }
27997
27998 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27999 vcc->pop(vcc, skb);
28000 else
28001 dev_kfree_skb_any(skb);
28002 - atomic_inc(&vcc->stats->tx_err);
28003 + atomic_inc_unchecked(&vcc->stats->tx_err);
28004 return -EINVAL;
28005 }
28006 #endif
28007 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28008 vcc->pop(vcc, skb);
28009 else
28010 dev_kfree_skb_any(skb);
28011 - atomic_inc(&vcc->stats->tx_err);
28012 + atomic_inc_unchecked(&vcc->stats->tx_err);
28013 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28014 return -ENOMEM;
28015 }
28016 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28017 vcc->pop(vcc, skb);
28018 else
28019 dev_kfree_skb_any(skb);
28020 - atomic_inc(&vcc->stats->tx_err);
28021 + atomic_inc_unchecked(&vcc->stats->tx_err);
28022 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28023 return -ENOMEM;
28024 }
28025 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28026 __enqueue_tpd(he_dev, tpd, cid);
28027 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28028
28029 - atomic_inc(&vcc->stats->tx);
28030 + atomic_inc_unchecked(&vcc->stats->tx);
28031
28032 return 0;
28033 }
28034 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28035 index b812103..e391a49 100644
28036 --- a/drivers/atm/horizon.c
28037 +++ b/drivers/atm/horizon.c
28038 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28039 {
28040 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28041 // VC layer stats
28042 - atomic_inc(&vcc->stats->rx);
28043 + atomic_inc_unchecked(&vcc->stats->rx);
28044 __net_timestamp(skb);
28045 // end of our responsibility
28046 vcc->push (vcc, skb);
28047 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28048 dev->tx_iovec = NULL;
28049
28050 // VC layer stats
28051 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28052 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28053
28054 // free the skb
28055 hrz_kfree_skb (skb);
28056 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28057 index 1c05212..c28e200 100644
28058 --- a/drivers/atm/idt77252.c
28059 +++ b/drivers/atm/idt77252.c
28060 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28061 else
28062 dev_kfree_skb(skb);
28063
28064 - atomic_inc(&vcc->stats->tx);
28065 + atomic_inc_unchecked(&vcc->stats->tx);
28066 }
28067
28068 atomic_dec(&scq->used);
28069 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28070 if ((sb = dev_alloc_skb(64)) == NULL) {
28071 printk("%s: Can't allocate buffers for aal0.\n",
28072 card->name);
28073 - atomic_add(i, &vcc->stats->rx_drop);
28074 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28075 break;
28076 }
28077 if (!atm_charge(vcc, sb->truesize)) {
28078 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28079 card->name);
28080 - atomic_add(i - 1, &vcc->stats->rx_drop);
28081 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28082 dev_kfree_skb(sb);
28083 break;
28084 }
28085 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28086 ATM_SKB(sb)->vcc = vcc;
28087 __net_timestamp(sb);
28088 vcc->push(vcc, sb);
28089 - atomic_inc(&vcc->stats->rx);
28090 + atomic_inc_unchecked(&vcc->stats->rx);
28091
28092 cell += ATM_CELL_PAYLOAD;
28093 }
28094 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28095 "(CDC: %08x)\n",
28096 card->name, len, rpp->len, readl(SAR_REG_CDC));
28097 recycle_rx_pool_skb(card, rpp);
28098 - atomic_inc(&vcc->stats->rx_err);
28099 + atomic_inc_unchecked(&vcc->stats->rx_err);
28100 return;
28101 }
28102 if (stat & SAR_RSQE_CRC) {
28103 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28104 recycle_rx_pool_skb(card, rpp);
28105 - atomic_inc(&vcc->stats->rx_err);
28106 + atomic_inc_unchecked(&vcc->stats->rx_err);
28107 return;
28108 }
28109 if (skb_queue_len(&rpp->queue) > 1) {
28110 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28111 RXPRINTK("%s: Can't alloc RX skb.\n",
28112 card->name);
28113 recycle_rx_pool_skb(card, rpp);
28114 - atomic_inc(&vcc->stats->rx_err);
28115 + atomic_inc_unchecked(&vcc->stats->rx_err);
28116 return;
28117 }
28118 if (!atm_charge(vcc, skb->truesize)) {
28119 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28120 __net_timestamp(skb);
28121
28122 vcc->push(vcc, skb);
28123 - atomic_inc(&vcc->stats->rx);
28124 + atomic_inc_unchecked(&vcc->stats->rx);
28125
28126 return;
28127 }
28128 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28129 __net_timestamp(skb);
28130
28131 vcc->push(vcc, skb);
28132 - atomic_inc(&vcc->stats->rx);
28133 + atomic_inc_unchecked(&vcc->stats->rx);
28134
28135 if (skb->truesize > SAR_FB_SIZE_3)
28136 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28137 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28138 if (vcc->qos.aal != ATM_AAL0) {
28139 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28140 card->name, vpi, vci);
28141 - atomic_inc(&vcc->stats->rx_drop);
28142 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28143 goto drop;
28144 }
28145
28146 if ((sb = dev_alloc_skb(64)) == NULL) {
28147 printk("%s: Can't allocate buffers for AAL0.\n",
28148 card->name);
28149 - atomic_inc(&vcc->stats->rx_err);
28150 + atomic_inc_unchecked(&vcc->stats->rx_err);
28151 goto drop;
28152 }
28153
28154 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28155 ATM_SKB(sb)->vcc = vcc;
28156 __net_timestamp(sb);
28157 vcc->push(vcc, sb);
28158 - atomic_inc(&vcc->stats->rx);
28159 + atomic_inc_unchecked(&vcc->stats->rx);
28160
28161 drop:
28162 skb_pull(queue, 64);
28163 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28164
28165 if (vc == NULL) {
28166 printk("%s: NULL connection in send().\n", card->name);
28167 - atomic_inc(&vcc->stats->tx_err);
28168 + atomic_inc_unchecked(&vcc->stats->tx_err);
28169 dev_kfree_skb(skb);
28170 return -EINVAL;
28171 }
28172 if (!test_bit(VCF_TX, &vc->flags)) {
28173 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28174 - atomic_inc(&vcc->stats->tx_err);
28175 + atomic_inc_unchecked(&vcc->stats->tx_err);
28176 dev_kfree_skb(skb);
28177 return -EINVAL;
28178 }
28179 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28180 break;
28181 default:
28182 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28183 - atomic_inc(&vcc->stats->tx_err);
28184 + atomic_inc_unchecked(&vcc->stats->tx_err);
28185 dev_kfree_skb(skb);
28186 return -EINVAL;
28187 }
28188
28189 if (skb_shinfo(skb)->nr_frags != 0) {
28190 printk("%s: No scatter-gather yet.\n", card->name);
28191 - atomic_inc(&vcc->stats->tx_err);
28192 + atomic_inc_unchecked(&vcc->stats->tx_err);
28193 dev_kfree_skb(skb);
28194 return -EINVAL;
28195 }
28196 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28197
28198 err = queue_skb(card, vc, skb, oam);
28199 if (err) {
28200 - atomic_inc(&vcc->stats->tx_err);
28201 + atomic_inc_unchecked(&vcc->stats->tx_err);
28202 dev_kfree_skb(skb);
28203 return err;
28204 }
28205 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28206 skb = dev_alloc_skb(64);
28207 if (!skb) {
28208 printk("%s: Out of memory in send_oam().\n", card->name);
28209 - atomic_inc(&vcc->stats->tx_err);
28210 + atomic_inc_unchecked(&vcc->stats->tx_err);
28211 return -ENOMEM;
28212 }
28213 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28214 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28215 index 9e373ba..cf93727 100644
28216 --- a/drivers/atm/iphase.c
28217 +++ b/drivers/atm/iphase.c
28218 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
28219 status = (u_short) (buf_desc_ptr->desc_mode);
28220 if (status & (RX_CER | RX_PTE | RX_OFL))
28221 {
28222 - atomic_inc(&vcc->stats->rx_err);
28223 + atomic_inc_unchecked(&vcc->stats->rx_err);
28224 IF_ERR(printk("IA: bad packet, dropping it");)
28225 if (status & RX_CER) {
28226 IF_ERR(printk(" cause: packet CRC error\n");)
28227 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
28228 len = dma_addr - buf_addr;
28229 if (len > iadev->rx_buf_sz) {
28230 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28231 - atomic_inc(&vcc->stats->rx_err);
28232 + atomic_inc_unchecked(&vcc->stats->rx_err);
28233 goto out_free_desc;
28234 }
28235
28236 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28237 ia_vcc = INPH_IA_VCC(vcc);
28238 if (ia_vcc == NULL)
28239 {
28240 - atomic_inc(&vcc->stats->rx_err);
28241 + atomic_inc_unchecked(&vcc->stats->rx_err);
28242 atm_return(vcc, skb->truesize);
28243 dev_kfree_skb_any(skb);
28244 goto INCR_DLE;
28245 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28246 if ((length > iadev->rx_buf_sz) || (length >
28247 (skb->len - sizeof(struct cpcs_trailer))))
28248 {
28249 - atomic_inc(&vcc->stats->rx_err);
28250 + atomic_inc_unchecked(&vcc->stats->rx_err);
28251 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28252 length, skb->len);)
28253 atm_return(vcc, skb->truesize);
28254 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28255
28256 IF_RX(printk("rx_dle_intr: skb push");)
28257 vcc->push(vcc,skb);
28258 - atomic_inc(&vcc->stats->rx);
28259 + atomic_inc_unchecked(&vcc->stats->rx);
28260 iadev->rx_pkt_cnt++;
28261 }
28262 INCR_DLE:
28263 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28264 {
28265 struct k_sonet_stats *stats;
28266 stats = &PRIV(_ia_dev[board])->sonet_stats;
28267 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28268 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28269 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28270 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28271 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28272 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28273 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28274 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28275 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28276 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28277 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28278 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28279 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28280 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28281 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28282 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28283 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28284 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28285 }
28286 ia_cmds.status = 0;
28287 break;
28288 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28289 if ((desc == 0) || (desc > iadev->num_tx_desc))
28290 {
28291 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28292 - atomic_inc(&vcc->stats->tx);
28293 + atomic_inc_unchecked(&vcc->stats->tx);
28294 if (vcc->pop)
28295 vcc->pop(vcc, skb);
28296 else
28297 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28298 ATM_DESC(skb) = vcc->vci;
28299 skb_queue_tail(&iadev->tx_dma_q, skb);
28300
28301 - atomic_inc(&vcc->stats->tx);
28302 + atomic_inc_unchecked(&vcc->stats->tx);
28303 iadev->tx_pkt_cnt++;
28304 /* Increment transaction counter */
28305 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28306
28307 #if 0
28308 /* add flow control logic */
28309 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28310 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28311 if (iavcc->vc_desc_cnt > 10) {
28312 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28313 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28314 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28315 index f556969..0da15eb 100644
28316 --- a/drivers/atm/lanai.c
28317 +++ b/drivers/atm/lanai.c
28318 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28319 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28320 lanai_endtx(lanai, lvcc);
28321 lanai_free_skb(lvcc->tx.atmvcc, skb);
28322 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28323 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28324 }
28325
28326 /* Try to fill the buffer - don't call unless there is backlog */
28327 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28328 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28329 __net_timestamp(skb);
28330 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28331 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28332 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28333 out:
28334 lvcc->rx.buf.ptr = end;
28335 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28336 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28337 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28338 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28339 lanai->stats.service_rxnotaal5++;
28340 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28341 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28342 return 0;
28343 }
28344 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28345 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28346 int bytes;
28347 read_unlock(&vcc_sklist_lock);
28348 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28349 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28350 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28351 lvcc->stats.x.aal5.service_trash++;
28352 bytes = (SERVICE_GET_END(s) * 16) -
28353 (((unsigned long) lvcc->rx.buf.ptr) -
28354 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28355 }
28356 if (s & SERVICE_STREAM) {
28357 read_unlock(&vcc_sklist_lock);
28358 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28359 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28360 lvcc->stats.x.aal5.service_stream++;
28361 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28362 "PDU on VCI %d!\n", lanai->number, vci);
28363 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28364 return 0;
28365 }
28366 DPRINTK("got rx crc error on vci %d\n", vci);
28367 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28368 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28369 lvcc->stats.x.aal5.service_rxcrc++;
28370 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28371 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28372 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28373 index 1c70c45..300718d 100644
28374 --- a/drivers/atm/nicstar.c
28375 +++ b/drivers/atm/nicstar.c
28376 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28377 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28378 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28379 card->index);
28380 - atomic_inc(&vcc->stats->tx_err);
28381 + atomic_inc_unchecked(&vcc->stats->tx_err);
28382 dev_kfree_skb_any(skb);
28383 return -EINVAL;
28384 }
28385 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28386 if (!vc->tx) {
28387 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28388 card->index);
28389 - atomic_inc(&vcc->stats->tx_err);
28390 + atomic_inc_unchecked(&vcc->stats->tx_err);
28391 dev_kfree_skb_any(skb);
28392 return -EINVAL;
28393 }
28394 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28395 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28396 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28397 card->index);
28398 - atomic_inc(&vcc->stats->tx_err);
28399 + atomic_inc_unchecked(&vcc->stats->tx_err);
28400 dev_kfree_skb_any(skb);
28401 return -EINVAL;
28402 }
28403
28404 if (skb_shinfo(skb)->nr_frags != 0) {
28405 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28406 - atomic_inc(&vcc->stats->tx_err);
28407 + atomic_inc_unchecked(&vcc->stats->tx_err);
28408 dev_kfree_skb_any(skb);
28409 return -EINVAL;
28410 }
28411 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28412 }
28413
28414 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28415 - atomic_inc(&vcc->stats->tx_err);
28416 + atomic_inc_unchecked(&vcc->stats->tx_err);
28417 dev_kfree_skb_any(skb);
28418 return -EIO;
28419 }
28420 - atomic_inc(&vcc->stats->tx);
28421 + atomic_inc_unchecked(&vcc->stats->tx);
28422
28423 return 0;
28424 }
28425 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28426 printk
28427 ("nicstar%d: Can't allocate buffers for aal0.\n",
28428 card->index);
28429 - atomic_add(i, &vcc->stats->rx_drop);
28430 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28431 break;
28432 }
28433 if (!atm_charge(vcc, sb->truesize)) {
28434 RXPRINTK
28435 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28436 card->index);
28437 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28438 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28439 dev_kfree_skb_any(sb);
28440 break;
28441 }
28442 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28443 ATM_SKB(sb)->vcc = vcc;
28444 __net_timestamp(sb);
28445 vcc->push(vcc, sb);
28446 - atomic_inc(&vcc->stats->rx);
28447 + atomic_inc_unchecked(&vcc->stats->rx);
28448 cell += ATM_CELL_PAYLOAD;
28449 }
28450
28451 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28452 if (iovb == NULL) {
28453 printk("nicstar%d: Out of iovec buffers.\n",
28454 card->index);
28455 - atomic_inc(&vcc->stats->rx_drop);
28456 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28457 recycle_rx_buf(card, skb);
28458 return;
28459 }
28460 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28461 small or large buffer itself. */
28462 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28463 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28464 - atomic_inc(&vcc->stats->rx_err);
28465 + atomic_inc_unchecked(&vcc->stats->rx_err);
28466 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28467 NS_MAX_IOVECS);
28468 NS_PRV_IOVCNT(iovb) = 0;
28469 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28470 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28471 card->index);
28472 which_list(card, skb);
28473 - atomic_inc(&vcc->stats->rx_err);
28474 + atomic_inc_unchecked(&vcc->stats->rx_err);
28475 recycle_rx_buf(card, skb);
28476 vc->rx_iov = NULL;
28477 recycle_iov_buf(card, iovb);
28478 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28479 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28480 card->index);
28481 which_list(card, skb);
28482 - atomic_inc(&vcc->stats->rx_err);
28483 + atomic_inc_unchecked(&vcc->stats->rx_err);
28484 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28485 NS_PRV_IOVCNT(iovb));
28486 vc->rx_iov = NULL;
28487 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28488 printk(" - PDU size mismatch.\n");
28489 else
28490 printk(".\n");
28491 - atomic_inc(&vcc->stats->rx_err);
28492 + atomic_inc_unchecked(&vcc->stats->rx_err);
28493 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28494 NS_PRV_IOVCNT(iovb));
28495 vc->rx_iov = NULL;
28496 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28497 /* skb points to a small buffer */
28498 if (!atm_charge(vcc, skb->truesize)) {
28499 push_rxbufs(card, skb);
28500 - atomic_inc(&vcc->stats->rx_drop);
28501 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28502 } else {
28503 skb_put(skb, len);
28504 dequeue_sm_buf(card, skb);
28505 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28506 ATM_SKB(skb)->vcc = vcc;
28507 __net_timestamp(skb);
28508 vcc->push(vcc, skb);
28509 - atomic_inc(&vcc->stats->rx);
28510 + atomic_inc_unchecked(&vcc->stats->rx);
28511 }
28512 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28513 struct sk_buff *sb;
28514 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28515 if (len <= NS_SMBUFSIZE) {
28516 if (!atm_charge(vcc, sb->truesize)) {
28517 push_rxbufs(card, sb);
28518 - atomic_inc(&vcc->stats->rx_drop);
28519 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28520 } else {
28521 skb_put(sb, len);
28522 dequeue_sm_buf(card, sb);
28523 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28524 ATM_SKB(sb)->vcc = vcc;
28525 __net_timestamp(sb);
28526 vcc->push(vcc, sb);
28527 - atomic_inc(&vcc->stats->rx);
28528 + atomic_inc_unchecked(&vcc->stats->rx);
28529 }
28530
28531 push_rxbufs(card, skb);
28532 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28533
28534 if (!atm_charge(vcc, skb->truesize)) {
28535 push_rxbufs(card, skb);
28536 - atomic_inc(&vcc->stats->rx_drop);
28537 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28538 } else {
28539 dequeue_lg_buf(card, skb);
28540 #ifdef NS_USE_DESTRUCTORS
28541 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28542 ATM_SKB(skb)->vcc = vcc;
28543 __net_timestamp(skb);
28544 vcc->push(vcc, skb);
28545 - atomic_inc(&vcc->stats->rx);
28546 + atomic_inc_unchecked(&vcc->stats->rx);
28547 }
28548
28549 push_rxbufs(card, sb);
28550 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28551 printk
28552 ("nicstar%d: Out of huge buffers.\n",
28553 card->index);
28554 - atomic_inc(&vcc->stats->rx_drop);
28555 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28556 recycle_iovec_rx_bufs(card,
28557 (struct iovec *)
28558 iovb->data,
28559 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28560 card->hbpool.count++;
28561 } else
28562 dev_kfree_skb_any(hb);
28563 - atomic_inc(&vcc->stats->rx_drop);
28564 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28565 } else {
28566 /* Copy the small buffer to the huge buffer */
28567 sb = (struct sk_buff *)iov->iov_base;
28568 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28569 #endif /* NS_USE_DESTRUCTORS */
28570 __net_timestamp(hb);
28571 vcc->push(vcc, hb);
28572 - atomic_inc(&vcc->stats->rx);
28573 + atomic_inc_unchecked(&vcc->stats->rx);
28574 }
28575 }
28576
28577 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28578 index e8cd652..bbbd1fc 100644
28579 --- a/drivers/atm/solos-pci.c
28580 +++ b/drivers/atm/solos-pci.c
28581 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28582 }
28583 atm_charge(vcc, skb->truesize);
28584 vcc->push(vcc, skb);
28585 - atomic_inc(&vcc->stats->rx);
28586 + atomic_inc_unchecked(&vcc->stats->rx);
28587 break;
28588
28589 case PKT_STATUS:
28590 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28591 vcc = SKB_CB(oldskb)->vcc;
28592
28593 if (vcc) {
28594 - atomic_inc(&vcc->stats->tx);
28595 + atomic_inc_unchecked(&vcc->stats->tx);
28596 solos_pop(vcc, oldskb);
28597 } else
28598 dev_kfree_skb_irq(oldskb);
28599 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28600 index 90f1ccc..04c4a1e 100644
28601 --- a/drivers/atm/suni.c
28602 +++ b/drivers/atm/suni.c
28603 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28604
28605
28606 #define ADD_LIMITED(s,v) \
28607 - atomic_add((v),&stats->s); \
28608 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28609 + atomic_add_unchecked((v),&stats->s); \
28610 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28611
28612
28613 static void suni_hz(unsigned long from_timer)
28614 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28615 index 5120a96..e2572bd 100644
28616 --- a/drivers/atm/uPD98402.c
28617 +++ b/drivers/atm/uPD98402.c
28618 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28619 struct sonet_stats tmp;
28620 int error = 0;
28621
28622 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28623 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28624 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28625 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28626 if (zero && !error) {
28627 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28628
28629
28630 #define ADD_LIMITED(s,v) \
28631 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28632 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28633 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28634 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28635 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28636 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28637
28638
28639 static void stat_event(struct atm_dev *dev)
28640 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28641 if (reason & uPD98402_INT_PFM) stat_event(dev);
28642 if (reason & uPD98402_INT_PCO) {
28643 (void) GET(PCOCR); /* clear interrupt cause */
28644 - atomic_add(GET(HECCT),
28645 + atomic_add_unchecked(GET(HECCT),
28646 &PRIV(dev)->sonet_stats.uncorr_hcs);
28647 }
28648 if ((reason & uPD98402_INT_RFO) &&
28649 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28650 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28651 uPD98402_INT_LOS),PIMR); /* enable them */
28652 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28653 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28654 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28655 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28656 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28657 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28658 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28659 return 0;
28660 }
28661
28662 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28663 index d889f56..17eb71e 100644
28664 --- a/drivers/atm/zatm.c
28665 +++ b/drivers/atm/zatm.c
28666 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28667 }
28668 if (!size) {
28669 dev_kfree_skb_irq(skb);
28670 - if (vcc) atomic_inc(&vcc->stats->rx_err);
28671 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28672 continue;
28673 }
28674 if (!atm_charge(vcc,skb->truesize)) {
28675 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28676 skb->len = size;
28677 ATM_SKB(skb)->vcc = vcc;
28678 vcc->push(vcc,skb);
28679 - atomic_inc(&vcc->stats->rx);
28680 + atomic_inc_unchecked(&vcc->stats->rx);
28681 }
28682 zout(pos & 0xffff,MTA(mbx));
28683 #if 0 /* probably a stupid idea */
28684 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28685 skb_queue_head(&zatm_vcc->backlog,skb);
28686 break;
28687 }
28688 - atomic_inc(&vcc->stats->tx);
28689 + atomic_inc_unchecked(&vcc->stats->tx);
28690 wake_up(&zatm_vcc->tx_wait);
28691 }
28692
28693 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28694 index 8493536..31adee0 100644
28695 --- a/drivers/base/devtmpfs.c
28696 +++ b/drivers/base/devtmpfs.c
28697 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28698 if (!thread)
28699 return 0;
28700
28701 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28702 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28703 if (err)
28704 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28705 else
28706 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28707 index caf995f..6f76697 100644
28708 --- a/drivers/base/power/wakeup.c
28709 +++ b/drivers/base/power/wakeup.c
28710 @@ -30,14 +30,14 @@ bool events_check_enabled;
28711 * They need to be modified together atomically, so it's better to use one
28712 * atomic variable to hold them both.
28713 */
28714 -static atomic_t combined_event_count = ATOMIC_INIT(0);
28715 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28716
28717 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28718 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28719
28720 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28721 {
28722 - unsigned int comb = atomic_read(&combined_event_count);
28723 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
28724
28725 *cnt = (comb >> IN_PROGRESS_BITS);
28726 *inpr = comb & MAX_IN_PROGRESS;
28727 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28728 ws->last_time = ktime_get();
28729
28730 /* Increment the counter of events in progress. */
28731 - atomic_inc(&combined_event_count);
28732 + atomic_inc_unchecked(&combined_event_count);
28733 }
28734
28735 /**
28736 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28737 * Increment the counter of registered wakeup events and decrement the
28738 * couter of wakeup events in progress simultaneously.
28739 */
28740 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28741 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28742 }
28743
28744 /**
28745 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28746 index b0f553b..77b928b 100644
28747 --- a/drivers/block/cciss.c
28748 +++ b/drivers/block/cciss.c
28749 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28750 int err;
28751 u32 cp;
28752
28753 + memset(&arg64, 0, sizeof(arg64));
28754 +
28755 err = 0;
28756 err |=
28757 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28758 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28759 while (!list_empty(&h->reqQ)) {
28760 c = list_entry(h->reqQ.next, CommandList_struct, list);
28761 /* can't do anything if fifo is full */
28762 - if ((h->access.fifo_full(h))) {
28763 + if ((h->access->fifo_full(h))) {
28764 dev_warn(&h->pdev->dev, "fifo full\n");
28765 break;
28766 }
28767 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28768 h->Qdepth--;
28769
28770 /* Tell the controller execute command */
28771 - h->access.submit_command(h, c);
28772 + h->access->submit_command(h, c);
28773
28774 /* Put job onto the completed Q */
28775 addQ(&h->cmpQ, c);
28776 @@ -3443,17 +3445,17 @@ startio:
28777
28778 static inline unsigned long get_next_completion(ctlr_info_t *h)
28779 {
28780 - return h->access.command_completed(h);
28781 + return h->access->command_completed(h);
28782 }
28783
28784 static inline int interrupt_pending(ctlr_info_t *h)
28785 {
28786 - return h->access.intr_pending(h);
28787 + return h->access->intr_pending(h);
28788 }
28789
28790 static inline long interrupt_not_for_us(ctlr_info_t *h)
28791 {
28792 - return ((h->access.intr_pending(h) == 0) ||
28793 + return ((h->access->intr_pending(h) == 0) ||
28794 (h->interrupts_enabled == 0));
28795 }
28796
28797 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28798 u32 a;
28799
28800 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28801 - return h->access.command_completed(h);
28802 + return h->access->command_completed(h);
28803
28804 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28805 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28806 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28807 trans_support & CFGTBL_Trans_use_short_tags);
28808
28809 /* Change the access methods to the performant access methods */
28810 - h->access = SA5_performant_access;
28811 + h->access = &SA5_performant_access;
28812 h->transMethod = CFGTBL_Trans_Performant;
28813
28814 return;
28815 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28816 if (prod_index < 0)
28817 return -ENODEV;
28818 h->product_name = products[prod_index].product_name;
28819 - h->access = *(products[prod_index].access);
28820 + h->access = products[prod_index].access;
28821
28822 if (cciss_board_disabled(h)) {
28823 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28824 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28825 }
28826
28827 /* make sure the board interrupts are off */
28828 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28829 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28830 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28831 if (rc)
28832 goto clean2;
28833 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28834 * fake ones to scoop up any residual completions.
28835 */
28836 spin_lock_irqsave(&h->lock, flags);
28837 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28838 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28839 spin_unlock_irqrestore(&h->lock, flags);
28840 free_irq(h->intr[h->intr_mode], h);
28841 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28842 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28843 dev_info(&h->pdev->dev, "Board READY.\n");
28844 dev_info(&h->pdev->dev,
28845 "Waiting for stale completions to drain.\n");
28846 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28847 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28848 msleep(10000);
28849 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28850 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28851
28852 rc = controller_reset_failed(h->cfgtable);
28853 if (rc)
28854 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28855 cciss_scsi_setup(h);
28856
28857 /* Turn the interrupts on so we can service requests */
28858 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28859 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28860
28861 /* Get the firmware version */
28862 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28863 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28864 kfree(flush_buf);
28865 if (return_code != IO_OK)
28866 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28867 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28868 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28869 free_irq(h->intr[h->intr_mode], h);
28870 }
28871
28872 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28873 index 7fda30e..eb5dfe0 100644
28874 --- a/drivers/block/cciss.h
28875 +++ b/drivers/block/cciss.h
28876 @@ -101,7 +101,7 @@ struct ctlr_info
28877 /* information about each logical volume */
28878 drive_info_struct *drv[CISS_MAX_LUN];
28879
28880 - struct access_method access;
28881 + struct access_method *access;
28882
28883 /* queue and queue Info */
28884 struct list_head reqQ;
28885 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28886 index 9125bbe..eede5c8 100644
28887 --- a/drivers/block/cpqarray.c
28888 +++ b/drivers/block/cpqarray.c
28889 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28890 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
28891 goto Enomem4;
28892 }
28893 - hba[i]->access.set_intr_mask(hba[i], 0);
28894 + hba[i]->access->set_intr_mask(hba[i], 0);
28895 if (request_irq(hba[i]->intr, do_ida_intr,
28896 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28897 {
28898 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28899 add_timer(&hba[i]->timer);
28900
28901 /* Enable IRQ now that spinlock and rate limit timer are set up */
28902 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28903 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28904
28905 for(j=0; j<NWD; j++) {
28906 struct gendisk *disk = ida_gendisk[i][j];
28907 @@ -694,7 +694,7 @@ DBGINFO(
28908 for(i=0; i<NR_PRODUCTS; i++) {
28909 if (board_id == products[i].board_id) {
28910 c->product_name = products[i].product_name;
28911 - c->access = *(products[i].access);
28912 + c->access = products[i].access;
28913 break;
28914 }
28915 }
28916 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28917 hba[ctlr]->intr = intr;
28918 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28919 hba[ctlr]->product_name = products[j].product_name;
28920 - hba[ctlr]->access = *(products[j].access);
28921 + hba[ctlr]->access = products[j].access;
28922 hba[ctlr]->ctlr = ctlr;
28923 hba[ctlr]->board_id = board_id;
28924 hba[ctlr]->pci_dev = NULL; /* not PCI */
28925 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28926
28927 while((c = h->reqQ) != NULL) {
28928 /* Can't do anything if we're busy */
28929 - if (h->access.fifo_full(h) == 0)
28930 + if (h->access->fifo_full(h) == 0)
28931 return;
28932
28933 /* Get the first entry from the request Q */
28934 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28935 h->Qdepth--;
28936
28937 /* Tell the controller to do our bidding */
28938 - h->access.submit_command(h, c);
28939 + h->access->submit_command(h, c);
28940
28941 /* Get onto the completion Q */
28942 addQ(&h->cmpQ, c);
28943 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28944 unsigned long flags;
28945 __u32 a,a1;
28946
28947 - istat = h->access.intr_pending(h);
28948 + istat = h->access->intr_pending(h);
28949 /* Is this interrupt for us? */
28950 if (istat == 0)
28951 return IRQ_NONE;
28952 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28953 */
28954 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28955 if (istat & FIFO_NOT_EMPTY) {
28956 - while((a = h->access.command_completed(h))) {
28957 + while((a = h->access->command_completed(h))) {
28958 a1 = a; a &= ~3;
28959 if ((c = h->cmpQ) == NULL)
28960 {
28961 @@ -1449,11 +1449,11 @@ static int sendcmd(
28962 /*
28963 * Disable interrupt
28964 */
28965 - info_p->access.set_intr_mask(info_p, 0);
28966 + info_p->access->set_intr_mask(info_p, 0);
28967 /* Make sure there is room in the command FIFO */
28968 /* Actually it should be completely empty at this time. */
28969 for (i = 200000; i > 0; i--) {
28970 - temp = info_p->access.fifo_full(info_p);
28971 + temp = info_p->access->fifo_full(info_p);
28972 if (temp != 0) {
28973 break;
28974 }
28975 @@ -1466,7 +1466,7 @@ DBG(
28976 /*
28977 * Send the cmd
28978 */
28979 - info_p->access.submit_command(info_p, c);
28980 + info_p->access->submit_command(info_p, c);
28981 complete = pollcomplete(ctlr);
28982
28983 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28984 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28985 * we check the new geometry. Then turn interrupts back on when
28986 * we're done.
28987 */
28988 - host->access.set_intr_mask(host, 0);
28989 + host->access->set_intr_mask(host, 0);
28990 getgeometry(ctlr);
28991 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28992 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28993
28994 for(i=0; i<NWD; i++) {
28995 struct gendisk *disk = ida_gendisk[ctlr][i];
28996 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
28997 /* Wait (up to 2 seconds) for a command to complete */
28998
28999 for (i = 200000; i > 0; i--) {
29000 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
29001 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
29002 if (done == 0) {
29003 udelay(10); /* a short fixed delay */
29004 } else
29005 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29006 index be73e9d..7fbf140 100644
29007 --- a/drivers/block/cpqarray.h
29008 +++ b/drivers/block/cpqarray.h
29009 @@ -99,7 +99,7 @@ struct ctlr_info {
29010 drv_info_t drv[NWD];
29011 struct proc_dir_entry *proc;
29012
29013 - struct access_method access;
29014 + struct access_method *access;
29015
29016 cmdlist_t *reqQ;
29017 cmdlist_t *cmpQ;
29018 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29019 index 8d68056..e67050f 100644
29020 --- a/drivers/block/drbd/drbd_int.h
29021 +++ b/drivers/block/drbd/drbd_int.h
29022 @@ -736,7 +736,7 @@ struct drbd_request;
29023 struct drbd_epoch {
29024 struct list_head list;
29025 unsigned int barrier_nr;
29026 - atomic_t epoch_size; /* increased on every request added. */
29027 + atomic_unchecked_t epoch_size; /* increased on every request added. */
29028 atomic_t active; /* increased on every req. added, and dec on every finished. */
29029 unsigned long flags;
29030 };
29031 @@ -1108,7 +1108,7 @@ struct drbd_conf {
29032 void *int_dig_in;
29033 void *int_dig_vv;
29034 wait_queue_head_t seq_wait;
29035 - atomic_t packet_seq;
29036 + atomic_unchecked_t packet_seq;
29037 unsigned int peer_seq;
29038 spinlock_t peer_seq_lock;
29039 unsigned int minor;
29040 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29041
29042 static inline void drbd_tcp_cork(struct socket *sock)
29043 {
29044 - int __user val = 1;
29045 + int val = 1;
29046 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29047 - (char __user *)&val, sizeof(val));
29048 + (char __force_user *)&val, sizeof(val));
29049 }
29050
29051 static inline void drbd_tcp_uncork(struct socket *sock)
29052 {
29053 - int __user val = 0;
29054 + int val = 0;
29055 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29056 - (char __user *)&val, sizeof(val));
29057 + (char __force_user *)&val, sizeof(val));
29058 }
29059
29060 static inline void drbd_tcp_nodelay(struct socket *sock)
29061 {
29062 - int __user val = 1;
29063 + int val = 1;
29064 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29065 - (char __user *)&val, sizeof(val));
29066 + (char __force_user *)&val, sizeof(val));
29067 }
29068
29069 static inline void drbd_tcp_quickack(struct socket *sock)
29070 {
29071 - int __user val = 2;
29072 + int val = 2;
29073 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29074 - (char __user *)&val, sizeof(val));
29075 + (char __force_user *)&val, sizeof(val));
29076 }
29077
29078 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29079 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29080 index 211fc44..c5116f1 100644
29081 --- a/drivers/block/drbd/drbd_main.c
29082 +++ b/drivers/block/drbd/drbd_main.c
29083 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29084 p.sector = sector;
29085 p.block_id = block_id;
29086 p.blksize = blksize;
29087 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29088 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29089
29090 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29091 return false;
29092 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29093 p.sector = cpu_to_be64(req->sector);
29094 p.block_id = (unsigned long)req;
29095 p.seq_num = cpu_to_be32(req->seq_num =
29096 - atomic_add_return(1, &mdev->packet_seq));
29097 + atomic_add_return_unchecked(1, &mdev->packet_seq));
29098
29099 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29100
29101 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29102 atomic_set(&mdev->unacked_cnt, 0);
29103 atomic_set(&mdev->local_cnt, 0);
29104 atomic_set(&mdev->net_cnt, 0);
29105 - atomic_set(&mdev->packet_seq, 0);
29106 + atomic_set_unchecked(&mdev->packet_seq, 0);
29107 atomic_set(&mdev->pp_in_use, 0);
29108 atomic_set(&mdev->pp_in_use_by_net, 0);
29109 atomic_set(&mdev->rs_sect_in, 0);
29110 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29111 mdev->receiver.t_state);
29112
29113 /* no need to lock it, I'm the only thread alive */
29114 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29115 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29116 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29117 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29118 mdev->al_writ_cnt =
29119 mdev->bm_writ_cnt =
29120 mdev->read_cnt =
29121 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29122 index af2a250..0fdeb75 100644
29123 --- a/drivers/block/drbd/drbd_nl.c
29124 +++ b/drivers/block/drbd/drbd_nl.c
29125 @@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29126 return;
29127 }
29128
29129 - if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
29130 + if (!capable(CAP_SYS_ADMIN)) {
29131 retcode = ERR_PERM;
29132 goto fail;
29133 }
29134 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29135 module_put(THIS_MODULE);
29136 }
29137
29138 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29139 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29140
29141 static unsigned short *
29142 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29143 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29144 cn_reply->id.idx = CN_IDX_DRBD;
29145 cn_reply->id.val = CN_VAL_DRBD;
29146
29147 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29148 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29149 cn_reply->ack = 0; /* not used here. */
29150 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29151 (int)((char *)tl - (char *)reply->tag_list);
29152 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29153 cn_reply->id.idx = CN_IDX_DRBD;
29154 cn_reply->id.val = CN_VAL_DRBD;
29155
29156 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29157 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29158 cn_reply->ack = 0; /* not used here. */
29159 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29160 (int)((char *)tl - (char *)reply->tag_list);
29161 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29162 cn_reply->id.idx = CN_IDX_DRBD;
29163 cn_reply->id.val = CN_VAL_DRBD;
29164
29165 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29166 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29167 cn_reply->ack = 0; // not used here.
29168 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29169 (int)((char*)tl - (char*)reply->tag_list);
29170 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29171 cn_reply->id.idx = CN_IDX_DRBD;
29172 cn_reply->id.val = CN_VAL_DRBD;
29173
29174 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29175 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29176 cn_reply->ack = 0; /* not used here. */
29177 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29178 (int)((char *)tl - (char *)reply->tag_list);
29179 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29180 index 43beaca..4a5b1dd 100644
29181 --- a/drivers/block/drbd/drbd_receiver.c
29182 +++ b/drivers/block/drbd/drbd_receiver.c
29183 @@ -894,7 +894,7 @@ retry:
29184 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29185 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29186
29187 - atomic_set(&mdev->packet_seq, 0);
29188 + atomic_set_unchecked(&mdev->packet_seq, 0);
29189 mdev->peer_seq = 0;
29190
29191 drbd_thread_start(&mdev->asender);
29192 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29193 do {
29194 next_epoch = NULL;
29195
29196 - epoch_size = atomic_read(&epoch->epoch_size);
29197 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29198
29199 switch (ev & ~EV_CLEANUP) {
29200 case EV_PUT:
29201 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29202 rv = FE_DESTROYED;
29203 } else {
29204 epoch->flags = 0;
29205 - atomic_set(&epoch->epoch_size, 0);
29206 + atomic_set_unchecked(&epoch->epoch_size, 0);
29207 /* atomic_set(&epoch->active, 0); is already zero */
29208 if (rv == FE_STILL_LIVE)
29209 rv = FE_RECYCLED;
29210 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29211 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29212 drbd_flush(mdev);
29213
29214 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29215 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29216 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29217 if (epoch)
29218 break;
29219 }
29220
29221 epoch = mdev->current_epoch;
29222 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29223 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29224
29225 D_ASSERT(atomic_read(&epoch->active) == 0);
29226 D_ASSERT(epoch->flags == 0);
29227 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29228 }
29229
29230 epoch->flags = 0;
29231 - atomic_set(&epoch->epoch_size, 0);
29232 + atomic_set_unchecked(&epoch->epoch_size, 0);
29233 atomic_set(&epoch->active, 0);
29234
29235 spin_lock(&mdev->epoch_lock);
29236 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29237 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29238 list_add(&epoch->list, &mdev->current_epoch->list);
29239 mdev->current_epoch = epoch;
29240 mdev->epochs++;
29241 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29242 spin_unlock(&mdev->peer_seq_lock);
29243
29244 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29245 - atomic_inc(&mdev->current_epoch->epoch_size);
29246 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29247 return drbd_drain_block(mdev, data_size);
29248 }
29249
29250 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29251
29252 spin_lock(&mdev->epoch_lock);
29253 e->epoch = mdev->current_epoch;
29254 - atomic_inc(&e->epoch->epoch_size);
29255 + atomic_inc_unchecked(&e->epoch->epoch_size);
29256 atomic_inc(&e->epoch->active);
29257 spin_unlock(&mdev->epoch_lock);
29258
29259 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29260 D_ASSERT(list_empty(&mdev->done_ee));
29261
29262 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29263 - atomic_set(&mdev->current_epoch->epoch_size, 0);
29264 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29265 D_ASSERT(list_empty(&mdev->current_epoch->list));
29266 }
29267
29268 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29269 index cd50435..ba1ffb5 100644
29270 --- a/drivers/block/loop.c
29271 +++ b/drivers/block/loop.c
29272 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29273 mm_segment_t old_fs = get_fs();
29274
29275 set_fs(get_ds());
29276 - bw = file->f_op->write(file, buf, len, &pos);
29277 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29278 set_fs(old_fs);
29279 if (likely(bw == len))
29280 return 0;
29281 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29282 index 4364303..9adf4ee 100644
29283 --- a/drivers/char/Kconfig
29284 +++ b/drivers/char/Kconfig
29285 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29286
29287 config DEVKMEM
29288 bool "/dev/kmem virtual device support"
29289 - default y
29290 + default n
29291 + depends on !GRKERNSEC_KMEM
29292 help
29293 Say Y here if you want to support the /dev/kmem device. The
29294 /dev/kmem device is rarely used, but can be used for certain
29295 @@ -596,6 +597,7 @@ config DEVPORT
29296 bool
29297 depends on !M68K
29298 depends on ISA || PCI
29299 + depends on !GRKERNSEC_KMEM
29300 default y
29301
29302 source "drivers/s390/char/Kconfig"
29303 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29304 index 2e04433..22afc64 100644
29305 --- a/drivers/char/agp/frontend.c
29306 +++ b/drivers/char/agp/frontend.c
29307 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29308 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29309 return -EFAULT;
29310
29311 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29312 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29313 return -EFAULT;
29314
29315 client = agp_find_client_by_pid(reserve.pid);
29316 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
29317 index 095ab90..afad0a4 100644
29318 --- a/drivers/char/briq_panel.c
29319 +++ b/drivers/char/briq_panel.c
29320 @@ -9,6 +9,7 @@
29321 #include <linux/types.h>
29322 #include <linux/errno.h>
29323 #include <linux/tty.h>
29324 +#include <linux/mutex.h>
29325 #include <linux/timer.h>
29326 #include <linux/kernel.h>
29327 #include <linux/wait.h>
29328 @@ -34,6 +35,7 @@ static int vfd_is_open;
29329 static unsigned char vfd[40];
29330 static int vfd_cursor;
29331 static unsigned char ledpb, led;
29332 +static DEFINE_MUTEX(vfd_mutex);
29333
29334 static void update_vfd(void)
29335 {
29336 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
29337 if (!vfd_is_open)
29338 return -EBUSY;
29339
29340 + mutex_lock(&vfd_mutex);
29341 for (;;) {
29342 char c;
29343 if (!indx)
29344 break;
29345 - if (get_user(c, buf))
29346 + if (get_user(c, buf)) {
29347 + mutex_unlock(&vfd_mutex);
29348 return -EFAULT;
29349 + }
29350 if (esc) {
29351 set_led(c);
29352 esc = 0;
29353 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
29354 buf++;
29355 }
29356 update_vfd();
29357 + mutex_unlock(&vfd_mutex);
29358
29359 return len;
29360 }
29361 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29362 index f773a9d..65cd683 100644
29363 --- a/drivers/char/genrtc.c
29364 +++ b/drivers/char/genrtc.c
29365 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
29366 switch (cmd) {
29367
29368 case RTC_PLL_GET:
29369 + memset(&pll, 0, sizeof(pll));
29370 if (get_rtc_pll(&pll))
29371 return -EINVAL;
29372 else
29373 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29374 index 0833896..cccce52 100644
29375 --- a/drivers/char/hpet.c
29376 +++ b/drivers/char/hpet.c
29377 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29378 }
29379
29380 static int
29381 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29382 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29383 struct hpet_info *info)
29384 {
29385 struct hpet_timer __iomem *timer;
29386 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29387 index 58c0e63..46c16bf 100644
29388 --- a/drivers/char/ipmi/ipmi_msghandler.c
29389 +++ b/drivers/char/ipmi/ipmi_msghandler.c
29390 @@ -415,7 +415,7 @@ struct ipmi_smi {
29391 struct proc_dir_entry *proc_dir;
29392 char proc_dir_name[10];
29393
29394 - atomic_t stats[IPMI_NUM_STATS];
29395 + atomic_unchecked_t stats[IPMI_NUM_STATS];
29396
29397 /*
29398 * run_to_completion duplicate of smb_info, smi_info
29399 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29400
29401
29402 #define ipmi_inc_stat(intf, stat) \
29403 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29404 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29405 #define ipmi_get_stat(intf, stat) \
29406 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29407 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29408
29409 static int is_lan_addr(struct ipmi_addr *addr)
29410 {
29411 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29412 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29413 init_waitqueue_head(&intf->waitq);
29414 for (i = 0; i < IPMI_NUM_STATS; i++)
29415 - atomic_set(&intf->stats[i], 0);
29416 + atomic_set_unchecked(&intf->stats[i], 0);
29417
29418 intf->proc_dir = NULL;
29419
29420 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29421 index 50fcf9c..91b5528 100644
29422 --- a/drivers/char/ipmi/ipmi_si_intf.c
29423 +++ b/drivers/char/ipmi/ipmi_si_intf.c
29424 @@ -277,7 +277,7 @@ struct smi_info {
29425 unsigned char slave_addr;
29426
29427 /* Counters and things for the proc filesystem. */
29428 - atomic_t stats[SI_NUM_STATS];
29429 + atomic_unchecked_t stats[SI_NUM_STATS];
29430
29431 struct task_struct *thread;
29432
29433 @@ -286,9 +286,9 @@ struct smi_info {
29434 };
29435
29436 #define smi_inc_stat(smi, stat) \
29437 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29438 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29439 #define smi_get_stat(smi, stat) \
29440 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29441 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29442
29443 #define SI_MAX_PARMS 4
29444
29445 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
29446 atomic_set(&new_smi->req_events, 0);
29447 new_smi->run_to_completion = 0;
29448 for (i = 0; i < SI_NUM_STATS; i++)
29449 - atomic_set(&new_smi->stats[i], 0);
29450 + atomic_set_unchecked(&new_smi->stats[i], 0);
29451
29452 new_smi->interrupt_disabled = 1;
29453 atomic_set(&new_smi->stop_operation, 0);
29454 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29455 index 1aeaaba..e018570 100644
29456 --- a/drivers/char/mbcs.c
29457 +++ b/drivers/char/mbcs.c
29458 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
29459 return 0;
29460 }
29461
29462 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29463 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29464 {
29465 .part_num = MBCS_PART_NUM,
29466 .mfg_num = MBCS_MFG_NUM,
29467 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29468 index d6e9d08..4493e89 100644
29469 --- a/drivers/char/mem.c
29470 +++ b/drivers/char/mem.c
29471 @@ -18,6 +18,7 @@
29472 #include <linux/raw.h>
29473 #include <linux/tty.h>
29474 #include <linux/capability.h>
29475 +#include <linux/security.h>
29476 #include <linux/ptrace.h>
29477 #include <linux/device.h>
29478 #include <linux/highmem.h>
29479 @@ -35,6 +36,10 @@
29480 # include <linux/efi.h>
29481 #endif
29482
29483 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29484 +extern const struct file_operations grsec_fops;
29485 +#endif
29486 +
29487 static inline unsigned long size_inside_page(unsigned long start,
29488 unsigned long size)
29489 {
29490 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29491
29492 while (cursor < to) {
29493 if (!devmem_is_allowed(pfn)) {
29494 +#ifdef CONFIG_GRKERNSEC_KMEM
29495 + gr_handle_mem_readwrite(from, to);
29496 +#else
29497 printk(KERN_INFO
29498 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29499 current->comm, from, to);
29500 +#endif
29501 return 0;
29502 }
29503 cursor += PAGE_SIZE;
29504 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29505 }
29506 return 1;
29507 }
29508 +#elif defined(CONFIG_GRKERNSEC_KMEM)
29509 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29510 +{
29511 + return 0;
29512 +}
29513 #else
29514 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29515 {
29516 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29517
29518 while (count > 0) {
29519 unsigned long remaining;
29520 + char *temp;
29521
29522 sz = size_inside_page(p, count);
29523
29524 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29525 if (!ptr)
29526 return -EFAULT;
29527
29528 - remaining = copy_to_user(buf, ptr, sz);
29529 +#ifdef CONFIG_PAX_USERCOPY
29530 + temp = kmalloc(sz, GFP_KERNEL);
29531 + if (!temp) {
29532 + unxlate_dev_mem_ptr(p, ptr);
29533 + return -ENOMEM;
29534 + }
29535 + memcpy(temp, ptr, sz);
29536 +#else
29537 + temp = ptr;
29538 +#endif
29539 +
29540 + remaining = copy_to_user(buf, temp, sz);
29541 +
29542 +#ifdef CONFIG_PAX_USERCOPY
29543 + kfree(temp);
29544 +#endif
29545 +
29546 unxlate_dev_mem_ptr(p, ptr);
29547 if (remaining)
29548 return -EFAULT;
29549 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29550 size_t count, loff_t *ppos)
29551 {
29552 unsigned long p = *ppos;
29553 - ssize_t low_count, read, sz;
29554 + ssize_t low_count, read, sz, err = 0;
29555 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29556 - int err = 0;
29557
29558 read = 0;
29559 if (p < (unsigned long) high_memory) {
29560 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29561 }
29562 #endif
29563 while (low_count > 0) {
29564 + char *temp;
29565 +
29566 sz = size_inside_page(p, low_count);
29567
29568 /*
29569 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29570 */
29571 kbuf = xlate_dev_kmem_ptr((char *)p);
29572
29573 - if (copy_to_user(buf, kbuf, sz))
29574 +#ifdef CONFIG_PAX_USERCOPY
29575 + temp = kmalloc(sz, GFP_KERNEL);
29576 + if (!temp)
29577 + return -ENOMEM;
29578 + memcpy(temp, kbuf, sz);
29579 +#else
29580 + temp = kbuf;
29581 +#endif
29582 +
29583 + err = copy_to_user(buf, temp, sz);
29584 +
29585 +#ifdef CONFIG_PAX_USERCOPY
29586 + kfree(temp);
29587 +#endif
29588 +
29589 + if (err)
29590 return -EFAULT;
29591 buf += sz;
29592 p += sz;
29593 @@ -867,6 +914,9 @@ static const struct memdev {
29594 #ifdef CONFIG_CRASH_DUMP
29595 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29596 #endif
29597 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29598 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29599 +#endif
29600 };
29601
29602 static int memory_open(struct inode *inode, struct file *filp)
29603 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29604 index da3cfee..a5a6606 100644
29605 --- a/drivers/char/nvram.c
29606 +++ b/drivers/char/nvram.c
29607 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29608
29609 spin_unlock_irq(&rtc_lock);
29610
29611 - if (copy_to_user(buf, contents, tmp - contents))
29612 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29613 return -EFAULT;
29614
29615 *ppos = i;
29616 diff --git a/drivers/char/random.c b/drivers/char/random.c
29617 index 54ca8b2..4a092ed 100644
29618 --- a/drivers/char/random.c
29619 +++ b/drivers/char/random.c
29620 @@ -261,8 +261,13 @@
29621 /*
29622 * Configuration information
29623 */
29624 +#ifdef CONFIG_GRKERNSEC_RANDNET
29625 +#define INPUT_POOL_WORDS 512
29626 +#define OUTPUT_POOL_WORDS 128
29627 +#else
29628 #define INPUT_POOL_WORDS 128
29629 #define OUTPUT_POOL_WORDS 32
29630 +#endif
29631 #define SEC_XFER_SIZE 512
29632 #define EXTRACT_SIZE 10
29633
29634 @@ -300,10 +305,17 @@ static struct poolinfo {
29635 int poolwords;
29636 int tap1, tap2, tap3, tap4, tap5;
29637 } poolinfo_table[] = {
29638 +#ifdef CONFIG_GRKERNSEC_RANDNET
29639 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29640 + { 512, 411, 308, 208, 104, 1 },
29641 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29642 + { 128, 103, 76, 51, 25, 1 },
29643 +#else
29644 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29645 { 128, 103, 76, 51, 25, 1 },
29646 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29647 { 32, 26, 20, 14, 7, 1 },
29648 +#endif
29649 #if 0
29650 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29651 { 2048, 1638, 1231, 819, 411, 1 },
29652 @@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29653
29654 extract_buf(r, tmp);
29655 i = min_t(int, nbytes, EXTRACT_SIZE);
29656 - if (copy_to_user(buf, tmp, i)) {
29657 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29658 ret = -EFAULT;
29659 break;
29660 }
29661 @@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29662 #include <linux/sysctl.h>
29663
29664 static int min_read_thresh = 8, min_write_thresh;
29665 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
29666 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29667 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29668 static char sysctl_bootid[16];
29669
29670 @@ -1260,10 +1272,15 @@ static int proc_do_uuid(ctl_table *table, int write,
29671 uuid = table->data;
29672 if (!uuid) {
29673 uuid = tmp_uuid;
29674 - uuid[8] = 0;
29675 - }
29676 - if (uuid[8] == 0)
29677 generate_random_uuid(uuid);
29678 + } else {
29679 + static DEFINE_SPINLOCK(bootid_spinlock);
29680 +
29681 + spin_lock(&bootid_spinlock);
29682 + if (!uuid[8])
29683 + generate_random_uuid(uuid);
29684 + spin_unlock(&bootid_spinlock);
29685 + }
29686
29687 sprintf(buf, "%pU", uuid);
29688
29689 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29690 index 1ee8ce7..b778bef 100644
29691 --- a/drivers/char/sonypi.c
29692 +++ b/drivers/char/sonypi.c
29693 @@ -55,6 +55,7 @@
29694 #include <asm/uaccess.h>
29695 #include <asm/io.h>
29696 #include <asm/system.h>
29697 +#include <asm/local.h>
29698
29699 #include <linux/sonypi.h>
29700
29701 @@ -491,7 +492,7 @@ static struct sonypi_device {
29702 spinlock_t fifo_lock;
29703 wait_queue_head_t fifo_proc_list;
29704 struct fasync_struct *fifo_async;
29705 - int open_count;
29706 + local_t open_count;
29707 int model;
29708 struct input_dev *input_jog_dev;
29709 struct input_dev *input_key_dev;
29710 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29711 static int sonypi_misc_release(struct inode *inode, struct file *file)
29712 {
29713 mutex_lock(&sonypi_device.lock);
29714 - sonypi_device.open_count--;
29715 + local_dec(&sonypi_device.open_count);
29716 mutex_unlock(&sonypi_device.lock);
29717 return 0;
29718 }
29719 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29720 {
29721 mutex_lock(&sonypi_device.lock);
29722 /* Flush input queue on first open */
29723 - if (!sonypi_device.open_count)
29724 + if (!local_read(&sonypi_device.open_count))
29725 kfifo_reset(&sonypi_device.fifo);
29726 - sonypi_device.open_count++;
29727 + local_inc(&sonypi_device.open_count);
29728 mutex_unlock(&sonypi_device.lock);
29729
29730 return 0;
29731 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29732 index ad7c732..5aa8054 100644
29733 --- a/drivers/char/tpm/tpm.c
29734 +++ b/drivers/char/tpm/tpm.c
29735 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29736 chip->vendor.req_complete_val)
29737 goto out_recv;
29738
29739 - if ((status == chip->vendor.req_canceled)) {
29740 + if (status == chip->vendor.req_canceled) {
29741 dev_err(chip->dev, "Operation Canceled\n");
29742 rc = -ECANCELED;
29743 goto out;
29744 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29745 index 0636520..169c1d0 100644
29746 --- a/drivers/char/tpm/tpm_bios.c
29747 +++ b/drivers/char/tpm/tpm_bios.c
29748 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29749 event = addr;
29750
29751 if ((event->event_type == 0 && event->event_size == 0) ||
29752 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29753 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29754 return NULL;
29755
29756 return addr;
29757 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29758 return NULL;
29759
29760 if ((event->event_type == 0 && event->event_size == 0) ||
29761 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29762 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29763 return NULL;
29764
29765 (*pos)++;
29766 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29767 int i;
29768
29769 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29770 - seq_putc(m, data[i]);
29771 + if (!seq_putc(m, data[i]))
29772 + return -EFAULT;
29773
29774 return 0;
29775 }
29776 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29777 log->bios_event_log_end = log->bios_event_log + len;
29778
29779 virt = acpi_os_map_memory(start, len);
29780 + if (!virt) {
29781 + kfree(log->bios_event_log);
29782 + log->bios_event_log = NULL;
29783 + return -EFAULT;
29784 + }
29785
29786 - memcpy(log->bios_event_log, virt, len);
29787 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29788
29789 acpi_os_unmap_memory(virt, len);
29790 return 0;
29791 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29792 index 41fc148..0dba6dd 100644
29793 --- a/drivers/char/virtio_console.c
29794 +++ b/drivers/char/virtio_console.c
29795 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29796 if (to_user) {
29797 ssize_t ret;
29798
29799 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29800 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29801 if (ret)
29802 return -EFAULT;
29803 } else {
29804 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29805 if (!port_has_data(port) && !port->host_connected)
29806 return 0;
29807
29808 - return fill_readbuf(port, ubuf, count, true);
29809 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29810 }
29811
29812 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29813 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
29814 index c9eee6d..f9d5280 100644
29815 --- a/drivers/edac/amd64_edac.c
29816 +++ b/drivers/edac/amd64_edac.c
29817 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
29818 * PCI core identifies what devices are on a system during boot, and then
29819 * inquiry this table to see if this driver is for a given device found.
29820 */
29821 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
29822 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
29823 {
29824 .vendor = PCI_VENDOR_ID_AMD,
29825 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
29826 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
29827 index e47e73b..348e0bd 100644
29828 --- a/drivers/edac/amd76x_edac.c
29829 +++ b/drivers/edac/amd76x_edac.c
29830 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
29831 edac_mc_free(mci);
29832 }
29833
29834 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
29835 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
29836 {
29837 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29838 AMD762},
29839 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
29840 index 1af531a..3a8ff27 100644
29841 --- a/drivers/edac/e752x_edac.c
29842 +++ b/drivers/edac/e752x_edac.c
29843 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
29844 edac_mc_free(mci);
29845 }
29846
29847 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
29848 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
29849 {
29850 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29851 E7520},
29852 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
29853 index 6ffb6d2..383d8d7 100644
29854 --- a/drivers/edac/e7xxx_edac.c
29855 +++ b/drivers/edac/e7xxx_edac.c
29856 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
29857 edac_mc_free(mci);
29858 }
29859
29860 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
29861 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
29862 {
29863 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29864 E7205},
29865 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29866 index 97f5064..202b6e6 100644
29867 --- a/drivers/edac/edac_pci_sysfs.c
29868 +++ b/drivers/edac/edac_pci_sysfs.c
29869 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29870 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29871 static int edac_pci_poll_msec = 1000; /* one second workq period */
29872
29873 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
29874 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29875 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29876 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29877
29878 static struct kobject *edac_pci_top_main_kobj;
29879 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29880 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29881 edac_printk(KERN_CRIT, EDAC_PCI,
29882 "Signaled System Error on %s\n",
29883 pci_name(dev));
29884 - atomic_inc(&pci_nonparity_count);
29885 + atomic_inc_unchecked(&pci_nonparity_count);
29886 }
29887
29888 if (status & (PCI_STATUS_PARITY)) {
29889 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29890 "Master Data Parity Error on %s\n",
29891 pci_name(dev));
29892
29893 - atomic_inc(&pci_parity_count);
29894 + atomic_inc_unchecked(&pci_parity_count);
29895 }
29896
29897 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29898 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29899 "Detected Parity Error on %s\n",
29900 pci_name(dev));
29901
29902 - atomic_inc(&pci_parity_count);
29903 + atomic_inc_unchecked(&pci_parity_count);
29904 }
29905 }
29906
29907 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29908 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29909 "Signaled System Error on %s\n",
29910 pci_name(dev));
29911 - atomic_inc(&pci_nonparity_count);
29912 + atomic_inc_unchecked(&pci_nonparity_count);
29913 }
29914
29915 if (status & (PCI_STATUS_PARITY)) {
29916 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29917 "Master Data Parity Error on "
29918 "%s\n", pci_name(dev));
29919
29920 - atomic_inc(&pci_parity_count);
29921 + atomic_inc_unchecked(&pci_parity_count);
29922 }
29923
29924 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29925 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29926 "Detected Parity Error on %s\n",
29927 pci_name(dev));
29928
29929 - atomic_inc(&pci_parity_count);
29930 + atomic_inc_unchecked(&pci_parity_count);
29931 }
29932 }
29933 }
29934 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29935 if (!check_pci_errors)
29936 return;
29937
29938 - before_count = atomic_read(&pci_parity_count);
29939 + before_count = atomic_read_unchecked(&pci_parity_count);
29940
29941 /* scan all PCI devices looking for a Parity Error on devices and
29942 * bridges.
29943 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29944 /* Only if operator has selected panic on PCI Error */
29945 if (edac_pci_get_panic_on_pe()) {
29946 /* If the count is different 'after' from 'before' */
29947 - if (before_count != atomic_read(&pci_parity_count))
29948 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29949 panic("EDAC: PCI Parity Error");
29950 }
29951 }
29952 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
29953 index c0510b3..6e2a954 100644
29954 --- a/drivers/edac/i3000_edac.c
29955 +++ b/drivers/edac/i3000_edac.c
29956 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
29957 edac_mc_free(mci);
29958 }
29959
29960 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
29961 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
29962 {
29963 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29964 I3000},
29965 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
29966 index 73f55e200..5faaf59 100644
29967 --- a/drivers/edac/i3200_edac.c
29968 +++ b/drivers/edac/i3200_edac.c
29969 @@ -445,7 +445,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
29970 edac_mc_free(mci);
29971 }
29972
29973 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
29974 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
29975 {
29976 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29977 I3200},
29978 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
29979 index 4dc3ac2..67d05a6 100644
29980 --- a/drivers/edac/i5000_edac.c
29981 +++ b/drivers/edac/i5000_edac.c
29982 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
29983 *
29984 * The "E500P" device is the first device supported.
29985 */
29986 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
29987 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
29988 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
29989 .driver_data = I5000P},
29990
29991 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
29992 index bcbdeec..9886d16 100644
29993 --- a/drivers/edac/i5100_edac.c
29994 +++ b/drivers/edac/i5100_edac.c
29995 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
29996 edac_mc_free(mci);
29997 }
29998
29999 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
30000 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
30001 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
30002 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
30003 { 0, }
30004 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
30005 index 74d6ec34..baff517 100644
30006 --- a/drivers/edac/i5400_edac.c
30007 +++ b/drivers/edac/i5400_edac.c
30008 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
30009 *
30010 * The "E500P" device is the first device supported.
30011 */
30012 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
30013 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
30014 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
30015 {0,} /* 0 terminated list. */
30016 };
30017 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
30018 index 6104dba..e7ea8e1 100644
30019 --- a/drivers/edac/i7300_edac.c
30020 +++ b/drivers/edac/i7300_edac.c
30021 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
30022 *
30023 * Has only 8086:360c PCI ID
30024 */
30025 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
30026 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
30027 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
30028 {0,} /* 0 terminated list. */
30029 };
30030 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
30031 index 8568d9b..42b2fa8 100644
30032 --- a/drivers/edac/i7core_edac.c
30033 +++ b/drivers/edac/i7core_edac.c
30034 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
30035 /*
30036 * pci_device_id table for which devices we are looking for
30037 */
30038 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
30039 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
30040 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
30041 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
30042 {0,} /* 0 terminated list. */
30043 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
30044 index 4329d39..f3022ef 100644
30045 --- a/drivers/edac/i82443bxgx_edac.c
30046 +++ b/drivers/edac/i82443bxgx_edac.c
30047 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
30048
30049 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
30050
30051 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
30052 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
30053 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
30054 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
30055 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
30056 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
30057 index 931a057..fd28340 100644
30058 --- a/drivers/edac/i82860_edac.c
30059 +++ b/drivers/edac/i82860_edac.c
30060 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
30061 edac_mc_free(mci);
30062 }
30063
30064 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
30065 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
30066 {
30067 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30068 I82860},
30069 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
30070 index 33864c6..01edc61 100644
30071 --- a/drivers/edac/i82875p_edac.c
30072 +++ b/drivers/edac/i82875p_edac.c
30073 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
30074 edac_mc_free(mci);
30075 }
30076
30077 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
30078 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
30079 {
30080 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30081 I82875P},
30082 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
30083 index 4184e01..dcb2cd3 100644
30084 --- a/drivers/edac/i82975x_edac.c
30085 +++ b/drivers/edac/i82975x_edac.c
30086 @@ -612,7 +612,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
30087 edac_mc_free(mci);
30088 }
30089
30090 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
30091 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
30092 {
30093 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30094 I82975X
30095 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
30096 index 0106747..0b40417 100644
30097 --- a/drivers/edac/mce_amd.h
30098 +++ b/drivers/edac/mce_amd.h
30099 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
30100 bool (*dc_mce)(u16, u8);
30101 bool (*ic_mce)(u16, u8);
30102 bool (*nb_mce)(u16, u8);
30103 -};
30104 +} __no_const;
30105
30106 void amd_report_gart_errors(bool);
30107 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
30108 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
30109 index e294e1b..a41b05b 100644
30110 --- a/drivers/edac/r82600_edac.c
30111 +++ b/drivers/edac/r82600_edac.c
30112 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
30113 edac_mc_free(mci);
30114 }
30115
30116 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
30117 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
30118 {
30119 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
30120 },
30121 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
30122 index 1dc118d..8c68af9 100644
30123 --- a/drivers/edac/sb_edac.c
30124 +++ b/drivers/edac/sb_edac.c
30125 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
30126 /*
30127 * pci_device_id table for which devices we are looking for
30128 */
30129 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
30130 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
30131 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
30132 {0,} /* 0 terminated list. */
30133 };
30134 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
30135 index b6f47de..c5acf3a 100644
30136 --- a/drivers/edac/x38_edac.c
30137 +++ b/drivers/edac/x38_edac.c
30138 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
30139 edac_mc_free(mci);
30140 }
30141
30142 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
30143 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
30144 {
30145 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30146 X38},
30147 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
30148 index 85661b0..c784559a 100644
30149 --- a/drivers/firewire/core-card.c
30150 +++ b/drivers/firewire/core-card.c
30151 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
30152
30153 void fw_core_remove_card(struct fw_card *card)
30154 {
30155 - struct fw_card_driver dummy_driver = dummy_driver_template;
30156 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
30157
30158 card->driver->update_phy_reg(card, 4,
30159 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
30160 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
30161 index 4799393..37bd3ab 100644
30162 --- a/drivers/firewire/core-cdev.c
30163 +++ b/drivers/firewire/core-cdev.c
30164 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
30165 int ret;
30166
30167 if ((request->channels == 0 && request->bandwidth == 0) ||
30168 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
30169 - request->bandwidth < 0)
30170 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
30171 return -EINVAL;
30172
30173 r = kmalloc(sizeof(*r), GFP_KERNEL);
30174 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
30175 index 855ab3f..11f4bbd 100644
30176 --- a/drivers/firewire/core-transaction.c
30177 +++ b/drivers/firewire/core-transaction.c
30178 @@ -37,6 +37,7 @@
30179 #include <linux/timer.h>
30180 #include <linux/types.h>
30181 #include <linux/workqueue.h>
30182 +#include <linux/sched.h>
30183
30184 #include <asm/byteorder.h>
30185
30186 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
30187 index b45be57..5fad18b 100644
30188 --- a/drivers/firewire/core.h
30189 +++ b/drivers/firewire/core.h
30190 @@ -101,6 +101,7 @@ struct fw_card_driver {
30191
30192 int (*stop_iso)(struct fw_iso_context *ctx);
30193 };
30194 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
30195
30196 void fw_card_initialize(struct fw_card *card,
30197 const struct fw_card_driver *driver, struct device *device);
30198 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
30199 index 153980b..4b4d046 100644
30200 --- a/drivers/firmware/dmi_scan.c
30201 +++ b/drivers/firmware/dmi_scan.c
30202 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
30203 }
30204 }
30205 else {
30206 - /*
30207 - * no iounmap() for that ioremap(); it would be a no-op, but
30208 - * it's so early in setup that sucker gets confused into doing
30209 - * what it shouldn't if we actually call it.
30210 - */
30211 p = dmi_ioremap(0xF0000, 0x10000);
30212 if (p == NULL)
30213 goto error;
30214 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
30215 if (buf == NULL)
30216 return -1;
30217
30218 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
30219 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
30220
30221 iounmap(buf);
30222 return 0;
30223 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
30224 index 82d5c20..44a7177 100644
30225 --- a/drivers/gpio/gpio-vr41xx.c
30226 +++ b/drivers/gpio/gpio-vr41xx.c
30227 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30228 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30229 maskl, pendl, maskh, pendh);
30230
30231 - atomic_inc(&irq_err_count);
30232 + atomic_inc_unchecked(&irq_err_count);
30233
30234 return -EINVAL;
30235 }
30236 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
30237 index 84a4a80..ce0306e 100644
30238 --- a/drivers/gpu/drm/drm_crtc_helper.c
30239 +++ b/drivers/gpu/drm/drm_crtc_helper.c
30240 @@ -280,7 +280,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
30241 struct drm_crtc *tmp;
30242 int crtc_mask = 1;
30243
30244 - WARN(!crtc, "checking null crtc?\n");
30245 + BUG_ON(!crtc);
30246
30247 dev = crtc->dev;
30248
30249 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
30250 index ebf7d3f..d64c436 100644
30251 --- a/drivers/gpu/drm/drm_drv.c
30252 +++ b/drivers/gpu/drm/drm_drv.c
30253 @@ -312,7 +312,7 @@ module_exit(drm_core_exit);
30254 /**
30255 * Copy and IOCTL return string to user space
30256 */
30257 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30258 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30259 {
30260 int len;
30261
30262 @@ -391,7 +391,7 @@ long drm_ioctl(struct file *filp,
30263
30264 dev = file_priv->minor->dev;
30265 atomic_inc(&dev->ioctl_count);
30266 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30267 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30268 ++file_priv->ioctl_count;
30269
30270 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30271 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
30272 index 6263b01..7987f55 100644
30273 --- a/drivers/gpu/drm/drm_fops.c
30274 +++ b/drivers/gpu/drm/drm_fops.c
30275 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
30276 }
30277
30278 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30279 - atomic_set(&dev->counts[i], 0);
30280 + atomic_set_unchecked(&dev->counts[i], 0);
30281
30282 dev->sigdata.lock = NULL;
30283
30284 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
30285
30286 retcode = drm_open_helper(inode, filp, dev);
30287 if (!retcode) {
30288 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30289 - if (!dev->open_count++)
30290 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30291 + if (local_inc_return(&dev->open_count) == 1)
30292 retcode = drm_setup(dev);
30293 }
30294 if (!retcode) {
30295 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
30296
30297 mutex_lock(&drm_global_mutex);
30298
30299 - DRM_DEBUG("open_count = %d\n", dev->open_count);
30300 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30301
30302 if (dev->driver->preclose)
30303 dev->driver->preclose(dev, file_priv);
30304 @@ -482,10 +482,10 @@ int drm_release(struct inode *inode, struct file *filp)
30305 * Begin inline drm_release
30306 */
30307
30308 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30309 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30310 task_pid_nr(current),
30311 (long)old_encode_dev(file_priv->minor->device),
30312 - dev->open_count);
30313 + local_read(&dev->open_count));
30314
30315 /* Release any auth tokens that might point to this file_priv,
30316 (do that under the drm_global_mutex) */
30317 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
30318 * End inline drm_release
30319 */
30320
30321 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30322 - if (!--dev->open_count) {
30323 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30324 + if (local_dec_and_test(&dev->open_count)) {
30325 if (atomic_read(&dev->ioctl_count)) {
30326 DRM_ERROR("Device busy: %d\n",
30327 atomic_read(&dev->ioctl_count));
30328 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30329 index c87dc96..326055d 100644
30330 --- a/drivers/gpu/drm/drm_global.c
30331 +++ b/drivers/gpu/drm/drm_global.c
30332 @@ -36,7 +36,7 @@
30333 struct drm_global_item {
30334 struct mutex mutex;
30335 void *object;
30336 - int refcount;
30337 + atomic_t refcount;
30338 };
30339
30340 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30341 @@ -49,7 +49,7 @@ void drm_global_init(void)
30342 struct drm_global_item *item = &glob[i];
30343 mutex_init(&item->mutex);
30344 item->object = NULL;
30345 - item->refcount = 0;
30346 + atomic_set(&item->refcount, 0);
30347 }
30348 }
30349
30350 @@ -59,7 +59,7 @@ void drm_global_release(void)
30351 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30352 struct drm_global_item *item = &glob[i];
30353 BUG_ON(item->object != NULL);
30354 - BUG_ON(item->refcount != 0);
30355 + BUG_ON(atomic_read(&item->refcount) != 0);
30356 }
30357 }
30358
30359 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30360 void *object;
30361
30362 mutex_lock(&item->mutex);
30363 - if (item->refcount == 0) {
30364 + if (atomic_read(&item->refcount) == 0) {
30365 item->object = kzalloc(ref->size, GFP_KERNEL);
30366 if (unlikely(item->object == NULL)) {
30367 ret = -ENOMEM;
30368 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30369 goto out_err;
30370
30371 }
30372 - ++item->refcount;
30373 + atomic_inc(&item->refcount);
30374 ref->object = item->object;
30375 object = item->object;
30376 mutex_unlock(&item->mutex);
30377 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30378 struct drm_global_item *item = &glob[ref->global_type];
30379
30380 mutex_lock(&item->mutex);
30381 - BUG_ON(item->refcount == 0);
30382 + BUG_ON(atomic_read(&item->refcount) == 0);
30383 BUG_ON(ref->object != item->object);
30384 - if (--item->refcount == 0) {
30385 + if (atomic_dec_and_test(&item->refcount)) {
30386 ref->release(ref);
30387 item->object = NULL;
30388 }
30389 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30390 index ab1162d..42587b2 100644
30391 --- a/drivers/gpu/drm/drm_info.c
30392 +++ b/drivers/gpu/drm/drm_info.c
30393 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30394 struct drm_local_map *map;
30395 struct drm_map_list *r_list;
30396
30397 - /* Hardcoded from _DRM_FRAME_BUFFER,
30398 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30399 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30400 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30401 + static const char * const types[] = {
30402 + [_DRM_FRAME_BUFFER] = "FB",
30403 + [_DRM_REGISTERS] = "REG",
30404 + [_DRM_SHM] = "SHM",
30405 + [_DRM_AGP] = "AGP",
30406 + [_DRM_SCATTER_GATHER] = "SG",
30407 + [_DRM_CONSISTENT] = "PCI",
30408 + [_DRM_GEM] = "GEM" };
30409 const char *type;
30410 int i;
30411
30412 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30413 map = r_list->map;
30414 if (!map)
30415 continue;
30416 - if (map->type < 0 || map->type > 5)
30417 + if (map->type >= ARRAY_SIZE(types))
30418 type = "??";
30419 else
30420 type = types[map->type];
30421 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30422 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30423 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30424 vma->vm_flags & VM_IO ? 'i' : '-',
30425 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30426 + 0);
30427 +#else
30428 vma->vm_pgoff);
30429 +#endif
30430
30431 #if defined(__i386__)
30432 pgprot = pgprot_val(vma->vm_page_prot);
30433 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30434 index 637fcc3..e890b33 100644
30435 --- a/drivers/gpu/drm/drm_ioc32.c
30436 +++ b/drivers/gpu/drm/drm_ioc32.c
30437 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30438 request = compat_alloc_user_space(nbytes);
30439 if (!access_ok(VERIFY_WRITE, request, nbytes))
30440 return -EFAULT;
30441 - list = (struct drm_buf_desc *) (request + 1);
30442 + list = (struct drm_buf_desc __user *) (request + 1);
30443
30444 if (__put_user(count, &request->count)
30445 || __put_user(list, &request->list))
30446 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30447 request = compat_alloc_user_space(nbytes);
30448 if (!access_ok(VERIFY_WRITE, request, nbytes))
30449 return -EFAULT;
30450 - list = (struct drm_buf_pub *) (request + 1);
30451 + list = (struct drm_buf_pub __user *) (request + 1);
30452
30453 if (__put_user(count, &request->count)
30454 || __put_user(list, &request->list))
30455 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30456 index 956fd38..e52167a 100644
30457 --- a/drivers/gpu/drm/drm_ioctl.c
30458 +++ b/drivers/gpu/drm/drm_ioctl.c
30459 @@ -251,7 +251,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30460 stats->data[i].value =
30461 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30462 else
30463 - stats->data[i].value = atomic_read(&dev->counts[i]);
30464 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30465 stats->data[i].type = dev->types[i];
30466 }
30467
30468 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30469 index c79c713..2048588 100644
30470 --- a/drivers/gpu/drm/drm_lock.c
30471 +++ b/drivers/gpu/drm/drm_lock.c
30472 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30473 if (drm_lock_take(&master->lock, lock->context)) {
30474 master->lock.file_priv = file_priv;
30475 master->lock.lock_time = jiffies;
30476 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30477 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30478 break; /* Got lock */
30479 }
30480
30481 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30482 return -EINVAL;
30483 }
30484
30485 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30486 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30487
30488 if (drm_lock_free(&master->lock, lock->context)) {
30489 /* FIXME: Should really bail out here. */
30490 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30491 index 7f4b4e1..bf4def2 100644
30492 --- a/drivers/gpu/drm/i810/i810_dma.c
30493 +++ b/drivers/gpu/drm/i810/i810_dma.c
30494 @@ -948,8 +948,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30495 dma->buflist[vertex->idx],
30496 vertex->discard, vertex->used);
30497
30498 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30499 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30500 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30501 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30502 sarea_priv->last_enqueue = dev_priv->counter - 1;
30503 sarea_priv->last_dispatch = (int)hw_status[5];
30504
30505 @@ -1109,8 +1109,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30506 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30507 mc->last_render);
30508
30509 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30510 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30511 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30512 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30513 sarea_priv->last_enqueue = dev_priv->counter - 1;
30514 sarea_priv->last_dispatch = (int)hw_status[5];
30515
30516 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30517 index c9339f4..f5e1b9d 100644
30518 --- a/drivers/gpu/drm/i810/i810_drv.h
30519 +++ b/drivers/gpu/drm/i810/i810_drv.h
30520 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30521 int page_flipping;
30522
30523 wait_queue_head_t irq_queue;
30524 - atomic_t irq_received;
30525 - atomic_t irq_emitted;
30526 + atomic_unchecked_t irq_received;
30527 + atomic_unchecked_t irq_emitted;
30528
30529 int front_offset;
30530 } drm_i810_private_t;
30531 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30532 index deaa657..e0fd296 100644
30533 --- a/drivers/gpu/drm/i915/i915_debugfs.c
30534 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
30535 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30536 I915_READ(GTIMR));
30537 }
30538 seq_printf(m, "Interrupts received: %d\n",
30539 - atomic_read(&dev_priv->irq_received));
30540 + atomic_read_unchecked(&dev_priv->irq_received));
30541 for (i = 0; i < I915_NUM_RINGS; i++) {
30542 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30543 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30544 @@ -1321,7 +1321,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30545 return ret;
30546
30547 if (opregion->header)
30548 - seq_write(m, opregion->header, OPREGION_SIZE);
30549 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30550
30551 mutex_unlock(&dev->struct_mutex);
30552
30553 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30554 index ddfe3d9..f6e6b21 100644
30555 --- a/drivers/gpu/drm/i915/i915_dma.c
30556 +++ b/drivers/gpu/drm/i915/i915_dma.c
30557 @@ -1175,7 +1175,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30558 bool can_switch;
30559
30560 spin_lock(&dev->count_lock);
30561 - can_switch = (dev->open_count == 0);
30562 + can_switch = (local_read(&dev->open_count) == 0);
30563 spin_unlock(&dev->count_lock);
30564 return can_switch;
30565 }
30566 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30567 index 9689ca3..294f9c1 100644
30568 --- a/drivers/gpu/drm/i915/i915_drv.h
30569 +++ b/drivers/gpu/drm/i915/i915_drv.h
30570 @@ -231,7 +231,7 @@ struct drm_i915_display_funcs {
30571 /* render clock increase/decrease */
30572 /* display clock increase/decrease */
30573 /* pll clock increase/decrease */
30574 -};
30575 +} __no_const;
30576
30577 struct intel_device_info {
30578 u8 gen;
30579 @@ -320,7 +320,7 @@ typedef struct drm_i915_private {
30580 int current_page;
30581 int page_flipping;
30582
30583 - atomic_t irq_received;
30584 + atomic_unchecked_t irq_received;
30585
30586 /* protects the irq masks */
30587 spinlock_t irq_lock;
30588 @@ -896,7 +896,7 @@ struct drm_i915_gem_object {
30589 * will be page flipped away on the next vblank. When it
30590 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30591 */
30592 - atomic_t pending_flip;
30593 + atomic_unchecked_t pending_flip;
30594 };
30595
30596 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30597 @@ -1276,7 +1276,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30598 extern void intel_teardown_gmbus(struct drm_device *dev);
30599 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30600 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30601 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30602 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30603 {
30604 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30605 }
30606 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30607 index e159e33..cdcc663 100644
30608 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30609 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30610 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30611 i915_gem_clflush_object(obj);
30612
30613 if (obj->base.pending_write_domain)
30614 - cd->flips |= atomic_read(&obj->pending_flip);
30615 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30616
30617 /* The actual obj->write_domain will be updated with
30618 * pending_write_domain after we emit the accumulated flush for all
30619 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30620
30621 static int
30622 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30623 - int count)
30624 + unsigned int count)
30625 {
30626 - int i;
30627 + unsigned int i;
30628
30629 for (i = 0; i < count; i++) {
30630 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30631 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30632 index 5bd4361..0241a42 100644
30633 --- a/drivers/gpu/drm/i915/i915_irq.c
30634 +++ b/drivers/gpu/drm/i915/i915_irq.c
30635 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30636 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30637 struct drm_i915_master_private *master_priv;
30638
30639 - atomic_inc(&dev_priv->irq_received);
30640 + atomic_inc_unchecked(&dev_priv->irq_received);
30641
30642 /* disable master interrupt before clearing iir */
30643 de_ier = I915_READ(DEIER);
30644 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30645 struct drm_i915_master_private *master_priv;
30646 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30647
30648 - atomic_inc(&dev_priv->irq_received);
30649 + atomic_inc_unchecked(&dev_priv->irq_received);
30650
30651 if (IS_GEN6(dev))
30652 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30653 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30654 int ret = IRQ_NONE, pipe;
30655 bool blc_event = false;
30656
30657 - atomic_inc(&dev_priv->irq_received);
30658 + atomic_inc_unchecked(&dev_priv->irq_received);
30659
30660 iir = I915_READ(IIR);
30661
30662 @@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30663 {
30664 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30665
30666 - atomic_set(&dev_priv->irq_received, 0);
30667 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30668
30669 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30670 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30671 @@ -1932,7 +1932,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30672 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30673 int pipe;
30674
30675 - atomic_set(&dev_priv->irq_received, 0);
30676 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30677
30678 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30679 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30680 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30681 index 2163818..cede019 100644
30682 --- a/drivers/gpu/drm/i915/intel_display.c
30683 +++ b/drivers/gpu/drm/i915/intel_display.c
30684 @@ -2238,7 +2238,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
30685
30686 wait_event(dev_priv->pending_flip_queue,
30687 atomic_read(&dev_priv->mm.wedged) ||
30688 - atomic_read(&obj->pending_flip) == 0);
30689 + atomic_read_unchecked(&obj->pending_flip) == 0);
30690
30691 /* Big Hammer, we also need to ensure that any pending
30692 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30693 @@ -2859,7 +2859,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30694 obj = to_intel_framebuffer(crtc->fb)->obj;
30695 dev_priv = crtc->dev->dev_private;
30696 wait_event(dev_priv->pending_flip_queue,
30697 - atomic_read(&obj->pending_flip) == 0);
30698 + atomic_read_unchecked(&obj->pending_flip) == 0);
30699 }
30700
30701 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30702 @@ -7171,7 +7171,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30703
30704 atomic_clear_mask(1 << intel_crtc->plane,
30705 &obj->pending_flip.counter);
30706 - if (atomic_read(&obj->pending_flip) == 0)
30707 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
30708 wake_up(&dev_priv->pending_flip_queue);
30709
30710 schedule_work(&work->work);
30711 @@ -7354,7 +7354,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
30712 OUT_RING(fb->pitches[0] | obj->tiling_mode);
30713 OUT_RING(obj->gtt_offset);
30714
30715 - pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
30716 + /* Contrary to the suggestions in the documentation,
30717 + * "Enable Panel Fitter" does not seem to be required when page
30718 + * flipping with a non-native mode, and worse causes a normal
30719 + * modeset to fail.
30720 + * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
30721 + */
30722 + pf = 0;
30723 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
30724 OUT_RING(pf | pipesrc);
30725 ADVANCE_LP_RING();
30726 @@ -7461,7 +7467,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30727 /* Block clients from rendering to the new back buffer until
30728 * the flip occurs and the object is no longer visible.
30729 */
30730 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30731 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30732
30733 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30734 if (ret)
30735 @@ -7475,7 +7481,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30736 return 0;
30737
30738 cleanup_pending:
30739 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30740 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30741 drm_gem_object_unreference(&work->old_fb_obj->base);
30742 drm_gem_object_unreference(&obj->base);
30743 mutex_unlock(&dev->struct_mutex);
30744 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30745 index 54558a0..2d97005 100644
30746 --- a/drivers/gpu/drm/mga/mga_drv.h
30747 +++ b/drivers/gpu/drm/mga/mga_drv.h
30748 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30749 u32 clear_cmd;
30750 u32 maccess;
30751
30752 - atomic_t vbl_received; /**< Number of vblanks received. */
30753 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30754 wait_queue_head_t fence_queue;
30755 - atomic_t last_fence_retired;
30756 + atomic_unchecked_t last_fence_retired;
30757 u32 next_fence_to_post;
30758
30759 unsigned int fb_cpp;
30760 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30761 index 2581202..f230a8d9 100644
30762 --- a/drivers/gpu/drm/mga/mga_irq.c
30763 +++ b/drivers/gpu/drm/mga/mga_irq.c
30764 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30765 if (crtc != 0)
30766 return 0;
30767
30768 - return atomic_read(&dev_priv->vbl_received);
30769 + return atomic_read_unchecked(&dev_priv->vbl_received);
30770 }
30771
30772
30773 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30774 /* VBLANK interrupt */
30775 if (status & MGA_VLINEPEN) {
30776 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30777 - atomic_inc(&dev_priv->vbl_received);
30778 + atomic_inc_unchecked(&dev_priv->vbl_received);
30779 drm_handle_vblank(dev, 0);
30780 handled = 1;
30781 }
30782 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30783 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30784 MGA_WRITE(MGA_PRIMEND, prim_end);
30785
30786 - atomic_inc(&dev_priv->last_fence_retired);
30787 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
30788 DRM_WAKEUP(&dev_priv->fence_queue);
30789 handled = 1;
30790 }
30791 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30792 * using fences.
30793 */
30794 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30795 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30796 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30797 - *sequence) <= (1 << 23)));
30798
30799 *sequence = cur_fence;
30800 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30801 index e5cbead..6c354a3 100644
30802 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30803 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30804 @@ -199,7 +199,7 @@ struct methods {
30805 const char desc[8];
30806 void (*loadbios)(struct drm_device *, uint8_t *);
30807 const bool rw;
30808 -};
30809 +} __do_const;
30810
30811 static struct methods shadow_methods[] = {
30812 { "PRAMIN", load_vbios_pramin, true },
30813 @@ -5290,7 +5290,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30814 struct bit_table {
30815 const char id;
30816 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30817 -};
30818 +} __no_const;
30819
30820 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30821
30822 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30823 index b827098..c31a797 100644
30824 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30825 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30826 @@ -242,7 +242,7 @@ struct nouveau_channel {
30827 struct list_head pending;
30828 uint32_t sequence;
30829 uint32_t sequence_ack;
30830 - atomic_t last_sequence_irq;
30831 + atomic_unchecked_t last_sequence_irq;
30832 struct nouveau_vma vma;
30833 } fence;
30834
30835 @@ -323,7 +323,7 @@ struct nouveau_exec_engine {
30836 u32 handle, u16 class);
30837 void (*set_tile_region)(struct drm_device *dev, int i);
30838 void (*tlb_flush)(struct drm_device *, int engine);
30839 -};
30840 +} __no_const;
30841
30842 struct nouveau_instmem_engine {
30843 void *priv;
30844 @@ -345,13 +345,13 @@ struct nouveau_instmem_engine {
30845 struct nouveau_mc_engine {
30846 int (*init)(struct drm_device *dev);
30847 void (*takedown)(struct drm_device *dev);
30848 -};
30849 +} __no_const;
30850
30851 struct nouveau_timer_engine {
30852 int (*init)(struct drm_device *dev);
30853 void (*takedown)(struct drm_device *dev);
30854 uint64_t (*read)(struct drm_device *dev);
30855 -};
30856 +} __no_const;
30857
30858 struct nouveau_fb_engine {
30859 int num_tiles;
30860 @@ -566,7 +566,7 @@ struct nouveau_vram_engine {
30861 void (*put)(struct drm_device *, struct nouveau_mem **);
30862
30863 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30864 -};
30865 +} __no_const;
30866
30867 struct nouveau_engine {
30868 struct nouveau_instmem_engine instmem;
30869 @@ -714,7 +714,7 @@ struct drm_nouveau_private {
30870 struct drm_global_reference mem_global_ref;
30871 struct ttm_bo_global_ref bo_global_ref;
30872 struct ttm_bo_device bdev;
30873 - atomic_t validate_sequence;
30874 + atomic_unchecked_t validate_sequence;
30875 } ttm;
30876
30877 struct {
30878 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30879 index 2f6daae..c9d7b9e 100644
30880 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30881 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30882 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30883 if (USE_REFCNT(dev))
30884 sequence = nvchan_rd32(chan, 0x48);
30885 else
30886 - sequence = atomic_read(&chan->fence.last_sequence_irq);
30887 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30888
30889 if (chan->fence.sequence_ack == sequence)
30890 goto out;
30891 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30892 return ret;
30893 }
30894
30895 - atomic_set(&chan->fence.last_sequence_irq, 0);
30896 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30897 return 0;
30898 }
30899
30900 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30901 index 7ce3fde..cb3ea04 100644
30902 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30903 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30904 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30905 int trycnt = 0;
30906 int ret, i;
30907
30908 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30909 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30910 retry:
30911 if (++trycnt > 100000) {
30912 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30913 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30914 index f80c5e0..936baa7 100644
30915 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30916 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30917 @@ -543,7 +543,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30918 bool can_switch;
30919
30920 spin_lock(&dev->count_lock);
30921 - can_switch = (dev->open_count == 0);
30922 + can_switch = (local_read(&dev->open_count) == 0);
30923 spin_unlock(&dev->count_lock);
30924 return can_switch;
30925 }
30926 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30927 index dbdea8e..cd6eeeb 100644
30928 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
30929 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30930 @@ -554,7 +554,7 @@ static int
30931 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30932 u32 class, u32 mthd, u32 data)
30933 {
30934 - atomic_set(&chan->fence.last_sequence_irq, data);
30935 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30936 return 0;
30937 }
30938
30939 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30940 index bcac90b..53bfc76 100644
30941 --- a/drivers/gpu/drm/r128/r128_cce.c
30942 +++ b/drivers/gpu/drm/r128/r128_cce.c
30943 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30944
30945 /* GH: Simple idle check.
30946 */
30947 - atomic_set(&dev_priv->idle_count, 0);
30948 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30949
30950 /* We don't support anything other than bus-mastering ring mode,
30951 * but the ring can be in either AGP or PCI space for the ring
30952 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30953 index 930c71b..499aded 100644
30954 --- a/drivers/gpu/drm/r128/r128_drv.h
30955 +++ b/drivers/gpu/drm/r128/r128_drv.h
30956 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30957 int is_pci;
30958 unsigned long cce_buffers_offset;
30959
30960 - atomic_t idle_count;
30961 + atomic_unchecked_t idle_count;
30962
30963 int page_flipping;
30964 int current_page;
30965 u32 crtc_offset;
30966 u32 crtc_offset_cntl;
30967
30968 - atomic_t vbl_received;
30969 + atomic_unchecked_t vbl_received;
30970
30971 u32 color_fmt;
30972 unsigned int front_offset;
30973 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30974 index 429d5a0..7e899ed 100644
30975 --- a/drivers/gpu/drm/r128/r128_irq.c
30976 +++ b/drivers/gpu/drm/r128/r128_irq.c
30977 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30978 if (crtc != 0)
30979 return 0;
30980
30981 - return atomic_read(&dev_priv->vbl_received);
30982 + return atomic_read_unchecked(&dev_priv->vbl_received);
30983 }
30984
30985 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30986 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30987 /* VBLANK interrupt */
30988 if (status & R128_CRTC_VBLANK_INT) {
30989 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30990 - atomic_inc(&dev_priv->vbl_received);
30991 + atomic_inc_unchecked(&dev_priv->vbl_received);
30992 drm_handle_vblank(dev, 0);
30993 return IRQ_HANDLED;
30994 }
30995 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30996 index a9e33ce..09edd4b 100644
30997 --- a/drivers/gpu/drm/r128/r128_state.c
30998 +++ b/drivers/gpu/drm/r128/r128_state.c
30999 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
31000
31001 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
31002 {
31003 - if (atomic_read(&dev_priv->idle_count) == 0)
31004 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
31005 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
31006 else
31007 - atomic_set(&dev_priv->idle_count, 0);
31008 + atomic_set_unchecked(&dev_priv->idle_count, 0);
31009 }
31010
31011 #endif
31012 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
31013 index 5a82b6b..9e69c73 100644
31014 --- a/drivers/gpu/drm/radeon/mkregtable.c
31015 +++ b/drivers/gpu/drm/radeon/mkregtable.c
31016 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
31017 regex_t mask_rex;
31018 regmatch_t match[4];
31019 char buf[1024];
31020 - size_t end;
31021 + long end;
31022 int len;
31023 int done = 0;
31024 int r;
31025 unsigned o;
31026 struct offset *offset;
31027 char last_reg_s[10];
31028 - int last_reg;
31029 + unsigned long last_reg;
31030
31031 if (regcomp
31032 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
31033 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
31034 index 1668ec1..30ebdab 100644
31035 --- a/drivers/gpu/drm/radeon/radeon.h
31036 +++ b/drivers/gpu/drm/radeon/radeon.h
31037 @@ -250,7 +250,7 @@ struct radeon_fence_driver {
31038 uint32_t scratch_reg;
31039 uint64_t gpu_addr;
31040 volatile uint32_t *cpu_addr;
31041 - atomic_t seq;
31042 + atomic_unchecked_t seq;
31043 uint32_t last_seq;
31044 unsigned long last_jiffies;
31045 unsigned long last_timeout;
31046 @@ -752,7 +752,7 @@ struct r600_blit_cp_primitives {
31047 int x2, int y2);
31048 void (*draw_auto)(struct radeon_device *rdev);
31049 void (*set_default_state)(struct radeon_device *rdev);
31050 -};
31051 +} __no_const;
31052
31053 struct r600_blit {
31054 struct mutex mutex;
31055 @@ -1201,7 +1201,7 @@ struct radeon_asic {
31056 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
31057 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
31058 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
31059 -};
31060 +} __no_const;
31061
31062 /*
31063 * Asic structures
31064 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
31065 index 49f7cb7..2fcb48f 100644
31066 --- a/drivers/gpu/drm/radeon/radeon_device.c
31067 +++ b/drivers/gpu/drm/radeon/radeon_device.c
31068 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
31069 bool can_switch;
31070
31071 spin_lock(&dev->count_lock);
31072 - can_switch = (dev->open_count == 0);
31073 + can_switch = (local_read(&dev->open_count) == 0);
31074 spin_unlock(&dev->count_lock);
31075 return can_switch;
31076 }
31077 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
31078 index a1b59ca..86f2d44 100644
31079 --- a/drivers/gpu/drm/radeon/radeon_drv.h
31080 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
31081 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
31082
31083 /* SW interrupt */
31084 wait_queue_head_t swi_queue;
31085 - atomic_t swi_emitted;
31086 + atomic_unchecked_t swi_emitted;
31087 int vblank_crtc;
31088 uint32_t irq_enable_reg;
31089 uint32_t r500_disp_irq_reg;
31090 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
31091 index 4bd36a3..e66fe9c 100644
31092 --- a/drivers/gpu/drm/radeon/radeon_fence.c
31093 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
31094 @@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
31095 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
31096 return 0;
31097 }
31098 - fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
31099 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
31100 if (!rdev->ring[fence->ring].ready)
31101 /* FIXME: cp is not running assume everythings is done right
31102 * away
31103 @@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
31104 }
31105 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
31106 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
31107 - radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
31108 + radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
31109 rdev->fence_drv[ring].initialized = true;
31110 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
31111 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
31112 @@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
31113 rdev->fence_drv[ring].scratch_reg = -1;
31114 rdev->fence_drv[ring].cpu_addr = NULL;
31115 rdev->fence_drv[ring].gpu_addr = 0;
31116 - atomic_set(&rdev->fence_drv[ring].seq, 0);
31117 + atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
31118 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
31119 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
31120 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
31121 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
31122 index 48b7cea..342236f 100644
31123 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
31124 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
31125 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
31126 request = compat_alloc_user_space(sizeof(*request));
31127 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
31128 || __put_user(req32.param, &request->param)
31129 - || __put_user((void __user *)(unsigned long)req32.value,
31130 + || __put_user((unsigned long)req32.value,
31131 &request->value))
31132 return -EFAULT;
31133
31134 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
31135 index 00da384..32f972d 100644
31136 --- a/drivers/gpu/drm/radeon/radeon_irq.c
31137 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
31138 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
31139 unsigned int ret;
31140 RING_LOCALS;
31141
31142 - atomic_inc(&dev_priv->swi_emitted);
31143 - ret = atomic_read(&dev_priv->swi_emitted);
31144 + atomic_inc_unchecked(&dev_priv->swi_emitted);
31145 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
31146
31147 BEGIN_RING(4);
31148 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
31149 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
31150 drm_radeon_private_t *dev_priv =
31151 (drm_radeon_private_t *) dev->dev_private;
31152
31153 - atomic_set(&dev_priv->swi_emitted, 0);
31154 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
31155 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
31156
31157 dev->max_vblank_count = 0x001fffff;
31158 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
31159 index e8422ae..d22d4a8 100644
31160 --- a/drivers/gpu/drm/radeon/radeon_state.c
31161 +++ b/drivers/gpu/drm/radeon/radeon_state.c
31162 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
31163 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
31164 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
31165
31166 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31167 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31168 sarea_priv->nbox * sizeof(depth_boxes[0])))
31169 return -EFAULT;
31170
31171 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
31172 {
31173 drm_radeon_private_t *dev_priv = dev->dev_private;
31174 drm_radeon_getparam_t *param = data;
31175 - int value;
31176 + int value = 0;
31177
31178 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31179
31180 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
31181 index c421e77..e6bf2e8 100644
31182 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
31183 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
31184 @@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
31185 }
31186 if (unlikely(ttm_vm_ops == NULL)) {
31187 ttm_vm_ops = vma->vm_ops;
31188 - radeon_ttm_vm_ops = *ttm_vm_ops;
31189 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31190 + pax_open_kernel();
31191 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
31192 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31193 + pax_close_kernel();
31194 }
31195 vma->vm_ops = &radeon_ttm_vm_ops;
31196 return 0;
31197 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
31198 index f68dff2..8df955c 100644
31199 --- a/drivers/gpu/drm/radeon/rs690.c
31200 +++ b/drivers/gpu/drm/radeon/rs690.c
31201 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
31202 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31203 rdev->pm.sideport_bandwidth.full)
31204 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31205 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
31206 + read_delay_latency.full = dfixed_const(800 * 1000);
31207 read_delay_latency.full = dfixed_div(read_delay_latency,
31208 rdev->pm.igp_sideport_mclk);
31209 + a.full = dfixed_const(370);
31210 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
31211 } else {
31212 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31213 rdev->pm.k8_bandwidth.full)
31214 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31215 index 499debd..66fce72 100644
31216 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31217 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31218 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
31219 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31220 struct shrink_control *sc)
31221 {
31222 - static atomic_t start_pool = ATOMIC_INIT(0);
31223 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31224 unsigned i;
31225 - unsigned pool_offset = atomic_add_return(1, &start_pool);
31226 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31227 struct ttm_page_pool *pool;
31228 int shrink_pages = sc->nr_to_scan;
31229
31230 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
31231 index 88edacc..1e5412b 100644
31232 --- a/drivers/gpu/drm/via/via_drv.h
31233 +++ b/drivers/gpu/drm/via/via_drv.h
31234 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31235 typedef uint32_t maskarray_t[5];
31236
31237 typedef struct drm_via_irq {
31238 - atomic_t irq_received;
31239 + atomic_unchecked_t irq_received;
31240 uint32_t pending_mask;
31241 uint32_t enable_mask;
31242 wait_queue_head_t irq_queue;
31243 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
31244 struct timeval last_vblank;
31245 int last_vblank_valid;
31246 unsigned usec_per_vblank;
31247 - atomic_t vbl_received;
31248 + atomic_unchecked_t vbl_received;
31249 drm_via_state_t hc_state;
31250 char pci_buf[VIA_PCI_BUF_SIZE];
31251 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
31252 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31253 index d391f48..10c8ca3 100644
31254 --- a/drivers/gpu/drm/via/via_irq.c
31255 +++ b/drivers/gpu/drm/via/via_irq.c
31256 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
31257 if (crtc != 0)
31258 return 0;
31259
31260 - return atomic_read(&dev_priv->vbl_received);
31261 + return atomic_read_unchecked(&dev_priv->vbl_received);
31262 }
31263
31264 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31265 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31266
31267 status = VIA_READ(VIA_REG_INTERRUPT);
31268 if (status & VIA_IRQ_VBLANK_PENDING) {
31269 - atomic_inc(&dev_priv->vbl_received);
31270 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31271 + atomic_inc_unchecked(&dev_priv->vbl_received);
31272 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31273 do_gettimeofday(&cur_vblank);
31274 if (dev_priv->last_vblank_valid) {
31275 dev_priv->usec_per_vblank =
31276 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31277 dev_priv->last_vblank = cur_vblank;
31278 dev_priv->last_vblank_valid = 1;
31279 }
31280 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31281 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31282 DRM_DEBUG("US per vblank is: %u\n",
31283 dev_priv->usec_per_vblank);
31284 }
31285 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31286
31287 for (i = 0; i < dev_priv->num_irqs; ++i) {
31288 if (status & cur_irq->pending_mask) {
31289 - atomic_inc(&cur_irq->irq_received);
31290 + atomic_inc_unchecked(&cur_irq->irq_received);
31291 DRM_WAKEUP(&cur_irq->irq_queue);
31292 handled = 1;
31293 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31294 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31295 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31296 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31297 masks[irq][4]));
31298 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31299 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31300 } else {
31301 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31302 (((cur_irq_sequence =
31303 - atomic_read(&cur_irq->irq_received)) -
31304 + atomic_read_unchecked(&cur_irq->irq_received)) -
31305 *sequence) <= (1 << 23)));
31306 }
31307 *sequence = cur_irq_sequence;
31308 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31309 }
31310
31311 for (i = 0; i < dev_priv->num_irqs; ++i) {
31312 - atomic_set(&cur_irq->irq_received, 0);
31313 + atomic_set_unchecked(&cur_irq->irq_received, 0);
31314 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31315 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31316 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31317 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31318 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31319 case VIA_IRQ_RELATIVE:
31320 irqwait->request.sequence +=
31321 - atomic_read(&cur_irq->irq_received);
31322 + atomic_read_unchecked(&cur_irq->irq_received);
31323 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31324 case VIA_IRQ_ABSOLUTE:
31325 break;
31326 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31327 index dc27970..f18b008 100644
31328 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31329 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31330 @@ -260,7 +260,7 @@ struct vmw_private {
31331 * Fencing and IRQs.
31332 */
31333
31334 - atomic_t marker_seq;
31335 + atomic_unchecked_t marker_seq;
31336 wait_queue_head_t fence_queue;
31337 wait_queue_head_t fifo_queue;
31338 int fence_queue_waiters; /* Protected by hw_mutex */
31339 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31340 index a0c2f12..68ae6cb 100644
31341 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31342 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31343 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31344 (unsigned int) min,
31345 (unsigned int) fifo->capabilities);
31346
31347 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31348 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31349 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31350 vmw_marker_queue_init(&fifo->marker_queue);
31351 return vmw_fifo_send_fence(dev_priv, &dummy);
31352 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31353 if (reserveable)
31354 iowrite32(bytes, fifo_mem +
31355 SVGA_FIFO_RESERVED);
31356 - return fifo_mem + (next_cmd >> 2);
31357 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31358 } else {
31359 need_bounce = true;
31360 }
31361 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31362
31363 fm = vmw_fifo_reserve(dev_priv, bytes);
31364 if (unlikely(fm == NULL)) {
31365 - *seqno = atomic_read(&dev_priv->marker_seq);
31366 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31367 ret = -ENOMEM;
31368 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31369 false, 3*HZ);
31370 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31371 }
31372
31373 do {
31374 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31375 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31376 } while (*seqno == 0);
31377
31378 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31379 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31380 index cabc95f..14b3d77 100644
31381 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31382 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31383 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31384 * emitted. Then the fence is stale and signaled.
31385 */
31386
31387 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31388 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31389 > VMW_FENCE_WRAP);
31390
31391 return ret;
31392 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31393
31394 if (fifo_idle)
31395 down_read(&fifo_state->rwsem);
31396 - signal_seq = atomic_read(&dev_priv->marker_seq);
31397 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31398 ret = 0;
31399
31400 for (;;) {
31401 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31402 index 8a8725c..afed796 100644
31403 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31404 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31405 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31406 while (!vmw_lag_lt(queue, us)) {
31407 spin_lock(&queue->lock);
31408 if (list_empty(&queue->head))
31409 - seqno = atomic_read(&dev_priv->marker_seq);
31410 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31411 else {
31412 marker = list_first_entry(&queue->head,
31413 struct vmw_marker, head);
31414 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31415 index 75dbe34..f9204a8 100644
31416 --- a/drivers/hid/hid-core.c
31417 +++ b/drivers/hid/hid-core.c
31418 @@ -2021,7 +2021,7 @@ static bool hid_ignore(struct hid_device *hdev)
31419
31420 int hid_add_device(struct hid_device *hdev)
31421 {
31422 - static atomic_t id = ATOMIC_INIT(0);
31423 + static atomic_unchecked_t id = ATOMIC_INIT(0);
31424 int ret;
31425
31426 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31427 @@ -2036,7 +2036,7 @@ int hid_add_device(struct hid_device *hdev)
31428 /* XXX hack, any other cleaner solution after the driver core
31429 * is converted to allow more than 20 bytes as the device name? */
31430 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31431 - hdev->vendor, hdev->product, atomic_inc_return(&id));
31432 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31433
31434 hid_debug_register(hdev, dev_name(&hdev->dev));
31435 ret = device_add(&hdev->dev);
31436 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31437 index b1ec0e2..c295a61 100644
31438 --- a/drivers/hid/usbhid/hiddev.c
31439 +++ b/drivers/hid/usbhid/hiddev.c
31440 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31441 break;
31442
31443 case HIDIOCAPPLICATION:
31444 - if (arg < 0 || arg >= hid->maxapplication)
31445 + if (arg >= hid->maxapplication)
31446 break;
31447
31448 for (i = 0; i < hid->maxcollection; i++)
31449 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31450 index 4065374..10ed7dc 100644
31451 --- a/drivers/hv/channel.c
31452 +++ b/drivers/hv/channel.c
31453 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31454 int ret = 0;
31455 int t;
31456
31457 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31458 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31459 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31460 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31461
31462 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31463 if (ret)
31464 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31465 index 12aa97f..c0679f7 100644
31466 --- a/drivers/hv/hv.c
31467 +++ b/drivers/hv/hv.c
31468 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31469 u64 output_address = (output) ? virt_to_phys(output) : 0;
31470 u32 output_address_hi = output_address >> 32;
31471 u32 output_address_lo = output_address & 0xFFFFFFFF;
31472 - void *hypercall_page = hv_context.hypercall_page;
31473 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31474
31475 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31476 "=a"(hv_status_lo) : "d" (control_hi),
31477 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31478 index 6d7d286..92b0873 100644
31479 --- a/drivers/hv/hyperv_vmbus.h
31480 +++ b/drivers/hv/hyperv_vmbus.h
31481 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
31482 struct vmbus_connection {
31483 enum vmbus_connect_state conn_state;
31484
31485 - atomic_t next_gpadl_handle;
31486 + atomic_unchecked_t next_gpadl_handle;
31487
31488 /*
31489 * Represents channel interrupts. Each bit position represents a
31490 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31491 index a220e57..428f54d 100644
31492 --- a/drivers/hv/vmbus_drv.c
31493 +++ b/drivers/hv/vmbus_drv.c
31494 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31495 {
31496 int ret = 0;
31497
31498 - static atomic_t device_num = ATOMIC_INIT(0);
31499 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31500
31501 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31502 - atomic_inc_return(&device_num));
31503 + atomic_inc_return_unchecked(&device_num));
31504
31505 child_device_obj->device.bus = &hv_bus;
31506 child_device_obj->device.parent = &hv_acpi_dev->dev;
31507 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31508 index 554f046..f8b4729 100644
31509 --- a/drivers/hwmon/acpi_power_meter.c
31510 +++ b/drivers/hwmon/acpi_power_meter.c
31511 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31512 return res;
31513
31514 temp /= 1000;
31515 - if (temp < 0)
31516 - return -EINVAL;
31517
31518 mutex_lock(&resource->lock);
31519 resource->trip[attr->index - 7] = temp;
31520 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31521 index 91fdd1f..b66a686 100644
31522 --- a/drivers/hwmon/sht15.c
31523 +++ b/drivers/hwmon/sht15.c
31524 @@ -166,7 +166,7 @@ struct sht15_data {
31525 int supply_uV;
31526 bool supply_uV_valid;
31527 struct work_struct update_supply_work;
31528 - atomic_t interrupt_handled;
31529 + atomic_unchecked_t interrupt_handled;
31530 };
31531
31532 /**
31533 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31534 return ret;
31535
31536 gpio_direction_input(data->pdata->gpio_data);
31537 - atomic_set(&data->interrupt_handled, 0);
31538 + atomic_set_unchecked(&data->interrupt_handled, 0);
31539
31540 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31541 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31542 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31543 /* Only relevant if the interrupt hasn't occurred. */
31544 - if (!atomic_read(&data->interrupt_handled))
31545 + if (!atomic_read_unchecked(&data->interrupt_handled))
31546 schedule_work(&data->read_work);
31547 }
31548 ret = wait_event_timeout(data->wait_queue,
31549 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31550
31551 /* First disable the interrupt */
31552 disable_irq_nosync(irq);
31553 - atomic_inc(&data->interrupt_handled);
31554 + atomic_inc_unchecked(&data->interrupt_handled);
31555 /* Then schedule a reading work struct */
31556 if (data->state != SHT15_READING_NOTHING)
31557 schedule_work(&data->read_work);
31558 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31559 * If not, then start the interrupt again - care here as could
31560 * have gone low in meantime so verify it hasn't!
31561 */
31562 - atomic_set(&data->interrupt_handled, 0);
31563 + atomic_set_unchecked(&data->interrupt_handled, 0);
31564 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31565 /* If still not occurred or another handler has been scheduled */
31566 if (gpio_get_value(data->pdata->gpio_data)
31567 - || atomic_read(&data->interrupt_handled))
31568 + || atomic_read_unchecked(&data->interrupt_handled))
31569 return;
31570 }
31571
31572 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31573 index 378fcb5..5e91fa8 100644
31574 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
31575 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31576 @@ -43,7 +43,7 @@
31577 extern struct i2c_adapter amd756_smbus;
31578
31579 static struct i2c_adapter *s4882_adapter;
31580 -static struct i2c_algorithm *s4882_algo;
31581 +static i2c_algorithm_no_const *s4882_algo;
31582
31583 /* Wrapper access functions for multiplexed SMBus */
31584 static DEFINE_MUTEX(amd756_lock);
31585 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31586 index 29015eb..af2d8e9 100644
31587 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31588 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31589 @@ -41,7 +41,7 @@
31590 extern struct i2c_adapter *nforce2_smbus;
31591
31592 static struct i2c_adapter *s4985_adapter;
31593 -static struct i2c_algorithm *s4985_algo;
31594 +static i2c_algorithm_no_const *s4985_algo;
31595
31596 /* Wrapper access functions for multiplexed SMBus */
31597 static DEFINE_MUTEX(nforce2_lock);
31598 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31599 index d7a4833..7fae376 100644
31600 --- a/drivers/i2c/i2c-mux.c
31601 +++ b/drivers/i2c/i2c-mux.c
31602 @@ -28,7 +28,7 @@
31603 /* multiplexer per channel data */
31604 struct i2c_mux_priv {
31605 struct i2c_adapter adap;
31606 - struct i2c_algorithm algo;
31607 + i2c_algorithm_no_const algo;
31608
31609 struct i2c_adapter *parent;
31610 void *mux_dev; /* the mux chip/device */
31611 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31612 index 57d00ca..0145194 100644
31613 --- a/drivers/ide/aec62xx.c
31614 +++ b/drivers/ide/aec62xx.c
31615 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31616 .cable_detect = atp86x_cable_detect,
31617 };
31618
31619 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31620 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31621 { /* 0: AEC6210 */
31622 .name = DRV_NAME,
31623 .init_chipset = init_chipset_aec62xx,
31624 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31625 index 2c8016a..911a27c 100644
31626 --- a/drivers/ide/alim15x3.c
31627 +++ b/drivers/ide/alim15x3.c
31628 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31629 .dma_sff_read_status = ide_dma_sff_read_status,
31630 };
31631
31632 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
31633 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
31634 .name = DRV_NAME,
31635 .init_chipset = init_chipset_ali15x3,
31636 .init_hwif = init_hwif_ali15x3,
31637 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31638 index 3747b25..56fc995 100644
31639 --- a/drivers/ide/amd74xx.c
31640 +++ b/drivers/ide/amd74xx.c
31641 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31642 .udma_mask = udma, \
31643 }
31644
31645 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31646 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31647 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31648 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31649 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31650 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31651 index 15f0ead..cb43480 100644
31652 --- a/drivers/ide/atiixp.c
31653 +++ b/drivers/ide/atiixp.c
31654 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31655 .cable_detect = atiixp_cable_detect,
31656 };
31657
31658 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31659 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31660 { /* 0: IXP200/300/400/700 */
31661 .name = DRV_NAME,
31662 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31663 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31664 index 5f80312..d1fc438 100644
31665 --- a/drivers/ide/cmd64x.c
31666 +++ b/drivers/ide/cmd64x.c
31667 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31668 .dma_sff_read_status = ide_dma_sff_read_status,
31669 };
31670
31671 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31672 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31673 { /* 0: CMD643 */
31674 .name = DRV_NAME,
31675 .init_chipset = init_chipset_cmd64x,
31676 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31677 index 2c1e5f7..1444762 100644
31678 --- a/drivers/ide/cs5520.c
31679 +++ b/drivers/ide/cs5520.c
31680 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31681 .set_dma_mode = cs5520_set_dma_mode,
31682 };
31683
31684 -static const struct ide_port_info cyrix_chipset __devinitdata = {
31685 +static const struct ide_port_info cyrix_chipset __devinitconst = {
31686 .name = DRV_NAME,
31687 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31688 .port_ops = &cs5520_port_ops,
31689 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31690 index 4dc4eb9..49b40ad 100644
31691 --- a/drivers/ide/cs5530.c
31692 +++ b/drivers/ide/cs5530.c
31693 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31694 .udma_filter = cs5530_udma_filter,
31695 };
31696
31697 -static const struct ide_port_info cs5530_chipset __devinitdata = {
31698 +static const struct ide_port_info cs5530_chipset __devinitconst = {
31699 .name = DRV_NAME,
31700 .init_chipset = init_chipset_cs5530,
31701 .init_hwif = init_hwif_cs5530,
31702 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31703 index 5059faf..18d4c85 100644
31704 --- a/drivers/ide/cs5535.c
31705 +++ b/drivers/ide/cs5535.c
31706 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31707 .cable_detect = cs5535_cable_detect,
31708 };
31709
31710 -static const struct ide_port_info cs5535_chipset __devinitdata = {
31711 +static const struct ide_port_info cs5535_chipset __devinitconst = {
31712 .name = DRV_NAME,
31713 .port_ops = &cs5535_port_ops,
31714 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31715 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31716 index 847553f..3ffb49d 100644
31717 --- a/drivers/ide/cy82c693.c
31718 +++ b/drivers/ide/cy82c693.c
31719 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31720 .set_dma_mode = cy82c693_set_dma_mode,
31721 };
31722
31723 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
31724 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
31725 .name = DRV_NAME,
31726 .init_iops = init_iops_cy82c693,
31727 .port_ops = &cy82c693_port_ops,
31728 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31729 index 58c51cd..4aec3b8 100644
31730 --- a/drivers/ide/hpt366.c
31731 +++ b/drivers/ide/hpt366.c
31732 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31733 }
31734 };
31735
31736 -static const struct hpt_info hpt36x __devinitdata = {
31737 +static const struct hpt_info hpt36x __devinitconst = {
31738 .chip_name = "HPT36x",
31739 .chip_type = HPT36x,
31740 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31741 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31742 .timings = &hpt36x_timings
31743 };
31744
31745 -static const struct hpt_info hpt370 __devinitdata = {
31746 +static const struct hpt_info hpt370 __devinitconst = {
31747 .chip_name = "HPT370",
31748 .chip_type = HPT370,
31749 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31750 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31751 .timings = &hpt37x_timings
31752 };
31753
31754 -static const struct hpt_info hpt370a __devinitdata = {
31755 +static const struct hpt_info hpt370a __devinitconst = {
31756 .chip_name = "HPT370A",
31757 .chip_type = HPT370A,
31758 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31759 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31760 .timings = &hpt37x_timings
31761 };
31762
31763 -static const struct hpt_info hpt374 __devinitdata = {
31764 +static const struct hpt_info hpt374 __devinitconst = {
31765 .chip_name = "HPT374",
31766 .chip_type = HPT374,
31767 .udma_mask = ATA_UDMA5,
31768 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31769 .timings = &hpt37x_timings
31770 };
31771
31772 -static const struct hpt_info hpt372 __devinitdata = {
31773 +static const struct hpt_info hpt372 __devinitconst = {
31774 .chip_name = "HPT372",
31775 .chip_type = HPT372,
31776 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31777 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31778 .timings = &hpt37x_timings
31779 };
31780
31781 -static const struct hpt_info hpt372a __devinitdata = {
31782 +static const struct hpt_info hpt372a __devinitconst = {
31783 .chip_name = "HPT372A",
31784 .chip_type = HPT372A,
31785 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31786 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31787 .timings = &hpt37x_timings
31788 };
31789
31790 -static const struct hpt_info hpt302 __devinitdata = {
31791 +static const struct hpt_info hpt302 __devinitconst = {
31792 .chip_name = "HPT302",
31793 .chip_type = HPT302,
31794 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31795 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31796 .timings = &hpt37x_timings
31797 };
31798
31799 -static const struct hpt_info hpt371 __devinitdata = {
31800 +static const struct hpt_info hpt371 __devinitconst = {
31801 .chip_name = "HPT371",
31802 .chip_type = HPT371,
31803 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31804 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31805 .timings = &hpt37x_timings
31806 };
31807
31808 -static const struct hpt_info hpt372n __devinitdata = {
31809 +static const struct hpt_info hpt372n __devinitconst = {
31810 .chip_name = "HPT372N",
31811 .chip_type = HPT372N,
31812 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31813 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31814 .timings = &hpt37x_timings
31815 };
31816
31817 -static const struct hpt_info hpt302n __devinitdata = {
31818 +static const struct hpt_info hpt302n __devinitconst = {
31819 .chip_name = "HPT302N",
31820 .chip_type = HPT302N,
31821 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31822 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31823 .timings = &hpt37x_timings
31824 };
31825
31826 -static const struct hpt_info hpt371n __devinitdata = {
31827 +static const struct hpt_info hpt371n __devinitconst = {
31828 .chip_name = "HPT371N",
31829 .chip_type = HPT371N,
31830 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31831 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31832 .dma_sff_read_status = ide_dma_sff_read_status,
31833 };
31834
31835 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31836 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31837 { /* 0: HPT36x */
31838 .name = DRV_NAME,
31839 .init_chipset = init_chipset_hpt366,
31840 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31841 index 8126824..55a2798 100644
31842 --- a/drivers/ide/ide-cd.c
31843 +++ b/drivers/ide/ide-cd.c
31844 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31845 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31846 if ((unsigned long)buf & alignment
31847 || blk_rq_bytes(rq) & q->dma_pad_mask
31848 - || object_is_on_stack(buf))
31849 + || object_starts_on_stack(buf))
31850 drive->dma = 0;
31851 }
31852 }
31853 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31854 index 7f56b73..dab5b67 100644
31855 --- a/drivers/ide/ide-pci-generic.c
31856 +++ b/drivers/ide/ide-pci-generic.c
31857 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31858 .udma_mask = ATA_UDMA6, \
31859 }
31860
31861 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
31862 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
31863 /* 0: Unknown */
31864 DECLARE_GENERIC_PCI_DEV(0),
31865
31866 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31867 index 560e66d..d5dd180 100644
31868 --- a/drivers/ide/it8172.c
31869 +++ b/drivers/ide/it8172.c
31870 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31871 .set_dma_mode = it8172_set_dma_mode,
31872 };
31873
31874 -static const struct ide_port_info it8172_port_info __devinitdata = {
31875 +static const struct ide_port_info it8172_port_info __devinitconst = {
31876 .name = DRV_NAME,
31877 .port_ops = &it8172_port_ops,
31878 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31879 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31880 index 46816ba..1847aeb 100644
31881 --- a/drivers/ide/it8213.c
31882 +++ b/drivers/ide/it8213.c
31883 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31884 .cable_detect = it8213_cable_detect,
31885 };
31886
31887 -static const struct ide_port_info it8213_chipset __devinitdata = {
31888 +static const struct ide_port_info it8213_chipset __devinitconst = {
31889 .name = DRV_NAME,
31890 .enablebits = { {0x41, 0x80, 0x80} },
31891 .port_ops = &it8213_port_ops,
31892 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31893 index 2e3169f..c5611db 100644
31894 --- a/drivers/ide/it821x.c
31895 +++ b/drivers/ide/it821x.c
31896 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31897 .cable_detect = it821x_cable_detect,
31898 };
31899
31900 -static const struct ide_port_info it821x_chipset __devinitdata = {
31901 +static const struct ide_port_info it821x_chipset __devinitconst = {
31902 .name = DRV_NAME,
31903 .init_chipset = init_chipset_it821x,
31904 .init_hwif = init_hwif_it821x,
31905 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31906 index 74c2c4a..efddd7d 100644
31907 --- a/drivers/ide/jmicron.c
31908 +++ b/drivers/ide/jmicron.c
31909 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31910 .cable_detect = jmicron_cable_detect,
31911 };
31912
31913 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31914 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31915 .name = DRV_NAME,
31916 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31917 .port_ops = &jmicron_port_ops,
31918 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31919 index 95327a2..73f78d8 100644
31920 --- a/drivers/ide/ns87415.c
31921 +++ b/drivers/ide/ns87415.c
31922 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31923 .dma_sff_read_status = superio_dma_sff_read_status,
31924 };
31925
31926 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31927 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31928 .name = DRV_NAME,
31929 .init_hwif = init_hwif_ns87415,
31930 .tp_ops = &ns87415_tp_ops,
31931 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31932 index 1a53a4c..39edc66 100644
31933 --- a/drivers/ide/opti621.c
31934 +++ b/drivers/ide/opti621.c
31935 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31936 .set_pio_mode = opti621_set_pio_mode,
31937 };
31938
31939 -static const struct ide_port_info opti621_chipset __devinitdata = {
31940 +static const struct ide_port_info opti621_chipset __devinitconst = {
31941 .name = DRV_NAME,
31942 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31943 .port_ops = &opti621_port_ops,
31944 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31945 index 9546fe2..2e5ceb6 100644
31946 --- a/drivers/ide/pdc202xx_new.c
31947 +++ b/drivers/ide/pdc202xx_new.c
31948 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31949 .udma_mask = udma, \
31950 }
31951
31952 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31953 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31954 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31955 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31956 };
31957 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31958 index 3a35ec6..5634510 100644
31959 --- a/drivers/ide/pdc202xx_old.c
31960 +++ b/drivers/ide/pdc202xx_old.c
31961 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31962 .max_sectors = sectors, \
31963 }
31964
31965 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31966 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31967 { /* 0: PDC20246 */
31968 .name = DRV_NAME,
31969 .init_chipset = init_chipset_pdc202xx,
31970 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31971 index 1892e81..fe0fd60 100644
31972 --- a/drivers/ide/piix.c
31973 +++ b/drivers/ide/piix.c
31974 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31975 .udma_mask = udma, \
31976 }
31977
31978 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31979 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31980 /* 0: MPIIX */
31981 { /*
31982 * MPIIX actually has only a single IDE channel mapped to
31983 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31984 index a6414a8..c04173e 100644
31985 --- a/drivers/ide/rz1000.c
31986 +++ b/drivers/ide/rz1000.c
31987 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31988 }
31989 }
31990
31991 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31992 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31993 .name = DRV_NAME,
31994 .host_flags = IDE_HFLAG_NO_DMA,
31995 };
31996 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31997 index 356b9b5..d4758eb 100644
31998 --- a/drivers/ide/sc1200.c
31999 +++ b/drivers/ide/sc1200.c
32000 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
32001 .dma_sff_read_status = ide_dma_sff_read_status,
32002 };
32003
32004 -static const struct ide_port_info sc1200_chipset __devinitdata = {
32005 +static const struct ide_port_info sc1200_chipset __devinitconst = {
32006 .name = DRV_NAME,
32007 .port_ops = &sc1200_port_ops,
32008 .dma_ops = &sc1200_dma_ops,
32009 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
32010 index b7f5b0c..9701038 100644
32011 --- a/drivers/ide/scc_pata.c
32012 +++ b/drivers/ide/scc_pata.c
32013 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
32014 .dma_sff_read_status = scc_dma_sff_read_status,
32015 };
32016
32017 -static const struct ide_port_info scc_chipset __devinitdata = {
32018 +static const struct ide_port_info scc_chipset __devinitconst = {
32019 .name = "sccIDE",
32020 .init_iops = init_iops_scc,
32021 .init_dma = scc_init_dma,
32022 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
32023 index 35fb8da..24d72ef 100644
32024 --- a/drivers/ide/serverworks.c
32025 +++ b/drivers/ide/serverworks.c
32026 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
32027 .cable_detect = svwks_cable_detect,
32028 };
32029
32030 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
32031 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
32032 { /* 0: OSB4 */
32033 .name = DRV_NAME,
32034 .init_chipset = init_chipset_svwks,
32035 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
32036 index ddeda44..46f7e30 100644
32037 --- a/drivers/ide/siimage.c
32038 +++ b/drivers/ide/siimage.c
32039 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
32040 .udma_mask = ATA_UDMA6, \
32041 }
32042
32043 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
32044 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
32045 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
32046 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
32047 };
32048 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
32049 index 4a00225..09e61b4 100644
32050 --- a/drivers/ide/sis5513.c
32051 +++ b/drivers/ide/sis5513.c
32052 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
32053 .cable_detect = sis_cable_detect,
32054 };
32055
32056 -static const struct ide_port_info sis5513_chipset __devinitdata = {
32057 +static const struct ide_port_info sis5513_chipset __devinitconst = {
32058 .name = DRV_NAME,
32059 .init_chipset = init_chipset_sis5513,
32060 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
32061 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
32062 index f21dc2a..d051cd2 100644
32063 --- a/drivers/ide/sl82c105.c
32064 +++ b/drivers/ide/sl82c105.c
32065 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
32066 .dma_sff_read_status = ide_dma_sff_read_status,
32067 };
32068
32069 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
32070 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
32071 .name = DRV_NAME,
32072 .init_chipset = init_chipset_sl82c105,
32073 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
32074 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
32075 index 864ffe0..863a5e9 100644
32076 --- a/drivers/ide/slc90e66.c
32077 +++ b/drivers/ide/slc90e66.c
32078 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
32079 .cable_detect = slc90e66_cable_detect,
32080 };
32081
32082 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
32083 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
32084 .name = DRV_NAME,
32085 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
32086 .port_ops = &slc90e66_port_ops,
32087 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
32088 index 4799d5c..1794678 100644
32089 --- a/drivers/ide/tc86c001.c
32090 +++ b/drivers/ide/tc86c001.c
32091 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
32092 .dma_sff_read_status = ide_dma_sff_read_status,
32093 };
32094
32095 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
32096 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
32097 .name = DRV_NAME,
32098 .init_hwif = init_hwif_tc86c001,
32099 .port_ops = &tc86c001_port_ops,
32100 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
32101 index 281c914..55ce1b8 100644
32102 --- a/drivers/ide/triflex.c
32103 +++ b/drivers/ide/triflex.c
32104 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
32105 .set_dma_mode = triflex_set_mode,
32106 };
32107
32108 -static const struct ide_port_info triflex_device __devinitdata = {
32109 +static const struct ide_port_info triflex_device __devinitconst = {
32110 .name = DRV_NAME,
32111 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
32112 .port_ops = &triflex_port_ops,
32113 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
32114 index 4b42ca0..e494a98 100644
32115 --- a/drivers/ide/trm290.c
32116 +++ b/drivers/ide/trm290.c
32117 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
32118 .dma_check = trm290_dma_check,
32119 };
32120
32121 -static const struct ide_port_info trm290_chipset __devinitdata = {
32122 +static const struct ide_port_info trm290_chipset __devinitconst = {
32123 .name = DRV_NAME,
32124 .init_hwif = init_hwif_trm290,
32125 .tp_ops = &trm290_tp_ops,
32126 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
32127 index f46f49c..eb77678 100644
32128 --- a/drivers/ide/via82cxxx.c
32129 +++ b/drivers/ide/via82cxxx.c
32130 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
32131 .cable_detect = via82cxxx_cable_detect,
32132 };
32133
32134 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
32135 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
32136 .name = DRV_NAME,
32137 .init_chipset = init_chipset_via82cxxx,
32138 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
32139 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
32140 index 73d4531..c90cd2d 100644
32141 --- a/drivers/ieee802154/fakehard.c
32142 +++ b/drivers/ieee802154/fakehard.c
32143 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
32144 phy->transmit_power = 0xbf;
32145
32146 dev->netdev_ops = &fake_ops;
32147 - dev->ml_priv = &fake_mlme;
32148 + dev->ml_priv = (void *)&fake_mlme;
32149
32150 priv = netdev_priv(dev);
32151 priv->phy = phy;
32152 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
32153 index c889aae..6cf5aa7 100644
32154 --- a/drivers/infiniband/core/cm.c
32155 +++ b/drivers/infiniband/core/cm.c
32156 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
32157
32158 struct cm_counter_group {
32159 struct kobject obj;
32160 - atomic_long_t counter[CM_ATTR_COUNT];
32161 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
32162 };
32163
32164 struct cm_counter_attribute {
32165 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
32166 struct ib_mad_send_buf *msg = NULL;
32167 int ret;
32168
32169 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32170 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32171 counter[CM_REQ_COUNTER]);
32172
32173 /* Quick state check to discard duplicate REQs. */
32174 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
32175 if (!cm_id_priv)
32176 return;
32177
32178 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32179 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32180 counter[CM_REP_COUNTER]);
32181 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
32182 if (ret)
32183 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
32184 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
32185 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
32186 spin_unlock_irq(&cm_id_priv->lock);
32187 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32188 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32189 counter[CM_RTU_COUNTER]);
32190 goto out;
32191 }
32192 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
32193 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
32194 dreq_msg->local_comm_id);
32195 if (!cm_id_priv) {
32196 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32197 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32198 counter[CM_DREQ_COUNTER]);
32199 cm_issue_drep(work->port, work->mad_recv_wc);
32200 return -EINVAL;
32201 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
32202 case IB_CM_MRA_REP_RCVD:
32203 break;
32204 case IB_CM_TIMEWAIT:
32205 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32206 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32207 counter[CM_DREQ_COUNTER]);
32208 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32209 goto unlock;
32210 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
32211 cm_free_msg(msg);
32212 goto deref;
32213 case IB_CM_DREQ_RCVD:
32214 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32215 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32216 counter[CM_DREQ_COUNTER]);
32217 goto unlock;
32218 default:
32219 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
32220 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32221 cm_id_priv->msg, timeout)) {
32222 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32223 - atomic_long_inc(&work->port->
32224 + atomic_long_inc_unchecked(&work->port->
32225 counter_group[CM_RECV_DUPLICATES].
32226 counter[CM_MRA_COUNTER]);
32227 goto out;
32228 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
32229 break;
32230 case IB_CM_MRA_REQ_RCVD:
32231 case IB_CM_MRA_REP_RCVD:
32232 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32233 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32234 counter[CM_MRA_COUNTER]);
32235 /* fall through */
32236 default:
32237 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
32238 case IB_CM_LAP_IDLE:
32239 break;
32240 case IB_CM_MRA_LAP_SENT:
32241 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32242 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32243 counter[CM_LAP_COUNTER]);
32244 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32245 goto unlock;
32246 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32247 cm_free_msg(msg);
32248 goto deref;
32249 case IB_CM_LAP_RCVD:
32250 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32251 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32252 counter[CM_LAP_COUNTER]);
32253 goto unlock;
32254 default:
32255 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32256 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32257 if (cur_cm_id_priv) {
32258 spin_unlock_irq(&cm.lock);
32259 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32260 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32261 counter[CM_SIDR_REQ_COUNTER]);
32262 goto out; /* Duplicate message. */
32263 }
32264 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32265 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32266 msg->retries = 1;
32267
32268 - atomic_long_add(1 + msg->retries,
32269 + atomic_long_add_unchecked(1 + msg->retries,
32270 &port->counter_group[CM_XMIT].counter[attr_index]);
32271 if (msg->retries)
32272 - atomic_long_add(msg->retries,
32273 + atomic_long_add_unchecked(msg->retries,
32274 &port->counter_group[CM_XMIT_RETRIES].
32275 counter[attr_index]);
32276
32277 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32278 }
32279
32280 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32281 - atomic_long_inc(&port->counter_group[CM_RECV].
32282 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32283 counter[attr_id - CM_ATTR_ID_OFFSET]);
32284
32285 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32286 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32287 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32288
32289 return sprintf(buf, "%ld\n",
32290 - atomic_long_read(&group->counter[cm_attr->index]));
32291 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32292 }
32293
32294 static const struct sysfs_ops cm_counter_ops = {
32295 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32296 index 176c8f9..2627b62 100644
32297 --- a/drivers/infiniband/core/fmr_pool.c
32298 +++ b/drivers/infiniband/core/fmr_pool.c
32299 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
32300
32301 struct task_struct *thread;
32302
32303 - atomic_t req_ser;
32304 - atomic_t flush_ser;
32305 + atomic_unchecked_t req_ser;
32306 + atomic_unchecked_t flush_ser;
32307
32308 wait_queue_head_t force_wait;
32309 };
32310 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32311 struct ib_fmr_pool *pool = pool_ptr;
32312
32313 do {
32314 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32315 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32316 ib_fmr_batch_release(pool);
32317
32318 - atomic_inc(&pool->flush_ser);
32319 + atomic_inc_unchecked(&pool->flush_ser);
32320 wake_up_interruptible(&pool->force_wait);
32321
32322 if (pool->flush_function)
32323 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32324 }
32325
32326 set_current_state(TASK_INTERRUPTIBLE);
32327 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32328 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32329 !kthread_should_stop())
32330 schedule();
32331 __set_current_state(TASK_RUNNING);
32332 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32333 pool->dirty_watermark = params->dirty_watermark;
32334 pool->dirty_len = 0;
32335 spin_lock_init(&pool->pool_lock);
32336 - atomic_set(&pool->req_ser, 0);
32337 - atomic_set(&pool->flush_ser, 0);
32338 + atomic_set_unchecked(&pool->req_ser, 0);
32339 + atomic_set_unchecked(&pool->flush_ser, 0);
32340 init_waitqueue_head(&pool->force_wait);
32341
32342 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32343 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32344 }
32345 spin_unlock_irq(&pool->pool_lock);
32346
32347 - serial = atomic_inc_return(&pool->req_ser);
32348 + serial = atomic_inc_return_unchecked(&pool->req_ser);
32349 wake_up_process(pool->thread);
32350
32351 if (wait_event_interruptible(pool->force_wait,
32352 - atomic_read(&pool->flush_ser) - serial >= 0))
32353 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32354 return -EINTR;
32355
32356 return 0;
32357 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32358 } else {
32359 list_add_tail(&fmr->list, &pool->dirty_list);
32360 if (++pool->dirty_len >= pool->dirty_watermark) {
32361 - atomic_inc(&pool->req_ser);
32362 + atomic_inc_unchecked(&pool->req_ser);
32363 wake_up_process(pool->thread);
32364 }
32365 }
32366 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32367 index 40c8353..946b0e4 100644
32368 --- a/drivers/infiniband/hw/cxgb4/mem.c
32369 +++ b/drivers/infiniband/hw/cxgb4/mem.c
32370 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32371 int err;
32372 struct fw_ri_tpte tpt;
32373 u32 stag_idx;
32374 - static atomic_t key;
32375 + static atomic_unchecked_t key;
32376
32377 if (c4iw_fatal_error(rdev))
32378 return -EIO;
32379 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32380 &rdev->resource.tpt_fifo_lock);
32381 if (!stag_idx)
32382 return -ENOMEM;
32383 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32384 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32385 }
32386 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32387 __func__, stag_state, type, pdid, stag_idx);
32388 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32389 index 79b3dbc..96e5fcc 100644
32390 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
32391 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32392 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32393 struct ib_atomic_eth *ateth;
32394 struct ipath_ack_entry *e;
32395 u64 vaddr;
32396 - atomic64_t *maddr;
32397 + atomic64_unchecked_t *maddr;
32398 u64 sdata;
32399 u32 rkey;
32400 u8 next;
32401 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32402 IB_ACCESS_REMOTE_ATOMIC)))
32403 goto nack_acc_unlck;
32404 /* Perform atomic OP and save result. */
32405 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32406 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32407 sdata = be64_to_cpu(ateth->swap_data);
32408 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32409 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32410 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32411 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32412 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32413 be64_to_cpu(ateth->compare_data),
32414 sdata);
32415 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32416 index 1f95bba..9530f87 100644
32417 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32418 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32419 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32420 unsigned long flags;
32421 struct ib_wc wc;
32422 u64 sdata;
32423 - atomic64_t *maddr;
32424 + atomic64_unchecked_t *maddr;
32425 enum ib_wc_status send_status;
32426
32427 /*
32428 @@ -382,11 +382,11 @@ again:
32429 IB_ACCESS_REMOTE_ATOMIC)))
32430 goto acc_err;
32431 /* Perform atomic OP and save result. */
32432 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32433 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32434 sdata = wqe->wr.wr.atomic.compare_add;
32435 *(u64 *) sqp->s_sge.sge.vaddr =
32436 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32437 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32438 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32439 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32440 sdata, wqe->wr.wr.atomic.swap);
32441 goto send_comp;
32442 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32443 index 7140199..da60063 100644
32444 --- a/drivers/infiniband/hw/nes/nes.c
32445 +++ b/drivers/infiniband/hw/nes/nes.c
32446 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32447 LIST_HEAD(nes_adapter_list);
32448 static LIST_HEAD(nes_dev_list);
32449
32450 -atomic_t qps_destroyed;
32451 +atomic_unchecked_t qps_destroyed;
32452
32453 static unsigned int ee_flsh_adapter;
32454 static unsigned int sysfs_nonidx_addr;
32455 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32456 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32457 struct nes_adapter *nesadapter = nesdev->nesadapter;
32458
32459 - atomic_inc(&qps_destroyed);
32460 + atomic_inc_unchecked(&qps_destroyed);
32461
32462 /* Free the control structures */
32463
32464 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32465 index c438e46..ca30356 100644
32466 --- a/drivers/infiniband/hw/nes/nes.h
32467 +++ b/drivers/infiniband/hw/nes/nes.h
32468 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32469 extern unsigned int wqm_quanta;
32470 extern struct list_head nes_adapter_list;
32471
32472 -extern atomic_t cm_connects;
32473 -extern atomic_t cm_accepts;
32474 -extern atomic_t cm_disconnects;
32475 -extern atomic_t cm_closes;
32476 -extern atomic_t cm_connecteds;
32477 -extern atomic_t cm_connect_reqs;
32478 -extern atomic_t cm_rejects;
32479 -extern atomic_t mod_qp_timouts;
32480 -extern atomic_t qps_created;
32481 -extern atomic_t qps_destroyed;
32482 -extern atomic_t sw_qps_destroyed;
32483 +extern atomic_unchecked_t cm_connects;
32484 +extern atomic_unchecked_t cm_accepts;
32485 +extern atomic_unchecked_t cm_disconnects;
32486 +extern atomic_unchecked_t cm_closes;
32487 +extern atomic_unchecked_t cm_connecteds;
32488 +extern atomic_unchecked_t cm_connect_reqs;
32489 +extern atomic_unchecked_t cm_rejects;
32490 +extern atomic_unchecked_t mod_qp_timouts;
32491 +extern atomic_unchecked_t qps_created;
32492 +extern atomic_unchecked_t qps_destroyed;
32493 +extern atomic_unchecked_t sw_qps_destroyed;
32494 extern u32 mh_detected;
32495 extern u32 mh_pauses_sent;
32496 extern u32 cm_packets_sent;
32497 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32498 extern u32 cm_packets_received;
32499 extern u32 cm_packets_dropped;
32500 extern u32 cm_packets_retrans;
32501 -extern atomic_t cm_listens_created;
32502 -extern atomic_t cm_listens_destroyed;
32503 +extern atomic_unchecked_t cm_listens_created;
32504 +extern atomic_unchecked_t cm_listens_destroyed;
32505 extern u32 cm_backlog_drops;
32506 -extern atomic_t cm_loopbacks;
32507 -extern atomic_t cm_nodes_created;
32508 -extern atomic_t cm_nodes_destroyed;
32509 -extern atomic_t cm_accel_dropped_pkts;
32510 -extern atomic_t cm_resets_recvd;
32511 -extern atomic_t pau_qps_created;
32512 -extern atomic_t pau_qps_destroyed;
32513 +extern atomic_unchecked_t cm_loopbacks;
32514 +extern atomic_unchecked_t cm_nodes_created;
32515 +extern atomic_unchecked_t cm_nodes_destroyed;
32516 +extern atomic_unchecked_t cm_accel_dropped_pkts;
32517 +extern atomic_unchecked_t cm_resets_recvd;
32518 +extern atomic_unchecked_t pau_qps_created;
32519 +extern atomic_unchecked_t pau_qps_destroyed;
32520
32521 extern u32 int_mod_timer_init;
32522 extern u32 int_mod_cq_depth_256;
32523 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32524 index a4972ab..1bcfc31 100644
32525 --- a/drivers/infiniband/hw/nes/nes_cm.c
32526 +++ b/drivers/infiniband/hw/nes/nes_cm.c
32527 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32528 u32 cm_packets_retrans;
32529 u32 cm_packets_created;
32530 u32 cm_packets_received;
32531 -atomic_t cm_listens_created;
32532 -atomic_t cm_listens_destroyed;
32533 +atomic_unchecked_t cm_listens_created;
32534 +atomic_unchecked_t cm_listens_destroyed;
32535 u32 cm_backlog_drops;
32536 -atomic_t cm_loopbacks;
32537 -atomic_t cm_nodes_created;
32538 -atomic_t cm_nodes_destroyed;
32539 -atomic_t cm_accel_dropped_pkts;
32540 -atomic_t cm_resets_recvd;
32541 +atomic_unchecked_t cm_loopbacks;
32542 +atomic_unchecked_t cm_nodes_created;
32543 +atomic_unchecked_t cm_nodes_destroyed;
32544 +atomic_unchecked_t cm_accel_dropped_pkts;
32545 +atomic_unchecked_t cm_resets_recvd;
32546
32547 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32548 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32549 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32550
32551 static struct nes_cm_core *g_cm_core;
32552
32553 -atomic_t cm_connects;
32554 -atomic_t cm_accepts;
32555 -atomic_t cm_disconnects;
32556 -atomic_t cm_closes;
32557 -atomic_t cm_connecteds;
32558 -atomic_t cm_connect_reqs;
32559 -atomic_t cm_rejects;
32560 +atomic_unchecked_t cm_connects;
32561 +atomic_unchecked_t cm_accepts;
32562 +atomic_unchecked_t cm_disconnects;
32563 +atomic_unchecked_t cm_closes;
32564 +atomic_unchecked_t cm_connecteds;
32565 +atomic_unchecked_t cm_connect_reqs;
32566 +atomic_unchecked_t cm_rejects;
32567
32568 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32569 {
32570 @@ -1274,7 +1274,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32571 kfree(listener);
32572 listener = NULL;
32573 ret = 0;
32574 - atomic_inc(&cm_listens_destroyed);
32575 + atomic_inc_unchecked(&cm_listens_destroyed);
32576 } else {
32577 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32578 }
32579 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32580 cm_node->rem_mac);
32581
32582 add_hte_node(cm_core, cm_node);
32583 - atomic_inc(&cm_nodes_created);
32584 + atomic_inc_unchecked(&cm_nodes_created);
32585
32586 return cm_node;
32587 }
32588 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32589 }
32590
32591 atomic_dec(&cm_core->node_cnt);
32592 - atomic_inc(&cm_nodes_destroyed);
32593 + atomic_inc_unchecked(&cm_nodes_destroyed);
32594 nesqp = cm_node->nesqp;
32595 if (nesqp) {
32596 nesqp->cm_node = NULL;
32597 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32598
32599 static void drop_packet(struct sk_buff *skb)
32600 {
32601 - atomic_inc(&cm_accel_dropped_pkts);
32602 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32603 dev_kfree_skb_any(skb);
32604 }
32605
32606 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32607 {
32608
32609 int reset = 0; /* whether to send reset in case of err.. */
32610 - atomic_inc(&cm_resets_recvd);
32611 + atomic_inc_unchecked(&cm_resets_recvd);
32612 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32613 " refcnt=%d\n", cm_node, cm_node->state,
32614 atomic_read(&cm_node->ref_count));
32615 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32616 rem_ref_cm_node(cm_node->cm_core, cm_node);
32617 return NULL;
32618 }
32619 - atomic_inc(&cm_loopbacks);
32620 + atomic_inc_unchecked(&cm_loopbacks);
32621 loopbackremotenode->loopbackpartner = cm_node;
32622 loopbackremotenode->tcp_cntxt.rcv_wscale =
32623 NES_CM_DEFAULT_RCV_WND_SCALE;
32624 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32625 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32626 else {
32627 rem_ref_cm_node(cm_core, cm_node);
32628 - atomic_inc(&cm_accel_dropped_pkts);
32629 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32630 dev_kfree_skb_any(skb);
32631 }
32632 break;
32633 @@ -2881,7 +2881,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32634
32635 if ((cm_id) && (cm_id->event_handler)) {
32636 if (issue_disconn) {
32637 - atomic_inc(&cm_disconnects);
32638 + atomic_inc_unchecked(&cm_disconnects);
32639 cm_event.event = IW_CM_EVENT_DISCONNECT;
32640 cm_event.status = disconn_status;
32641 cm_event.local_addr = cm_id->local_addr;
32642 @@ -2903,7 +2903,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32643 }
32644
32645 if (issue_close) {
32646 - atomic_inc(&cm_closes);
32647 + atomic_inc_unchecked(&cm_closes);
32648 nes_disconnect(nesqp, 1);
32649
32650 cm_id->provider_data = nesqp;
32651 @@ -3039,7 +3039,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32652
32653 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32654 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32655 - atomic_inc(&cm_accepts);
32656 + atomic_inc_unchecked(&cm_accepts);
32657
32658 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32659 netdev_refcnt_read(nesvnic->netdev));
32660 @@ -3241,7 +3241,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32661 struct nes_cm_core *cm_core;
32662 u8 *start_buff;
32663
32664 - atomic_inc(&cm_rejects);
32665 + atomic_inc_unchecked(&cm_rejects);
32666 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32667 loopback = cm_node->loopbackpartner;
32668 cm_core = cm_node->cm_core;
32669 @@ -3301,7 +3301,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32670 ntohl(cm_id->local_addr.sin_addr.s_addr),
32671 ntohs(cm_id->local_addr.sin_port));
32672
32673 - atomic_inc(&cm_connects);
32674 + atomic_inc_unchecked(&cm_connects);
32675 nesqp->active_conn = 1;
32676
32677 /* cache the cm_id in the qp */
32678 @@ -3407,7 +3407,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32679 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32680 return err;
32681 }
32682 - atomic_inc(&cm_listens_created);
32683 + atomic_inc_unchecked(&cm_listens_created);
32684 }
32685
32686 cm_id->add_ref(cm_id);
32687 @@ -3508,7 +3508,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32688
32689 if (nesqp->destroyed)
32690 return;
32691 - atomic_inc(&cm_connecteds);
32692 + atomic_inc_unchecked(&cm_connecteds);
32693 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32694 " local port 0x%04X. jiffies = %lu.\n",
32695 nesqp->hwqp.qp_id,
32696 @@ -3695,7 +3695,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32697
32698 cm_id->add_ref(cm_id);
32699 ret = cm_id->event_handler(cm_id, &cm_event);
32700 - atomic_inc(&cm_closes);
32701 + atomic_inc_unchecked(&cm_closes);
32702 cm_event.event = IW_CM_EVENT_CLOSE;
32703 cm_event.status = 0;
32704 cm_event.provider_data = cm_id->provider_data;
32705 @@ -3731,7 +3731,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32706 return;
32707 cm_id = cm_node->cm_id;
32708
32709 - atomic_inc(&cm_connect_reqs);
32710 + atomic_inc_unchecked(&cm_connect_reqs);
32711 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32712 cm_node, cm_id, jiffies);
32713
32714 @@ -3771,7 +3771,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32715 return;
32716 cm_id = cm_node->cm_id;
32717
32718 - atomic_inc(&cm_connect_reqs);
32719 + atomic_inc_unchecked(&cm_connect_reqs);
32720 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32721 cm_node, cm_id, jiffies);
32722
32723 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32724 index 3ba7be3..c81f6ff 100644
32725 --- a/drivers/infiniband/hw/nes/nes_mgt.c
32726 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
32727 @@ -40,8 +40,8 @@
32728 #include "nes.h"
32729 #include "nes_mgt.h"
32730
32731 -atomic_t pau_qps_created;
32732 -atomic_t pau_qps_destroyed;
32733 +atomic_unchecked_t pau_qps_created;
32734 +atomic_unchecked_t pau_qps_destroyed;
32735
32736 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32737 {
32738 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32739 {
32740 struct sk_buff *skb;
32741 unsigned long flags;
32742 - atomic_inc(&pau_qps_destroyed);
32743 + atomic_inc_unchecked(&pau_qps_destroyed);
32744
32745 /* Free packets that have not yet been forwarded */
32746 /* Lock is acquired by skb_dequeue when removing the skb */
32747 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32748 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32749 skb_queue_head_init(&nesqp->pau_list);
32750 spin_lock_init(&nesqp->pau_lock);
32751 - atomic_inc(&pau_qps_created);
32752 + atomic_inc_unchecked(&pau_qps_created);
32753 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32754 }
32755
32756 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32757 index f3a3ecf..57d311d 100644
32758 --- a/drivers/infiniband/hw/nes/nes_nic.c
32759 +++ b/drivers/infiniband/hw/nes/nes_nic.c
32760 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32761 target_stat_values[++index] = mh_detected;
32762 target_stat_values[++index] = mh_pauses_sent;
32763 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32764 - target_stat_values[++index] = atomic_read(&cm_connects);
32765 - target_stat_values[++index] = atomic_read(&cm_accepts);
32766 - target_stat_values[++index] = atomic_read(&cm_disconnects);
32767 - target_stat_values[++index] = atomic_read(&cm_connecteds);
32768 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32769 - target_stat_values[++index] = atomic_read(&cm_rejects);
32770 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32771 - target_stat_values[++index] = atomic_read(&qps_created);
32772 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32773 - target_stat_values[++index] = atomic_read(&qps_destroyed);
32774 - target_stat_values[++index] = atomic_read(&cm_closes);
32775 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32776 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32777 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32778 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32779 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32780 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32781 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32782 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32783 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32784 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32785 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32786 target_stat_values[++index] = cm_packets_sent;
32787 target_stat_values[++index] = cm_packets_bounced;
32788 target_stat_values[++index] = cm_packets_created;
32789 target_stat_values[++index] = cm_packets_received;
32790 target_stat_values[++index] = cm_packets_dropped;
32791 target_stat_values[++index] = cm_packets_retrans;
32792 - target_stat_values[++index] = atomic_read(&cm_listens_created);
32793 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32794 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32795 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32796 target_stat_values[++index] = cm_backlog_drops;
32797 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
32798 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
32799 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32800 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32801 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32802 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32803 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32804 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32805 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32806 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32807 target_stat_values[++index] = nesadapter->free_4kpbl;
32808 target_stat_values[++index] = nesadapter->free_256pbl;
32809 target_stat_values[++index] = int_mod_timer_init;
32810 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32811 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32812 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32813 - target_stat_values[++index] = atomic_read(&pau_qps_created);
32814 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32815 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32816 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32817 }
32818
32819 /**
32820 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32821 index 0927b5c..ed67986 100644
32822 --- a/drivers/infiniband/hw/nes/nes_verbs.c
32823 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
32824 @@ -46,9 +46,9 @@
32825
32826 #include <rdma/ib_umem.h>
32827
32828 -atomic_t mod_qp_timouts;
32829 -atomic_t qps_created;
32830 -atomic_t sw_qps_destroyed;
32831 +atomic_unchecked_t mod_qp_timouts;
32832 +atomic_unchecked_t qps_created;
32833 +atomic_unchecked_t sw_qps_destroyed;
32834
32835 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32836
32837 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32838 if (init_attr->create_flags)
32839 return ERR_PTR(-EINVAL);
32840
32841 - atomic_inc(&qps_created);
32842 + atomic_inc_unchecked(&qps_created);
32843 switch (init_attr->qp_type) {
32844 case IB_QPT_RC:
32845 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32846 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32847 struct iw_cm_event cm_event;
32848 int ret = 0;
32849
32850 - atomic_inc(&sw_qps_destroyed);
32851 + atomic_inc_unchecked(&sw_qps_destroyed);
32852 nesqp->destroyed = 1;
32853
32854 /* Blow away the connection if it exists. */
32855 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32856 index b881bdc..c2e360c 100644
32857 --- a/drivers/infiniband/hw/qib/qib.h
32858 +++ b/drivers/infiniband/hw/qib/qib.h
32859 @@ -51,6 +51,7 @@
32860 #include <linux/completion.h>
32861 #include <linux/kref.h>
32862 #include <linux/sched.h>
32863 +#include <linux/slab.h>
32864
32865 #include "qib_common.h"
32866 #include "qib_verbs.h"
32867 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32868 index c351aa4..e6967c2 100644
32869 --- a/drivers/input/gameport/gameport.c
32870 +++ b/drivers/input/gameport/gameport.c
32871 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32872 */
32873 static void gameport_init_port(struct gameport *gameport)
32874 {
32875 - static atomic_t gameport_no = ATOMIC_INIT(0);
32876 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32877
32878 __module_get(THIS_MODULE);
32879
32880 mutex_init(&gameport->drv_mutex);
32881 device_initialize(&gameport->dev);
32882 dev_set_name(&gameport->dev, "gameport%lu",
32883 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32884 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32885 gameport->dev.bus = &gameport_bus;
32886 gameport->dev.release = gameport_release_port;
32887 if (gameport->parent)
32888 diff --git a/drivers/input/input.c b/drivers/input/input.c
32889 index 1f78c95..3cddc6c 100644
32890 --- a/drivers/input/input.c
32891 +++ b/drivers/input/input.c
32892 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32893 */
32894 int input_register_device(struct input_dev *dev)
32895 {
32896 - static atomic_t input_no = ATOMIC_INIT(0);
32897 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32898 struct input_handler *handler;
32899 const char *path;
32900 int error;
32901 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32902 dev->setkeycode = input_default_setkeycode;
32903
32904 dev_set_name(&dev->dev, "input%ld",
32905 - (unsigned long) atomic_inc_return(&input_no) - 1);
32906 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32907
32908 error = device_add(&dev->dev);
32909 if (error)
32910 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32911 index b8d8611..7a4a04b 100644
32912 --- a/drivers/input/joystick/sidewinder.c
32913 +++ b/drivers/input/joystick/sidewinder.c
32914 @@ -30,6 +30,7 @@
32915 #include <linux/kernel.h>
32916 #include <linux/module.h>
32917 #include <linux/slab.h>
32918 +#include <linux/sched.h>
32919 #include <linux/init.h>
32920 #include <linux/input.h>
32921 #include <linux/gameport.h>
32922 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32923 index fd7a0d5..a4af10c 100644
32924 --- a/drivers/input/joystick/xpad.c
32925 +++ b/drivers/input/joystick/xpad.c
32926 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32927
32928 static int xpad_led_probe(struct usb_xpad *xpad)
32929 {
32930 - static atomic_t led_seq = ATOMIC_INIT(0);
32931 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32932 long led_no;
32933 struct xpad_led *led;
32934 struct led_classdev *led_cdev;
32935 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32936 if (!led)
32937 return -ENOMEM;
32938
32939 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32940 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32941
32942 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32943 led->xpad = xpad;
32944 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32945 index 0110b5a..d3ad144 100644
32946 --- a/drivers/input/mousedev.c
32947 +++ b/drivers/input/mousedev.c
32948 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32949
32950 spin_unlock_irq(&client->packet_lock);
32951
32952 - if (copy_to_user(buffer, data, count))
32953 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32954 return -EFAULT;
32955
32956 return count;
32957 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32958 index ba70058..571d25d 100644
32959 --- a/drivers/input/serio/serio.c
32960 +++ b/drivers/input/serio/serio.c
32961 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
32962 */
32963 static void serio_init_port(struct serio *serio)
32964 {
32965 - static atomic_t serio_no = ATOMIC_INIT(0);
32966 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32967
32968 __module_get(THIS_MODULE);
32969
32970 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
32971 mutex_init(&serio->drv_mutex);
32972 device_initialize(&serio->dev);
32973 dev_set_name(&serio->dev, "serio%ld",
32974 - (long)atomic_inc_return(&serio_no) - 1);
32975 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32976 serio->dev.bus = &serio_bus;
32977 serio->dev.release = serio_release_port;
32978 serio->dev.groups = serio_device_attr_groups;
32979 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32980 index e44933d..9ba484a 100644
32981 --- a/drivers/isdn/capi/capi.c
32982 +++ b/drivers/isdn/capi/capi.c
32983 @@ -83,8 +83,8 @@ struct capiminor {
32984
32985 struct capi20_appl *ap;
32986 u32 ncci;
32987 - atomic_t datahandle;
32988 - atomic_t msgid;
32989 + atomic_unchecked_t datahandle;
32990 + atomic_unchecked_t msgid;
32991
32992 struct tty_port port;
32993 int ttyinstop;
32994 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32995 capimsg_setu16(s, 2, mp->ap->applid);
32996 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32997 capimsg_setu8 (s, 5, CAPI_RESP);
32998 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32999 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
33000 capimsg_setu32(s, 8, mp->ncci);
33001 capimsg_setu16(s, 12, datahandle);
33002 }
33003 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
33004 mp->outbytes -= len;
33005 spin_unlock_bh(&mp->outlock);
33006
33007 - datahandle = atomic_inc_return(&mp->datahandle);
33008 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
33009 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
33010 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
33011 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
33012 capimsg_setu16(skb->data, 2, mp->ap->applid);
33013 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
33014 capimsg_setu8 (skb->data, 5, CAPI_REQ);
33015 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
33016 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
33017 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
33018 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
33019 capimsg_setu16(skb->data, 16, len); /* Data length */
33020 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
33021 index db621db..825ea1a 100644
33022 --- a/drivers/isdn/gigaset/common.c
33023 +++ b/drivers/isdn/gigaset/common.c
33024 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
33025 cs->commands_pending = 0;
33026 cs->cur_at_seq = 0;
33027 cs->gotfwver = -1;
33028 - cs->open_count = 0;
33029 + local_set(&cs->open_count, 0);
33030 cs->dev = NULL;
33031 cs->tty = NULL;
33032 cs->tty_dev = NULL;
33033 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
33034 index 212efaf..f187c6b 100644
33035 --- a/drivers/isdn/gigaset/gigaset.h
33036 +++ b/drivers/isdn/gigaset/gigaset.h
33037 @@ -35,6 +35,7 @@
33038 #include <linux/tty_driver.h>
33039 #include <linux/list.h>
33040 #include <linux/atomic.h>
33041 +#include <asm/local.h>
33042
33043 #define GIG_VERSION {0, 5, 0, 0}
33044 #define GIG_COMPAT {0, 4, 0, 0}
33045 @@ -433,7 +434,7 @@ struct cardstate {
33046 spinlock_t cmdlock;
33047 unsigned curlen, cmdbytes;
33048
33049 - unsigned open_count;
33050 + local_t open_count;
33051 struct tty_struct *tty;
33052 struct tasklet_struct if_wake_tasklet;
33053 unsigned control_state;
33054 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
33055 index ee0a549..a7c9798 100644
33056 --- a/drivers/isdn/gigaset/interface.c
33057 +++ b/drivers/isdn/gigaset/interface.c
33058 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
33059 }
33060 tty->driver_data = cs;
33061
33062 - ++cs->open_count;
33063 -
33064 - if (cs->open_count == 1) {
33065 + if (local_inc_return(&cs->open_count) == 1) {
33066 spin_lock_irqsave(&cs->lock, flags);
33067 cs->tty = tty;
33068 spin_unlock_irqrestore(&cs->lock, flags);
33069 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
33070
33071 if (!cs->connected)
33072 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33073 - else if (!cs->open_count)
33074 + else if (!local_read(&cs->open_count))
33075 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33076 else {
33077 - if (!--cs->open_count) {
33078 + if (!local_dec_return(&cs->open_count)) {
33079 spin_lock_irqsave(&cs->lock, flags);
33080 cs->tty = NULL;
33081 spin_unlock_irqrestore(&cs->lock, flags);
33082 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
33083 if (!cs->connected) {
33084 gig_dbg(DEBUG_IF, "not connected");
33085 retval = -ENODEV;
33086 - } else if (!cs->open_count)
33087 + } else if (!local_read(&cs->open_count))
33088 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33089 else {
33090 retval = 0;
33091 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
33092 retval = -ENODEV;
33093 goto done;
33094 }
33095 - if (!cs->open_count) {
33096 + if (!local_read(&cs->open_count)) {
33097 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33098 retval = -ENODEV;
33099 goto done;
33100 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
33101 if (!cs->connected) {
33102 gig_dbg(DEBUG_IF, "not connected");
33103 retval = -ENODEV;
33104 - } else if (!cs->open_count)
33105 + } else if (!local_read(&cs->open_count))
33106 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33107 else if (cs->mstate != MS_LOCKED) {
33108 dev_warn(cs->dev, "can't write to unlocked device\n");
33109 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
33110
33111 if (!cs->connected)
33112 gig_dbg(DEBUG_IF, "not connected");
33113 - else if (!cs->open_count)
33114 + else if (!local_read(&cs->open_count))
33115 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33116 else if (cs->mstate != MS_LOCKED)
33117 dev_warn(cs->dev, "can't write to unlocked device\n");
33118 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
33119
33120 if (!cs->connected)
33121 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33122 - else if (!cs->open_count)
33123 + else if (!local_read(&cs->open_count))
33124 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33125 else
33126 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
33127 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
33128
33129 if (!cs->connected)
33130 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33131 - else if (!cs->open_count)
33132 + else if (!local_read(&cs->open_count))
33133 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33134 else
33135 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
33136 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
33137 goto out;
33138 }
33139
33140 - if (!cs->open_count) {
33141 + if (!local_read(&cs->open_count)) {
33142 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33143 goto out;
33144 }
33145 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
33146 index 2a57da59..e7a12ed 100644
33147 --- a/drivers/isdn/hardware/avm/b1.c
33148 +++ b/drivers/isdn/hardware/avm/b1.c
33149 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
33150 }
33151 if (left) {
33152 if (t4file->user) {
33153 - if (copy_from_user(buf, dp, left))
33154 + if (left > sizeof buf || copy_from_user(buf, dp, left))
33155 return -EFAULT;
33156 } else {
33157 memcpy(buf, dp, left);
33158 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
33159 }
33160 if (left) {
33161 if (config->user) {
33162 - if (copy_from_user(buf, dp, left))
33163 + if (left > sizeof buf || copy_from_user(buf, dp, left))
33164 return -EFAULT;
33165 } else {
33166 memcpy(buf, dp, left);
33167 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
33168 index 85784a7..a19ca98 100644
33169 --- a/drivers/isdn/hardware/eicon/divasync.h
33170 +++ b/drivers/isdn/hardware/eicon/divasync.h
33171 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
33172 } diva_didd_add_adapter_t;
33173 typedef struct _diva_didd_remove_adapter {
33174 IDI_CALL p_request;
33175 -} diva_didd_remove_adapter_t;
33176 +} __no_const diva_didd_remove_adapter_t;
33177 typedef struct _diva_didd_read_adapter_array {
33178 void * buffer;
33179 dword length;
33180 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
33181 index a3bd163..8956575 100644
33182 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
33183 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
33184 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
33185 typedef struct _diva_os_idi_adapter_interface {
33186 diva_init_card_proc_t cleanup_adapter_proc;
33187 diva_cmd_card_proc_t cmd_proc;
33188 -} diva_os_idi_adapter_interface_t;
33189 +} __no_const diva_os_idi_adapter_interface_t;
33190
33191 typedef struct _diva_os_xdi_adapter {
33192 struct list_head link;
33193 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
33194 index 1f355bb..43f1fea 100644
33195 --- a/drivers/isdn/icn/icn.c
33196 +++ b/drivers/isdn/icn/icn.c
33197 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
33198 if (count > len)
33199 count = len;
33200 if (user) {
33201 - if (copy_from_user(msg, buf, count))
33202 + if (count > sizeof msg || copy_from_user(msg, buf, count))
33203 return -EFAULT;
33204 } else
33205 memcpy(msg, buf, count);
33206 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
33207 index b5fdcb7..5b6c59f 100644
33208 --- a/drivers/lguest/core.c
33209 +++ b/drivers/lguest/core.c
33210 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
33211 * it's worked so far. The end address needs +1 because __get_vm_area
33212 * allocates an extra guard page, so we need space for that.
33213 */
33214 +
33215 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33216 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33217 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
33218 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33219 +#else
33220 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33221 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
33222 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33223 +#endif
33224 +
33225 if (!switcher_vma) {
33226 err = -ENOMEM;
33227 printk("lguest: could not map switcher pages high\n");
33228 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
33229 * Now the Switcher is mapped at the right address, we can't fail!
33230 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
33231 */
33232 - memcpy(switcher_vma->addr, start_switcher_text,
33233 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
33234 end_switcher_text - start_switcher_text);
33235
33236 printk(KERN_INFO "lguest: mapped switcher at %p\n",
33237 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
33238 index 3980903..ce25c5e 100644
33239 --- a/drivers/lguest/x86/core.c
33240 +++ b/drivers/lguest/x86/core.c
33241 @@ -59,7 +59,7 @@ static struct {
33242 /* Offset from where switcher.S was compiled to where we've copied it */
33243 static unsigned long switcher_offset(void)
33244 {
33245 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
33246 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
33247 }
33248
33249 /* This cpu's struct lguest_pages. */
33250 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
33251 * These copies are pretty cheap, so we do them unconditionally: */
33252 /* Save the current Host top-level page directory.
33253 */
33254 +
33255 +#ifdef CONFIG_PAX_PER_CPU_PGD
33256 + pages->state.host_cr3 = read_cr3();
33257 +#else
33258 pages->state.host_cr3 = __pa(current->mm->pgd);
33259 +#endif
33260 +
33261 /*
33262 * Set up the Guest's page tables to see this CPU's pages (and no
33263 * other CPU's pages).
33264 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
33265 * compiled-in switcher code and the high-mapped copy we just made.
33266 */
33267 for (i = 0; i < IDT_ENTRIES; i++)
33268 - default_idt_entries[i] += switcher_offset();
33269 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
33270
33271 /*
33272 * Set up the Switcher's per-cpu areas.
33273 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
33274 * it will be undisturbed when we switch. To change %cs and jump we
33275 * need this structure to feed to Intel's "lcall" instruction.
33276 */
33277 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
33278 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
33279 lguest_entry.segment = LGUEST_CS;
33280
33281 /*
33282 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33283 index 40634b0..4f5855e 100644
33284 --- a/drivers/lguest/x86/switcher_32.S
33285 +++ b/drivers/lguest/x86/switcher_32.S
33286 @@ -87,6 +87,7 @@
33287 #include <asm/page.h>
33288 #include <asm/segment.h>
33289 #include <asm/lguest.h>
33290 +#include <asm/processor-flags.h>
33291
33292 // We mark the start of the code to copy
33293 // It's placed in .text tho it's never run here
33294 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33295 // Changes type when we load it: damn Intel!
33296 // For after we switch over our page tables
33297 // That entry will be read-only: we'd crash.
33298 +
33299 +#ifdef CONFIG_PAX_KERNEXEC
33300 + mov %cr0, %edx
33301 + xor $X86_CR0_WP, %edx
33302 + mov %edx, %cr0
33303 +#endif
33304 +
33305 movl $(GDT_ENTRY_TSS*8), %edx
33306 ltr %dx
33307
33308 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33309 // Let's clear it again for our return.
33310 // The GDT descriptor of the Host
33311 // Points to the table after two "size" bytes
33312 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33313 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33314 // Clear "used" from type field (byte 5, bit 2)
33315 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33316 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33317 +
33318 +#ifdef CONFIG_PAX_KERNEXEC
33319 + mov %cr0, %eax
33320 + xor $X86_CR0_WP, %eax
33321 + mov %eax, %cr0
33322 +#endif
33323
33324 // Once our page table's switched, the Guest is live!
33325 // The Host fades as we run this final step.
33326 @@ -295,13 +309,12 @@ deliver_to_host:
33327 // I consulted gcc, and it gave
33328 // These instructions, which I gladly credit:
33329 leal (%edx,%ebx,8), %eax
33330 - movzwl (%eax),%edx
33331 - movl 4(%eax), %eax
33332 - xorw %ax, %ax
33333 - orl %eax, %edx
33334 + movl 4(%eax), %edx
33335 + movw (%eax), %dx
33336 // Now the address of the handler's in %edx
33337 // We call it now: its "iret" drops us home.
33338 - jmp *%edx
33339 + ljmp $__KERNEL_CS, $1f
33340 +1: jmp *%edx
33341
33342 // Every interrupt can come to us here
33343 // But we must truly tell each apart.
33344 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33345 index 4daf9e5..b8d1d0f 100644
33346 --- a/drivers/macintosh/macio_asic.c
33347 +++ b/drivers/macintosh/macio_asic.c
33348 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33349 * MacIO is matched against any Apple ID, it's probe() function
33350 * will then decide wether it applies or not
33351 */
33352 -static const struct pci_device_id __devinitdata pci_ids [] = { {
33353 +static const struct pci_device_id __devinitconst pci_ids [] = { {
33354 .vendor = PCI_VENDOR_ID_APPLE,
33355 .device = PCI_ANY_ID,
33356 .subvendor = PCI_ANY_ID,
33357 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33358 index 1ce84ed..0fdd40a 100644
33359 --- a/drivers/md/dm-ioctl.c
33360 +++ b/drivers/md/dm-ioctl.c
33361 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33362 cmd == DM_LIST_VERSIONS_CMD)
33363 return 0;
33364
33365 - if ((cmd == DM_DEV_CREATE_CMD)) {
33366 + if (cmd == DM_DEV_CREATE_CMD) {
33367 if (!*param->name) {
33368 DMWARN("name not supplied when creating device");
33369 return -EINVAL;
33370 diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
33371 index 1f23e04..08d9a20 100644
33372 --- a/drivers/md/dm-log-userspace-transfer.c
33373 +++ b/drivers/md/dm-log-userspace-transfer.c
33374 @@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
33375 {
33376 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
33377
33378 - if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
33379 + if (!capable(CAP_SYS_ADMIN))
33380 return;
33381
33382 spin_lock(&receiving_list_lock);
33383 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33384 index 9bfd057..01180bc 100644
33385 --- a/drivers/md/dm-raid1.c
33386 +++ b/drivers/md/dm-raid1.c
33387 @@ -40,7 +40,7 @@ enum dm_raid1_error {
33388
33389 struct mirror {
33390 struct mirror_set *ms;
33391 - atomic_t error_count;
33392 + atomic_unchecked_t error_count;
33393 unsigned long error_type;
33394 struct dm_dev *dev;
33395 sector_t offset;
33396 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33397 struct mirror *m;
33398
33399 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33400 - if (!atomic_read(&m->error_count))
33401 + if (!atomic_read_unchecked(&m->error_count))
33402 return m;
33403
33404 return NULL;
33405 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33406 * simple way to tell if a device has encountered
33407 * errors.
33408 */
33409 - atomic_inc(&m->error_count);
33410 + atomic_inc_unchecked(&m->error_count);
33411
33412 if (test_and_set_bit(error_type, &m->error_type))
33413 return;
33414 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33415 struct mirror *m = get_default_mirror(ms);
33416
33417 do {
33418 - if (likely(!atomic_read(&m->error_count)))
33419 + if (likely(!atomic_read_unchecked(&m->error_count)))
33420 return m;
33421
33422 if (m-- == ms->mirror)
33423 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33424 {
33425 struct mirror *default_mirror = get_default_mirror(m->ms);
33426
33427 - return !atomic_read(&default_mirror->error_count);
33428 + return !atomic_read_unchecked(&default_mirror->error_count);
33429 }
33430
33431 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33432 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33433 */
33434 if (likely(region_in_sync(ms, region, 1)))
33435 m = choose_mirror(ms, bio->bi_sector);
33436 - else if (m && atomic_read(&m->error_count))
33437 + else if (m && atomic_read_unchecked(&m->error_count))
33438 m = NULL;
33439
33440 if (likely(m))
33441 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33442 }
33443
33444 ms->mirror[mirror].ms = ms;
33445 - atomic_set(&(ms->mirror[mirror].error_count), 0);
33446 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33447 ms->mirror[mirror].error_type = 0;
33448 ms->mirror[mirror].offset = offset;
33449
33450 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
33451 */
33452 static char device_status_char(struct mirror *m)
33453 {
33454 - if (!atomic_read(&(m->error_count)))
33455 + if (!atomic_read_unchecked(&(m->error_count)))
33456 return 'A';
33457
33458 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33459 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33460 index 3d80cf0..b77cc47 100644
33461 --- a/drivers/md/dm-stripe.c
33462 +++ b/drivers/md/dm-stripe.c
33463 @@ -20,7 +20,7 @@ struct stripe {
33464 struct dm_dev *dev;
33465 sector_t physical_start;
33466
33467 - atomic_t error_count;
33468 + atomic_unchecked_t error_count;
33469 };
33470
33471 struct stripe_c {
33472 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33473 kfree(sc);
33474 return r;
33475 }
33476 - atomic_set(&(sc->stripe[i].error_count), 0);
33477 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33478 }
33479
33480 ti->private = sc;
33481 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
33482 DMEMIT("%d ", sc->stripes);
33483 for (i = 0; i < sc->stripes; i++) {
33484 DMEMIT("%s ", sc->stripe[i].dev->name);
33485 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33486 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33487 'D' : 'A';
33488 }
33489 buffer[i] = '\0';
33490 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33491 */
33492 for (i = 0; i < sc->stripes; i++)
33493 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33494 - atomic_inc(&(sc->stripe[i].error_count));
33495 - if (atomic_read(&(sc->stripe[i].error_count)) <
33496 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
33497 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33498 DM_IO_ERROR_THRESHOLD)
33499 schedule_work(&sc->trigger_event);
33500 }
33501 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33502 index 63cc542..8d45caf3 100644
33503 --- a/drivers/md/dm-table.c
33504 +++ b/drivers/md/dm-table.c
33505 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33506 if (!dev_size)
33507 return 0;
33508
33509 - if ((start >= dev_size) || (start + len > dev_size)) {
33510 + if ((start >= dev_size) || (len > dev_size - start)) {
33511 DMWARN("%s: %s too small for target: "
33512 "start=%llu, len=%llu, dev_size=%llu",
33513 dm_device_name(ti->table->md), bdevname(bdev, b),
33514 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33515 index 237571a..fb6d19b 100644
33516 --- a/drivers/md/dm-thin-metadata.c
33517 +++ b/drivers/md/dm-thin-metadata.c
33518 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33519
33520 pmd->info.tm = tm;
33521 pmd->info.levels = 2;
33522 - pmd->info.value_type.context = pmd->data_sm;
33523 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33524 pmd->info.value_type.size = sizeof(__le64);
33525 pmd->info.value_type.inc = data_block_inc;
33526 pmd->info.value_type.dec = data_block_dec;
33527 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33528
33529 pmd->bl_info.tm = tm;
33530 pmd->bl_info.levels = 1;
33531 - pmd->bl_info.value_type.context = pmd->data_sm;
33532 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33533 pmd->bl_info.value_type.size = sizeof(__le64);
33534 pmd->bl_info.value_type.inc = data_block_inc;
33535 pmd->bl_info.value_type.dec = data_block_dec;
33536 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33537 index b89c548..2af3ce4 100644
33538 --- a/drivers/md/dm.c
33539 +++ b/drivers/md/dm.c
33540 @@ -176,9 +176,9 @@ struct mapped_device {
33541 /*
33542 * Event handling.
33543 */
33544 - atomic_t event_nr;
33545 + atomic_unchecked_t event_nr;
33546 wait_queue_head_t eventq;
33547 - atomic_t uevent_seq;
33548 + atomic_unchecked_t uevent_seq;
33549 struct list_head uevent_list;
33550 spinlock_t uevent_lock; /* Protect access to uevent_list */
33551
33552 @@ -1844,8 +1844,8 @@ static struct mapped_device *alloc_dev(int minor)
33553 rwlock_init(&md->map_lock);
33554 atomic_set(&md->holders, 1);
33555 atomic_set(&md->open_count, 0);
33556 - atomic_set(&md->event_nr, 0);
33557 - atomic_set(&md->uevent_seq, 0);
33558 + atomic_set_unchecked(&md->event_nr, 0);
33559 + atomic_set_unchecked(&md->uevent_seq, 0);
33560 INIT_LIST_HEAD(&md->uevent_list);
33561 spin_lock_init(&md->uevent_lock);
33562
33563 @@ -1979,7 +1979,7 @@ static void event_callback(void *context)
33564
33565 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33566
33567 - atomic_inc(&md->event_nr);
33568 + atomic_inc_unchecked(&md->event_nr);
33569 wake_up(&md->eventq);
33570 }
33571
33572 @@ -2621,18 +2621,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33573
33574 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33575 {
33576 - return atomic_add_return(1, &md->uevent_seq);
33577 + return atomic_add_return_unchecked(1, &md->uevent_seq);
33578 }
33579
33580 uint32_t dm_get_event_nr(struct mapped_device *md)
33581 {
33582 - return atomic_read(&md->event_nr);
33583 + return atomic_read_unchecked(&md->event_nr);
33584 }
33585
33586 int dm_wait_event(struct mapped_device *md, int event_nr)
33587 {
33588 return wait_event_interruptible(md->eventq,
33589 - (event_nr != atomic_read(&md->event_nr)));
33590 + (event_nr != atomic_read_unchecked(&md->event_nr)));
33591 }
33592
33593 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33594 diff --git a/drivers/md/md.c b/drivers/md/md.c
33595 index 363aaf4..d875264 100644
33596 --- a/drivers/md/md.c
33597 +++ b/drivers/md/md.c
33598 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33599 * start build, activate spare
33600 */
33601 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33602 -static atomic_t md_event_count;
33603 +static atomic_unchecked_t md_event_count;
33604 void md_new_event(struct mddev *mddev)
33605 {
33606 - atomic_inc(&md_event_count);
33607 + atomic_inc_unchecked(&md_event_count);
33608 wake_up(&md_event_waiters);
33609 }
33610 EXPORT_SYMBOL_GPL(md_new_event);
33611 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33612 */
33613 static void md_new_event_inintr(struct mddev *mddev)
33614 {
33615 - atomic_inc(&md_event_count);
33616 + atomic_inc_unchecked(&md_event_count);
33617 wake_up(&md_event_waiters);
33618 }
33619
33620 @@ -1526,7 +1526,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33621
33622 rdev->preferred_minor = 0xffff;
33623 rdev->data_offset = le64_to_cpu(sb->data_offset);
33624 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33625 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33626
33627 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33628 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33629 @@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33630 else
33631 sb->resync_offset = cpu_to_le64(0);
33632
33633 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33634 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33635
33636 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33637 sb->size = cpu_to_le64(mddev->dev_sectors);
33638 @@ -2691,7 +2691,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33639 static ssize_t
33640 errors_show(struct md_rdev *rdev, char *page)
33641 {
33642 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33643 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33644 }
33645
33646 static ssize_t
33647 @@ -2700,7 +2700,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33648 char *e;
33649 unsigned long n = simple_strtoul(buf, &e, 10);
33650 if (*buf && (*e == 0 || *e == '\n')) {
33651 - atomic_set(&rdev->corrected_errors, n);
33652 + atomic_set_unchecked(&rdev->corrected_errors, n);
33653 return len;
33654 }
33655 return -EINVAL;
33656 @@ -3086,8 +3086,8 @@ int md_rdev_init(struct md_rdev *rdev)
33657 rdev->sb_loaded = 0;
33658 rdev->bb_page = NULL;
33659 atomic_set(&rdev->nr_pending, 0);
33660 - atomic_set(&rdev->read_errors, 0);
33661 - atomic_set(&rdev->corrected_errors, 0);
33662 + atomic_set_unchecked(&rdev->read_errors, 0);
33663 + atomic_set_unchecked(&rdev->corrected_errors, 0);
33664
33665 INIT_LIST_HEAD(&rdev->same_set);
33666 init_waitqueue_head(&rdev->blocked_wait);
33667 @@ -6738,7 +6738,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33668
33669 spin_unlock(&pers_lock);
33670 seq_printf(seq, "\n");
33671 - seq->poll_event = atomic_read(&md_event_count);
33672 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33673 return 0;
33674 }
33675 if (v == (void*)2) {
33676 @@ -6830,7 +6830,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33677 chunk_kb ? "KB" : "B");
33678 if (bitmap->file) {
33679 seq_printf(seq, ", file: ");
33680 - seq_path(seq, &bitmap->file->f_path, " \t\n");
33681 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
33682 }
33683
33684 seq_printf(seq, "\n");
33685 @@ -6861,7 +6861,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33686 return error;
33687
33688 seq = file->private_data;
33689 - seq->poll_event = atomic_read(&md_event_count);
33690 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33691 return error;
33692 }
33693
33694 @@ -6875,7 +6875,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33695 /* always allow read */
33696 mask = POLLIN | POLLRDNORM;
33697
33698 - if (seq->poll_event != atomic_read(&md_event_count))
33699 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33700 mask |= POLLERR | POLLPRI;
33701 return mask;
33702 }
33703 @@ -6919,7 +6919,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33704 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33705 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33706 (int)part_stat_read(&disk->part0, sectors[1]) -
33707 - atomic_read(&disk->sync_io);
33708 + atomic_read_unchecked(&disk->sync_io);
33709 /* sync IO will cause sync_io to increase before the disk_stats
33710 * as sync_io is counted when a request starts, and
33711 * disk_stats is counted when it completes.
33712 diff --git a/drivers/md/md.h b/drivers/md/md.h
33713 index 44c63df..b795d1a 100644
33714 --- a/drivers/md/md.h
33715 +++ b/drivers/md/md.h
33716 @@ -93,13 +93,13 @@ struct md_rdev {
33717 * only maintained for arrays that
33718 * support hot removal
33719 */
33720 - atomic_t read_errors; /* number of consecutive read errors that
33721 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
33722 * we have tried to ignore.
33723 */
33724 struct timespec last_read_error; /* monotonic time since our
33725 * last read error
33726 */
33727 - atomic_t corrected_errors; /* number of corrected read errors,
33728 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33729 * for reporting to userspace and storing
33730 * in superblock.
33731 */
33732 @@ -421,7 +421,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33733
33734 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33735 {
33736 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33737 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33738 }
33739
33740 struct md_personality
33741 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33742 index 50ed53b..4f29d7d 100644
33743 --- a/drivers/md/persistent-data/dm-space-map-checker.c
33744 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
33745 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
33746 /*----------------------------------------------------------------*/
33747
33748 struct sm_checker {
33749 - struct dm_space_map sm;
33750 + dm_space_map_no_const sm;
33751
33752 struct count_array old_counts;
33753 struct count_array counts;
33754 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33755 index fc469ba..2d91555 100644
33756 --- a/drivers/md/persistent-data/dm-space-map-disk.c
33757 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
33758 @@ -23,7 +23,7 @@
33759 * Space map interface.
33760 */
33761 struct sm_disk {
33762 - struct dm_space_map sm;
33763 + dm_space_map_no_const sm;
33764
33765 struct ll_disk ll;
33766 struct ll_disk old_ll;
33767 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33768 index e89ae5e..062e4c2 100644
33769 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
33770 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33771 @@ -43,7 +43,7 @@ struct block_op {
33772 };
33773
33774 struct sm_metadata {
33775 - struct dm_space_map sm;
33776 + dm_space_map_no_const sm;
33777
33778 struct ll_disk ll;
33779 struct ll_disk old_ll;
33780 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33781 index 1cbfc6b..56e1dbb 100644
33782 --- a/drivers/md/persistent-data/dm-space-map.h
33783 +++ b/drivers/md/persistent-data/dm-space-map.h
33784 @@ -60,6 +60,7 @@ struct dm_space_map {
33785 int (*root_size)(struct dm_space_map *sm, size_t *result);
33786 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33787 };
33788 +typedef struct dm_space_map __no_const dm_space_map_no_const;
33789
33790 /*----------------------------------------------------------------*/
33791
33792 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33793 index edc735a..e9b97f1 100644
33794 --- a/drivers/md/raid1.c
33795 +++ b/drivers/md/raid1.c
33796 @@ -1645,7 +1645,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33797 if (r1_sync_page_io(rdev, sect, s,
33798 bio->bi_io_vec[idx].bv_page,
33799 READ) != 0)
33800 - atomic_add(s, &rdev->corrected_errors);
33801 + atomic_add_unchecked(s, &rdev->corrected_errors);
33802 }
33803 sectors -= s;
33804 sect += s;
33805 @@ -1859,7 +1859,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33806 test_bit(In_sync, &rdev->flags)) {
33807 if (r1_sync_page_io(rdev, sect, s,
33808 conf->tmppage, READ)) {
33809 - atomic_add(s, &rdev->corrected_errors);
33810 + atomic_add_unchecked(s, &rdev->corrected_errors);
33811 printk(KERN_INFO
33812 "md/raid1:%s: read error corrected "
33813 "(%d sectors at %llu on %s)\n",
33814 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33815 index 1898389..a3aa617 100644
33816 --- a/drivers/md/raid10.c
33817 +++ b/drivers/md/raid10.c
33818 @@ -1636,7 +1636,7 @@ static void end_sync_read(struct bio *bio, int error)
33819 /* The write handler will notice the lack of
33820 * R10BIO_Uptodate and record any errors etc
33821 */
33822 - atomic_add(r10_bio->sectors,
33823 + atomic_add_unchecked(r10_bio->sectors,
33824 &conf->mirrors[d].rdev->corrected_errors);
33825
33826 /* for reconstruct, we always reschedule after a read.
33827 @@ -1987,7 +1987,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33828 {
33829 struct timespec cur_time_mon;
33830 unsigned long hours_since_last;
33831 - unsigned int read_errors = atomic_read(&rdev->read_errors);
33832 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33833
33834 ktime_get_ts(&cur_time_mon);
33835
33836 @@ -2009,9 +2009,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33837 * overflowing the shift of read_errors by hours_since_last.
33838 */
33839 if (hours_since_last >= 8 * sizeof(read_errors))
33840 - atomic_set(&rdev->read_errors, 0);
33841 + atomic_set_unchecked(&rdev->read_errors, 0);
33842 else
33843 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33844 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33845 }
33846
33847 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33848 @@ -2065,8 +2065,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33849 return;
33850
33851 check_decay_read_errors(mddev, rdev);
33852 - atomic_inc(&rdev->read_errors);
33853 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
33854 + atomic_inc_unchecked(&rdev->read_errors);
33855 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33856 char b[BDEVNAME_SIZE];
33857 bdevname(rdev->bdev, b);
33858
33859 @@ -2074,7 +2074,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33860 "md/raid10:%s: %s: Raid device exceeded "
33861 "read_error threshold [cur %d:max %d]\n",
33862 mdname(mddev), b,
33863 - atomic_read(&rdev->read_errors), max_read_errors);
33864 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33865 printk(KERN_NOTICE
33866 "md/raid10:%s: %s: Failing raid device\n",
33867 mdname(mddev), b);
33868 @@ -2223,7 +2223,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33869 (unsigned long long)(
33870 sect + rdev->data_offset),
33871 bdevname(rdev->bdev, b));
33872 - atomic_add(s, &rdev->corrected_errors);
33873 + atomic_add_unchecked(s, &rdev->corrected_errors);
33874 }
33875
33876 rdev_dec_pending(rdev, mddev);
33877 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33878 index d1162e5..c7cd902 100644
33879 --- a/drivers/md/raid5.c
33880 +++ b/drivers/md/raid5.c
33881 @@ -1687,18 +1687,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33882 (unsigned long long)(sh->sector
33883 + rdev->data_offset),
33884 bdevname(rdev->bdev, b));
33885 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33886 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33887 clear_bit(R5_ReadError, &sh->dev[i].flags);
33888 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33889 }
33890 - if (atomic_read(&rdev->read_errors))
33891 - atomic_set(&rdev->read_errors, 0);
33892 + if (atomic_read_unchecked(&rdev->read_errors))
33893 + atomic_set_unchecked(&rdev->read_errors, 0);
33894 } else {
33895 const char *bdn = bdevname(rdev->bdev, b);
33896 int retry = 0;
33897
33898 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33899 - atomic_inc(&rdev->read_errors);
33900 + atomic_inc_unchecked(&rdev->read_errors);
33901 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33902 printk_ratelimited(
33903 KERN_WARNING
33904 @@ -1727,7 +1727,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33905 (unsigned long long)(sh->sector
33906 + rdev->data_offset),
33907 bdn);
33908 - else if (atomic_read(&rdev->read_errors)
33909 + else if (atomic_read_unchecked(&rdev->read_errors)
33910 > conf->max_nr_stripes)
33911 printk(KERN_WARNING
33912 "md/raid:%s: Too many read errors, failing device %s.\n",
33913 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33914 index ce4f858..7bcfb46 100644
33915 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33916 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33917 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
33918 .subvendor = _subvend, .subdevice = _subdev, \
33919 .driver_data = (unsigned long)&_driverdata }
33920
33921 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33922 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33923 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33924 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33925 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33926 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33927 index a7d876f..8c21b61 100644
33928 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33929 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33930 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33931 union {
33932 dmx_ts_cb ts;
33933 dmx_section_cb sec;
33934 - } cb;
33935 + } __no_const cb;
33936
33937 struct dvb_demux *demux;
33938 void *priv;
33939 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33940 index 00a6732..70a682e 100644
33941 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33942 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33943 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33944 const struct dvb_device *template, void *priv, int type)
33945 {
33946 struct dvb_device *dvbdev;
33947 - struct file_operations *dvbdevfops;
33948 + file_operations_no_const *dvbdevfops;
33949 struct device *clsdev;
33950 int minor;
33951 int id;
33952 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33953 index 3940bb0..fb3952a 100644
33954 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33955 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33956 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33957
33958 struct dib0700_adapter_state {
33959 int (*set_param_save) (struct dvb_frontend *);
33960 -};
33961 +} __no_const;
33962
33963 static int dib7070_set_param_override(struct dvb_frontend *fe)
33964 {
33965 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33966 index 451c5a7..649f711 100644
33967 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33968 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33969 @@ -95,7 +95,7 @@ struct su3000_state {
33970
33971 struct s6x0_state {
33972 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33973 -};
33974 +} __no_const;
33975
33976 /* debug */
33977 static int dvb_usb_dw2102_debug;
33978 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33979 index 404f63a..4796533 100644
33980 --- a/drivers/media/dvb/frontends/dib3000.h
33981 +++ b/drivers/media/dvb/frontends/dib3000.h
33982 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33983 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33984 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33985 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33986 -};
33987 +} __no_const;
33988
33989 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33990 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33991 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33992 index 8418c02..8555013 100644
33993 --- a/drivers/media/dvb/ngene/ngene-cards.c
33994 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33995 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
33996
33997 /****************************************************************************/
33998
33999 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
34000 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
34001 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
34002 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
34003 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
34004 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
34005 index 16a089f..ab1667d 100644
34006 --- a/drivers/media/radio/radio-cadet.c
34007 +++ b/drivers/media/radio/radio-cadet.c
34008 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
34009 unsigned char readbuf[RDS_BUFFER];
34010 int i = 0;
34011
34012 + if (count > RDS_BUFFER)
34013 + return -EFAULT;
34014 mutex_lock(&dev->lock);
34015 if (dev->rdsstat == 0) {
34016 dev->rdsstat = 1;
34017 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
34018 index 9cde353..8c6a1c3 100644
34019 --- a/drivers/media/video/au0828/au0828.h
34020 +++ b/drivers/media/video/au0828/au0828.h
34021 @@ -191,7 +191,7 @@ struct au0828_dev {
34022
34023 /* I2C */
34024 struct i2c_adapter i2c_adap;
34025 - struct i2c_algorithm i2c_algo;
34026 + i2c_algorithm_no_const i2c_algo;
34027 struct i2c_client i2c_client;
34028 u32 i2c_rc;
34029
34030 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
34031 index 04bf662..e0ac026 100644
34032 --- a/drivers/media/video/cx88/cx88-alsa.c
34033 +++ b/drivers/media/video/cx88/cx88-alsa.c
34034 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
34035 * Only boards with eeprom and byte 1 at eeprom=1 have it
34036 */
34037
34038 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
34039 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
34040 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34041 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34042 {0, }
34043 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
34044 index 1fb7d5b..3901e77 100644
34045 --- a/drivers/media/video/omap/omap_vout.c
34046 +++ b/drivers/media/video/omap/omap_vout.c
34047 @@ -64,7 +64,6 @@ enum omap_vout_channels {
34048 OMAP_VIDEO2,
34049 };
34050
34051 -static struct videobuf_queue_ops video_vbq_ops;
34052 /* Variables configurable through module params*/
34053 static u32 video1_numbuffers = 3;
34054 static u32 video2_numbuffers = 3;
34055 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
34056 {
34057 struct videobuf_queue *q;
34058 struct omap_vout_device *vout = NULL;
34059 + static struct videobuf_queue_ops video_vbq_ops = {
34060 + .buf_setup = omap_vout_buffer_setup,
34061 + .buf_prepare = omap_vout_buffer_prepare,
34062 + .buf_release = omap_vout_buffer_release,
34063 + .buf_queue = omap_vout_buffer_queue,
34064 + };
34065
34066 vout = video_drvdata(file);
34067 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
34068 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
34069 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
34070
34071 q = &vout->vbq;
34072 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
34073 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
34074 - video_vbq_ops.buf_release = omap_vout_buffer_release;
34075 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
34076 spin_lock_init(&vout->vbq_lock);
34077
34078 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
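Annotation (not part of the patch): the change above replaces run-time assignment of the videobuf callbacks with a function-local static instance built from a designated initializer, so the structure is never written outside its definition and can be treated as read-only. A minimal sketch of the shape, with hypothetical callbacks:

	/* Hypothetical ops: initialized once at definition, never assigned to. */
	struct example_vbq_ops {
		int  (*buf_setup)(void *q);
		void (*buf_queue)(void *q);
	};

	static int  example_buf_setup(void *q) { return 0; }
	static void example_buf_queue(void *q) { }

	static int example_open(void)
	{
		static const struct example_vbq_ops ops = {
			.buf_setup = example_buf_setup,
			.buf_queue = example_buf_queue,
		};
		/* ops can live in read-only memory; just pass its address on. */
		return ops.buf_setup(NULL);
	}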
34079 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34080 index 305e6aa..0143317 100644
34081 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34082 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34083 @@ -196,7 +196,7 @@ struct pvr2_hdw {
34084
34085 /* I2C stuff */
34086 struct i2c_adapter i2c_adap;
34087 - struct i2c_algorithm i2c_algo;
34088 + i2c_algorithm_no_const i2c_algo;
34089 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
34090 int i2c_cx25840_hack_state;
34091 int i2c_linked;
34092 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
34093 index 4ed1c7c2..8f15e13 100644
34094 --- a/drivers/media/video/timblogiw.c
34095 +++ b/drivers/media/video/timblogiw.c
34096 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
34097
34098 /* Platform device functions */
34099
34100 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34101 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
34102 .vidioc_querycap = timblogiw_querycap,
34103 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
34104 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
34105 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34106 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
34107 };
34108
34109 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
34110 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
34111 .owner = THIS_MODULE,
34112 .open = timblogiw_open,
34113 .release = timblogiw_close,
34114 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
34115 index a7dc467..a55c423 100644
34116 --- a/drivers/message/fusion/mptbase.c
34117 +++ b/drivers/message/fusion/mptbase.c
34118 @@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
34119 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
34120 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
34121
34122 +#ifdef CONFIG_GRKERNSEC_HIDESYM
34123 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
34124 +#else
34125 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
34126 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
34127 +#endif
34128 +
34129 /*
34130 * Rounding UP to nearest 4-kB boundary here...
34131 */
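Annotation (not part of the patch): under CONFIG_GRKERNSEC_HIDESYM the hunk above prints NULL placeholders instead of real kernel addresses in the /proc output, so unprivileged readers learn nothing about kernel memory layout while the line format stays identical. A sketch of the pattern with hypothetical arguments:

	#include <linux/seq_file.h>
	#include <linux/types.h>

	static void example_show_frames(struct seq_file *m, void *vaddr,
					dma_addr_t dma_handle)
	{
	#ifdef CONFIG_GRKERNSEC_HIDESYM
		/* Hide raw kernel and DMA addresses from readers. */
		seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
	#else
		seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
			   vaddr, (void *)(unsigned long)dma_handle);
	#endif
	}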
34132 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
34133 index 551262e..7551198 100644
34134 --- a/drivers/message/fusion/mptsas.c
34135 +++ b/drivers/message/fusion/mptsas.c
34136 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
34137 return 0;
34138 }
34139
34140 +static inline void
34141 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34142 +{
34143 + if (phy_info->port_details) {
34144 + phy_info->port_details->rphy = rphy;
34145 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34146 + ioc->name, rphy));
34147 + }
34148 +
34149 + if (rphy) {
34150 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34151 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34152 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34153 + ioc->name, rphy, rphy->dev.release));
34154 + }
34155 +}
34156 +
34157 /* no mutex */
34158 static void
34159 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
34160 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
34161 return NULL;
34162 }
34163
34164 -static inline void
34165 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34166 -{
34167 - if (phy_info->port_details) {
34168 - phy_info->port_details->rphy = rphy;
34169 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34170 - ioc->name, rphy));
34171 - }
34172 -
34173 - if (rphy) {
34174 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34175 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34176 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34177 - ioc->name, rphy, rphy->dev.release));
34178 - }
34179 -}
34180 -
34181 static inline struct sas_port *
34182 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34183 {
34184 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
34185 index 0c3ced7..1fe34ec 100644
34186 --- a/drivers/message/fusion/mptscsih.c
34187 +++ b/drivers/message/fusion/mptscsih.c
34188 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
34189
34190 h = shost_priv(SChost);
34191
34192 - if (h) {
34193 - if (h->info_kbuf == NULL)
34194 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34195 - return h->info_kbuf;
34196 - h->info_kbuf[0] = '\0';
34197 + if (!h)
34198 + return NULL;
34199
34200 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34201 - h->info_kbuf[size-1] = '\0';
34202 - }
34203 + if (h->info_kbuf == NULL)
34204 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34205 + return h->info_kbuf;
34206 + h->info_kbuf[0] = '\0';
34207 +
34208 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34209 + h->info_kbuf[size-1] = '\0';
34210
34211 return h->info_kbuf;
34212 }
34213 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
34214 index 6d115c7..58ff7fd 100644
34215 --- a/drivers/message/i2o/i2o_proc.c
34216 +++ b/drivers/message/i2o/i2o_proc.c
34217 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
34218 "Array Controller Device"
34219 };
34220
34221 -static char *chtostr(u8 * chars, int n)
34222 -{
34223 - char tmp[256];
34224 - tmp[0] = 0;
34225 - return strncat(tmp, (char *)chars, n);
34226 -}
34227 -
34228 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34229 char *group)
34230 {
34231 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
34232
34233 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34234 seq_printf(seq, "%-#8x", ddm_table.module_id);
34235 - seq_printf(seq, "%-29s",
34236 - chtostr(ddm_table.module_name_version, 28));
34237 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34238 seq_printf(seq, "%9d ", ddm_table.data_size);
34239 seq_printf(seq, "%8d", ddm_table.code_size);
34240
34241 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
34242
34243 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34244 seq_printf(seq, "%-#8x", dst->module_id);
34245 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34246 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34247 + seq_printf(seq, "%-.28s", dst->module_name_version);
34248 + seq_printf(seq, "%-.8s", dst->date);
34249 seq_printf(seq, "%8d ", dst->module_size);
34250 seq_printf(seq, "%8d ", dst->mpb_size);
34251 seq_printf(seq, "0x%04x", dst->module_flags);
34252 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
34253 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34254 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34255 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34256 - seq_printf(seq, "Vendor info : %s\n",
34257 - chtostr((u8 *) (work32 + 2), 16));
34258 - seq_printf(seq, "Product info : %s\n",
34259 - chtostr((u8 *) (work32 + 6), 16));
34260 - seq_printf(seq, "Description : %s\n",
34261 - chtostr((u8 *) (work32 + 10), 16));
34262 - seq_printf(seq, "Product rev. : %s\n",
34263 - chtostr((u8 *) (work32 + 14), 8));
34264 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34265 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34266 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34267 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34268
34269 seq_printf(seq, "Serial number : ");
34270 print_serial_number(seq, (u8 *) (work32 + 16),
34271 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
34272 }
34273
34274 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34275 - seq_printf(seq, "Module name : %s\n",
34276 - chtostr(result.module_name, 24));
34277 - seq_printf(seq, "Module revision : %s\n",
34278 - chtostr(result.module_rev, 8));
34279 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
34280 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34281
34282 seq_printf(seq, "Serial number : ");
34283 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
34284 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
34285 return 0;
34286 }
34287
34288 - seq_printf(seq, "Device name : %s\n",
34289 - chtostr(result.device_name, 64));
34290 - seq_printf(seq, "Service name : %s\n",
34291 - chtostr(result.service_name, 64));
34292 - seq_printf(seq, "Physical name : %s\n",
34293 - chtostr(result.physical_location, 64));
34294 - seq_printf(seq, "Instance number : %s\n",
34295 - chtostr(result.instance_number, 4));
34296 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
34297 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
34298 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34299 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34300
34301 return 0;
34302 }
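Annotation (not part of the patch): the removed chtostr() returned the address of a local array after the function had returned (undefined behavior) and relied on the source bytes being NUL-terminated. The replacement uses a printf precision so seq_printf() reads at most N bytes straight from the possibly unterminated field. A short standalone illustration of the formatting rule:

	#include <stdio.h>

	int main(void)
	{
		/* Not NUL-terminated: exactly 8 bytes of payload. */
		char field[8] = { 'M', 'O', 'D', 'U', 'L', 'E', '0', '1' };

		/* "%.8s" stops after 8 bytes even without a terminator. */
		printf("Module name : %.8s\n", field);

		/* "%.*s" takes the limit as an argument instead. */
		printf("Module name : %.*s\n", (int)sizeof(field), field);
		return 0;
	}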
34303 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34304 index a8c08f3..155fe3d 100644
34305 --- a/drivers/message/i2o/iop.c
34306 +++ b/drivers/message/i2o/iop.c
34307 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
34308
34309 spin_lock_irqsave(&c->context_list_lock, flags);
34310
34311 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34312 - atomic_inc(&c->context_list_counter);
34313 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34314 + atomic_inc_unchecked(&c->context_list_counter);
34315
34316 - entry->context = atomic_read(&c->context_list_counter);
34317 + entry->context = atomic_read_unchecked(&c->context_list_counter);
34318
34319 list_add(&entry->list, &c->context_list);
34320
34321 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34322
34323 #if BITS_PER_LONG == 64
34324 spin_lock_init(&c->context_list_lock);
34325 - atomic_set(&c->context_list_counter, 0);
34326 + atomic_set_unchecked(&c->context_list_counter, 0);
34327 INIT_LIST_HEAD(&c->context_list);
34328 #endif
34329
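Annotation (not part of the patch): this patch instruments atomic_t so that overflowing increments trap (reference-count overflow protection). Counters that are allowed to wrap, such as the context-list IDs above or the statistics converted in the sgi-gru hunks below, move to atomic_unchecked_t and the matching *_unchecked accessors, which behave like the plain operations minus the overflow check. A sketch of the pattern, assuming the atomic_unchecked_t API added elsewhere in this patch:

	#include <linux/atomic.h>
	#include <linux/types.h>

	/* Hypothetical wrapping sequence counter: overflow is expected and
	 * harmless, so the unchecked variant avoids the refcount trap. */
	static atomic_unchecked_t example_seq = ATOMIC_INIT(0);

	static u32 example_next_id(void)
	{
		return atomic_inc_return_unchecked(&example_seq);
	}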
34330 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34331 index 7ce65f4..e66e9bc 100644
34332 --- a/drivers/mfd/abx500-core.c
34333 +++ b/drivers/mfd/abx500-core.c
34334 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34335
34336 struct abx500_device_entry {
34337 struct list_head list;
34338 - struct abx500_ops ops;
34339 + abx500_ops_no_const ops;
34340 struct device *dev;
34341 };
34342
34343 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34344 index a9223ed..4127b13 100644
34345 --- a/drivers/mfd/janz-cmodio.c
34346 +++ b/drivers/mfd/janz-cmodio.c
34347 @@ -13,6 +13,7 @@
34348
34349 #include <linux/kernel.h>
34350 #include <linux/module.h>
34351 +#include <linux/slab.h>
34352 #include <linux/init.h>
34353 #include <linux/pci.h>
34354 #include <linux/interrupt.h>
34355 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34356 index a981e2a..5ca0c8b 100644
34357 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
34358 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34359 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34360 * the lid is closed. This leads to interrupts as soon as a little move
34361 * is done.
34362 */
34363 - atomic_inc(&lis3->count);
34364 + atomic_inc_unchecked(&lis3->count);
34365
34366 wake_up_interruptible(&lis3->misc_wait);
34367 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34368 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34369 if (lis3->pm_dev)
34370 pm_runtime_get_sync(lis3->pm_dev);
34371
34372 - atomic_set(&lis3->count, 0);
34373 + atomic_set_unchecked(&lis3->count, 0);
34374 return 0;
34375 }
34376
34377 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34378 add_wait_queue(&lis3->misc_wait, &wait);
34379 while (true) {
34380 set_current_state(TASK_INTERRUPTIBLE);
34381 - data = atomic_xchg(&lis3->count, 0);
34382 + data = atomic_xchg_unchecked(&lis3->count, 0);
34383 if (data)
34384 break;
34385
34386 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34387 struct lis3lv02d, miscdev);
34388
34389 poll_wait(file, &lis3->misc_wait, wait);
34390 - if (atomic_read(&lis3->count))
34391 + if (atomic_read_unchecked(&lis3->count))
34392 return POLLIN | POLLRDNORM;
34393 return 0;
34394 }
34395 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34396 index 2b1482a..5d33616 100644
34397 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
34398 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34399 @@ -266,7 +266,7 @@ struct lis3lv02d {
34400 struct input_polled_dev *idev; /* input device */
34401 struct platform_device *pdev; /* platform device */
34402 struct regulator_bulk_data regulators[2];
34403 - atomic_t count; /* interrupt count after last read */
34404 + atomic_unchecked_t count; /* interrupt count after last read */
34405 union axis_conversion ac; /* hw -> logical axis */
34406 int mapped_btns[3];
34407
34408 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34409 index 2f30bad..c4c13d0 100644
34410 --- a/drivers/misc/sgi-gru/gruhandles.c
34411 +++ b/drivers/misc/sgi-gru/gruhandles.c
34412 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34413 unsigned long nsec;
34414
34415 nsec = CLKS2NSEC(clks);
34416 - atomic_long_inc(&mcs_op_statistics[op].count);
34417 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
34418 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34419 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34420 if (mcs_op_statistics[op].max < nsec)
34421 mcs_op_statistics[op].max = nsec;
34422 }
34423 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34424 index 950dbe9..eeef0f8 100644
34425 --- a/drivers/misc/sgi-gru/gruprocfs.c
34426 +++ b/drivers/misc/sgi-gru/gruprocfs.c
34427 @@ -32,9 +32,9 @@
34428
34429 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34430
34431 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34432 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34433 {
34434 - unsigned long val = atomic_long_read(v);
34435 + unsigned long val = atomic_long_read_unchecked(v);
34436
34437 seq_printf(s, "%16lu %s\n", val, id);
34438 }
34439 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34440
34441 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34442 for (op = 0; op < mcsop_last; op++) {
34443 - count = atomic_long_read(&mcs_op_statistics[op].count);
34444 - total = atomic_long_read(&mcs_op_statistics[op].total);
34445 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34446 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34447 max = mcs_op_statistics[op].max;
34448 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34449 count ? total / count : 0, max);
34450 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34451 index 5c3ce24..4915ccb 100644
34452 --- a/drivers/misc/sgi-gru/grutables.h
34453 +++ b/drivers/misc/sgi-gru/grutables.h
34454 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34455 * GRU statistics.
34456 */
34457 struct gru_stats_s {
34458 - atomic_long_t vdata_alloc;
34459 - atomic_long_t vdata_free;
34460 - atomic_long_t gts_alloc;
34461 - atomic_long_t gts_free;
34462 - atomic_long_t gms_alloc;
34463 - atomic_long_t gms_free;
34464 - atomic_long_t gts_double_allocate;
34465 - atomic_long_t assign_context;
34466 - atomic_long_t assign_context_failed;
34467 - atomic_long_t free_context;
34468 - atomic_long_t load_user_context;
34469 - atomic_long_t load_kernel_context;
34470 - atomic_long_t lock_kernel_context;
34471 - atomic_long_t unlock_kernel_context;
34472 - atomic_long_t steal_user_context;
34473 - atomic_long_t steal_kernel_context;
34474 - atomic_long_t steal_context_failed;
34475 - atomic_long_t nopfn;
34476 - atomic_long_t asid_new;
34477 - atomic_long_t asid_next;
34478 - atomic_long_t asid_wrap;
34479 - atomic_long_t asid_reuse;
34480 - atomic_long_t intr;
34481 - atomic_long_t intr_cbr;
34482 - atomic_long_t intr_tfh;
34483 - atomic_long_t intr_spurious;
34484 - atomic_long_t intr_mm_lock_failed;
34485 - atomic_long_t call_os;
34486 - atomic_long_t call_os_wait_queue;
34487 - atomic_long_t user_flush_tlb;
34488 - atomic_long_t user_unload_context;
34489 - atomic_long_t user_exception;
34490 - atomic_long_t set_context_option;
34491 - atomic_long_t check_context_retarget_intr;
34492 - atomic_long_t check_context_unload;
34493 - atomic_long_t tlb_dropin;
34494 - atomic_long_t tlb_preload_page;
34495 - atomic_long_t tlb_dropin_fail_no_asid;
34496 - atomic_long_t tlb_dropin_fail_upm;
34497 - atomic_long_t tlb_dropin_fail_invalid;
34498 - atomic_long_t tlb_dropin_fail_range_active;
34499 - atomic_long_t tlb_dropin_fail_idle;
34500 - atomic_long_t tlb_dropin_fail_fmm;
34501 - atomic_long_t tlb_dropin_fail_no_exception;
34502 - atomic_long_t tfh_stale_on_fault;
34503 - atomic_long_t mmu_invalidate_range;
34504 - atomic_long_t mmu_invalidate_page;
34505 - atomic_long_t flush_tlb;
34506 - atomic_long_t flush_tlb_gru;
34507 - atomic_long_t flush_tlb_gru_tgh;
34508 - atomic_long_t flush_tlb_gru_zero_asid;
34509 + atomic_long_unchecked_t vdata_alloc;
34510 + atomic_long_unchecked_t vdata_free;
34511 + atomic_long_unchecked_t gts_alloc;
34512 + atomic_long_unchecked_t gts_free;
34513 + atomic_long_unchecked_t gms_alloc;
34514 + atomic_long_unchecked_t gms_free;
34515 + atomic_long_unchecked_t gts_double_allocate;
34516 + atomic_long_unchecked_t assign_context;
34517 + atomic_long_unchecked_t assign_context_failed;
34518 + atomic_long_unchecked_t free_context;
34519 + atomic_long_unchecked_t load_user_context;
34520 + atomic_long_unchecked_t load_kernel_context;
34521 + atomic_long_unchecked_t lock_kernel_context;
34522 + atomic_long_unchecked_t unlock_kernel_context;
34523 + atomic_long_unchecked_t steal_user_context;
34524 + atomic_long_unchecked_t steal_kernel_context;
34525 + atomic_long_unchecked_t steal_context_failed;
34526 + atomic_long_unchecked_t nopfn;
34527 + atomic_long_unchecked_t asid_new;
34528 + atomic_long_unchecked_t asid_next;
34529 + atomic_long_unchecked_t asid_wrap;
34530 + atomic_long_unchecked_t asid_reuse;
34531 + atomic_long_unchecked_t intr;
34532 + atomic_long_unchecked_t intr_cbr;
34533 + atomic_long_unchecked_t intr_tfh;
34534 + atomic_long_unchecked_t intr_spurious;
34535 + atomic_long_unchecked_t intr_mm_lock_failed;
34536 + atomic_long_unchecked_t call_os;
34537 + atomic_long_unchecked_t call_os_wait_queue;
34538 + atomic_long_unchecked_t user_flush_tlb;
34539 + atomic_long_unchecked_t user_unload_context;
34540 + atomic_long_unchecked_t user_exception;
34541 + atomic_long_unchecked_t set_context_option;
34542 + atomic_long_unchecked_t check_context_retarget_intr;
34543 + atomic_long_unchecked_t check_context_unload;
34544 + atomic_long_unchecked_t tlb_dropin;
34545 + atomic_long_unchecked_t tlb_preload_page;
34546 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34547 + atomic_long_unchecked_t tlb_dropin_fail_upm;
34548 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
34549 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
34550 + atomic_long_unchecked_t tlb_dropin_fail_idle;
34551 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
34552 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34553 + atomic_long_unchecked_t tfh_stale_on_fault;
34554 + atomic_long_unchecked_t mmu_invalidate_range;
34555 + atomic_long_unchecked_t mmu_invalidate_page;
34556 + atomic_long_unchecked_t flush_tlb;
34557 + atomic_long_unchecked_t flush_tlb_gru;
34558 + atomic_long_unchecked_t flush_tlb_gru_tgh;
34559 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34560
34561 - atomic_long_t copy_gpa;
34562 - atomic_long_t read_gpa;
34563 + atomic_long_unchecked_t copy_gpa;
34564 + atomic_long_unchecked_t read_gpa;
34565
34566 - atomic_long_t mesq_receive;
34567 - atomic_long_t mesq_receive_none;
34568 - atomic_long_t mesq_send;
34569 - atomic_long_t mesq_send_failed;
34570 - atomic_long_t mesq_noop;
34571 - atomic_long_t mesq_send_unexpected_error;
34572 - atomic_long_t mesq_send_lb_overflow;
34573 - atomic_long_t mesq_send_qlimit_reached;
34574 - atomic_long_t mesq_send_amo_nacked;
34575 - atomic_long_t mesq_send_put_nacked;
34576 - atomic_long_t mesq_page_overflow;
34577 - atomic_long_t mesq_qf_locked;
34578 - atomic_long_t mesq_qf_noop_not_full;
34579 - atomic_long_t mesq_qf_switch_head_failed;
34580 - atomic_long_t mesq_qf_unexpected_error;
34581 - atomic_long_t mesq_noop_unexpected_error;
34582 - atomic_long_t mesq_noop_lb_overflow;
34583 - atomic_long_t mesq_noop_qlimit_reached;
34584 - atomic_long_t mesq_noop_amo_nacked;
34585 - atomic_long_t mesq_noop_put_nacked;
34586 - atomic_long_t mesq_noop_page_overflow;
34587 + atomic_long_unchecked_t mesq_receive;
34588 + atomic_long_unchecked_t mesq_receive_none;
34589 + atomic_long_unchecked_t mesq_send;
34590 + atomic_long_unchecked_t mesq_send_failed;
34591 + atomic_long_unchecked_t mesq_noop;
34592 + atomic_long_unchecked_t mesq_send_unexpected_error;
34593 + atomic_long_unchecked_t mesq_send_lb_overflow;
34594 + atomic_long_unchecked_t mesq_send_qlimit_reached;
34595 + atomic_long_unchecked_t mesq_send_amo_nacked;
34596 + atomic_long_unchecked_t mesq_send_put_nacked;
34597 + atomic_long_unchecked_t mesq_page_overflow;
34598 + atomic_long_unchecked_t mesq_qf_locked;
34599 + atomic_long_unchecked_t mesq_qf_noop_not_full;
34600 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
34601 + atomic_long_unchecked_t mesq_qf_unexpected_error;
34602 + atomic_long_unchecked_t mesq_noop_unexpected_error;
34603 + atomic_long_unchecked_t mesq_noop_lb_overflow;
34604 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
34605 + atomic_long_unchecked_t mesq_noop_amo_nacked;
34606 + atomic_long_unchecked_t mesq_noop_put_nacked;
34607 + atomic_long_unchecked_t mesq_noop_page_overflow;
34608
34609 };
34610
34611 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34612 tghop_invalidate, mcsop_last};
34613
34614 struct mcs_op_statistic {
34615 - atomic_long_t count;
34616 - atomic_long_t total;
34617 + atomic_long_unchecked_t count;
34618 + atomic_long_unchecked_t total;
34619 unsigned long max;
34620 };
34621
34622 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34623
34624 #define STAT(id) do { \
34625 if (gru_options & OPT_STATS) \
34626 - atomic_long_inc(&gru_stats.id); \
34627 + atomic_long_inc_unchecked(&gru_stats.id); \
34628 } while (0)
34629
34630 #ifdef CONFIG_SGI_GRU_DEBUG
34631 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34632 index 851b2f2..a4ec097 100644
34633 --- a/drivers/misc/sgi-xp/xp.h
34634 +++ b/drivers/misc/sgi-xp/xp.h
34635 @@ -289,7 +289,7 @@ struct xpc_interface {
34636 xpc_notify_func, void *);
34637 void (*received) (short, int, void *);
34638 enum xp_retval (*partid_to_nasids) (short, void *);
34639 -};
34640 +} __no_const;
34641
34642 extern struct xpc_interface xpc_interface;
34643
34644 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34645 index b94d5f7..7f494c5 100644
34646 --- a/drivers/misc/sgi-xp/xpc.h
34647 +++ b/drivers/misc/sgi-xp/xpc.h
34648 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
34649 void (*received_payload) (struct xpc_channel *, void *);
34650 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34651 };
34652 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34653
34654 /* struct xpc_partition act_state values (for XPC HB) */
34655
34656 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34657 /* found in xpc_main.c */
34658 extern struct device *xpc_part;
34659 extern struct device *xpc_chan;
34660 -extern struct xpc_arch_operations xpc_arch_ops;
34661 +extern xpc_arch_operations_no_const xpc_arch_ops;
34662 extern int xpc_disengage_timelimit;
34663 extern int xpc_disengage_timedout;
34664 extern int xpc_activate_IRQ_rcvd;
34665 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34666 index 8d082b4..aa749ae 100644
34667 --- a/drivers/misc/sgi-xp/xpc_main.c
34668 +++ b/drivers/misc/sgi-xp/xpc_main.c
34669 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34670 .notifier_call = xpc_system_die,
34671 };
34672
34673 -struct xpc_arch_operations xpc_arch_ops;
34674 +xpc_arch_operations_no_const xpc_arch_ops;
34675
34676 /*
34677 * Timer function to enforce the timelimit on the partition disengage.
34678 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34679 index 6ebdc40..9edf5d8 100644
34680 --- a/drivers/mmc/host/sdhci-pci.c
34681 +++ b/drivers/mmc/host/sdhci-pci.c
34682 @@ -631,7 +631,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34683 .probe = via_probe,
34684 };
34685
34686 -static const struct pci_device_id pci_ids[] __devinitdata = {
34687 +static const struct pci_device_id pci_ids[] __devinitconst = {
34688 {
34689 .vendor = PCI_VENDOR_ID_RICOH,
34690 .device = PCI_DEVICE_ID_RICOH_R5C822,
34691 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34692 index 87a431c..4959b43 100644
34693 --- a/drivers/mtd/devices/doc2000.c
34694 +++ b/drivers/mtd/devices/doc2000.c
34695 @@ -764,7 +764,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34696
34697 /* The ECC will not be calculated correctly if less than 512 is written */
34698 /* DBB-
34699 - if (len != 0x200 && eccbuf)
34700 + if (len != 0x200)
34701 printk(KERN_WARNING
34702 "ECC needs a full sector write (adr: %lx size %lx)\n",
34703 (long) to, (long) len);
34704 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
34705 index 9eacf67..4534b5b 100644
34706 --- a/drivers/mtd/devices/doc2001.c
34707 +++ b/drivers/mtd/devices/doc2001.c
34708 @@ -384,7 +384,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
34709 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
34710
34711 /* Don't allow read past end of device */
34712 - if (from >= this->totlen)
34713 + if (from >= this->totlen || !len)
34714 return -EINVAL;
34715
34716 /* Don't allow a single read to cross a 512-byte block boundary */
34717 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34718 index 3984d48..28aa897 100644
34719 --- a/drivers/mtd/nand/denali.c
34720 +++ b/drivers/mtd/nand/denali.c
34721 @@ -26,6 +26,7 @@
34722 #include <linux/pci.h>
34723 #include <linux/mtd/mtd.h>
34724 #include <linux/module.h>
34725 +#include <linux/slab.h>
34726
34727 #include "denali.h"
34728
34729 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34730 index 51b9d6a..52af9a7 100644
34731 --- a/drivers/mtd/nftlmount.c
34732 +++ b/drivers/mtd/nftlmount.c
34733 @@ -24,6 +24,7 @@
34734 #include <asm/errno.h>
34735 #include <linux/delay.h>
34736 #include <linux/slab.h>
34737 +#include <linux/sched.h>
34738 #include <linux/mtd/mtd.h>
34739 #include <linux/mtd/nand.h>
34740 #include <linux/mtd/nftl.h>
34741 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34742 index 071f4c8..440862e 100644
34743 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
34744 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34745 @@ -2862,7 +2862,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34746 */
34747
34748 #define ATL2_PARAM(X, desc) \
34749 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34750 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34751 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34752 MODULE_PARM_DESC(X, desc);
34753 #else
34754 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34755 index 66da39f..5dc436d 100644
34756 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34757 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34758 @@ -473,7 +473,7 @@ struct bnx2x_rx_mode_obj {
34759
34760 int (*wait_comp)(struct bnx2x *bp,
34761 struct bnx2x_rx_mode_ramrod_params *p);
34762 -};
34763 +} __no_const;
34764
34765 /********************** Set multicast group ***********************************/
34766
34767 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34768 index aea8f72..fcebf75 100644
34769 --- a/drivers/net/ethernet/broadcom/tg3.h
34770 +++ b/drivers/net/ethernet/broadcom/tg3.h
34771 @@ -140,6 +140,7 @@
34772 #define CHIPREV_ID_5750_A0 0x4000
34773 #define CHIPREV_ID_5750_A1 0x4001
34774 #define CHIPREV_ID_5750_A3 0x4003
34775 +#define CHIPREV_ID_5750_C1 0x4201
34776 #define CHIPREV_ID_5750_C2 0x4202
34777 #define CHIPREV_ID_5752_A0_HW 0x5000
34778 #define CHIPREV_ID_5752_A0 0x6000
34779 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34780 index c4e8643..0979484 100644
34781 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34782 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34783 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34784 */
34785 struct l2t_skb_cb {
34786 arp_failure_handler_func arp_failure_handler;
34787 -};
34788 +} __no_const;
34789
34790 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34791
34792 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34793 index 4d71f5a..8004440 100644
34794 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
34795 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34796 @@ -5392,7 +5392,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34797 for (i=0; i<ETH_ALEN; i++) {
34798 tmp.addr[i] = dev->dev_addr[i];
34799 }
34800 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34801 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34802 break;
34803
34804 case DE4X5_SET_HWADDR: /* Set the hardware address */
34805 @@ -5432,7 +5432,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34806 spin_lock_irqsave(&lp->lock, flags);
34807 memcpy(&statbuf, &lp->pktStats, ioc->len);
34808 spin_unlock_irqrestore(&lp->lock, flags);
34809 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34810 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34811 return -EFAULT;
34812 break;
34813 }
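Annotation (not part of the patch): both de4x5 hunks above cap a user-controlled ioc->len against the size of the kernel object before copy_to_user(); previously an oversized length leaked adjacent kernel memory to userspace. A minimal sketch of the check with a hypothetical payload:

	#include <linux/uaccess.h>

	struct example_stats {
		unsigned long rx_packets;
		unsigned long tx_packets;
	};

	static int example_copy_stats(void __user *dst, unsigned int user_len,
				      const struct example_stats *stats)
	{
		if (user_len > sizeof(*stats))
			return -EFAULT;	/* matches the patch; -EINVAL is also common */
		if (copy_to_user(dst, stats, user_len))
			return -EFAULT;
		return 0;
	}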
34814 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34815 index 14d5b61..1398636 100644
34816 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
34817 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34818 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34819 {NULL}};
34820
34821
34822 -static const char *block_name[] __devinitdata = {
34823 +static const char *block_name[] __devinitconst = {
34824 "21140 non-MII",
34825 "21140 MII PHY",
34826 "21142 Serial PHY",
34827 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34828 index 52da7b2..4ddfe1c 100644
34829 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34830 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34831 @@ -236,7 +236,7 @@ struct pci_id_info {
34832 int drv_flags; /* Driver use, intended as capability flags. */
34833 };
34834
34835 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34836 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34837 { /* Sometime a Level-One switch card. */
34838 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34839 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34840 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34841 index 28a3a9b..d96cb63 100644
34842 --- a/drivers/net/ethernet/dlink/sundance.c
34843 +++ b/drivers/net/ethernet/dlink/sundance.c
34844 @@ -218,7 +218,7 @@ enum {
34845 struct pci_id_info {
34846 const char *name;
34847 };
34848 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34849 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34850 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34851 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34852 {"D-Link DFE-580TX 4 port Server Adapter"},
34853 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34854 index e703d64..d62ecf9 100644
34855 --- a/drivers/net/ethernet/emulex/benet/be_main.c
34856 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
34857 @@ -402,7 +402,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34858
34859 if (wrapped)
34860 newacc += 65536;
34861 - ACCESS_ONCE(*acc) = newacc;
34862 + ACCESS_ONCE_RW(*acc) = newacc;
34863 }
34864
34865 void be_parse_stats(struct be_adapter *adapter)
34866 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34867 index 47f85c3..82ab6c4 100644
34868 --- a/drivers/net/ethernet/faraday/ftgmac100.c
34869 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
34870 @@ -31,6 +31,8 @@
34871 #include <linux/netdevice.h>
34872 #include <linux/phy.h>
34873 #include <linux/platform_device.h>
34874 +#include <linux/interrupt.h>
34875 +#include <linux/irqreturn.h>
34876 #include <net/ip.h>
34877
34878 #include "ftgmac100.h"
34879 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34880 index bb336a0..4b472da 100644
34881 --- a/drivers/net/ethernet/faraday/ftmac100.c
34882 +++ b/drivers/net/ethernet/faraday/ftmac100.c
34883 @@ -31,6 +31,8 @@
34884 #include <linux/module.h>
34885 #include <linux/netdevice.h>
34886 #include <linux/platform_device.h>
34887 +#include <linux/interrupt.h>
34888 +#include <linux/irqreturn.h>
34889
34890 #include "ftmac100.h"
34891
34892 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34893 index c82d444..0007fb4 100644
34894 --- a/drivers/net/ethernet/fealnx.c
34895 +++ b/drivers/net/ethernet/fealnx.c
34896 @@ -150,7 +150,7 @@ struct chip_info {
34897 int flags;
34898 };
34899
34900 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34901 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34902 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34903 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34904 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34905 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34906 index e1159e5..e18684d 100644
34907 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34908 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34909 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
34910 {
34911 struct e1000_hw *hw = &adapter->hw;
34912 struct e1000_mac_info *mac = &hw->mac;
34913 - struct e1000_mac_operations *func = &mac->ops;
34914 + e1000_mac_operations_no_const *func = &mac->ops;
34915
34916 /* Set media type */
34917 switch (adapter->pdev->device) {
34918 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
34919 index a3e65fd..f451444 100644
34920 --- a/drivers/net/ethernet/intel/e1000e/82571.c
34921 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
34922 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
34923 {
34924 struct e1000_hw *hw = &adapter->hw;
34925 struct e1000_mac_info *mac = &hw->mac;
34926 - struct e1000_mac_operations *func = &mac->ops;
34927 + e1000_mac_operations_no_const *func = &mac->ops;
34928 u32 swsm = 0;
34929 u32 swsm2 = 0;
34930 bool force_clear_smbi = false;
34931 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34932 index 2967039..ca8c40c 100644
34933 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34934 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34935 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
34936 void (*write_vfta)(struct e1000_hw *, u32, u32);
34937 s32 (*read_mac_addr)(struct e1000_hw *);
34938 };
34939 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34940
34941 /*
34942 * When to use various PHY register access functions:
34943 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
34944 void (*power_up)(struct e1000_hw *);
34945 void (*power_down)(struct e1000_hw *);
34946 };
34947 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34948
34949 /* Function pointers for the NVM. */
34950 struct e1000_nvm_operations {
34951 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
34952 s32 (*validate)(struct e1000_hw *);
34953 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34954 };
34955 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34956
34957 struct e1000_mac_info {
34958 - struct e1000_mac_operations ops;
34959 + e1000_mac_operations_no_const ops;
34960 u8 addr[ETH_ALEN];
34961 u8 perm_addr[ETH_ALEN];
34962
34963 @@ -872,7 +875,7 @@ struct e1000_mac_info {
34964 };
34965
34966 struct e1000_phy_info {
34967 - struct e1000_phy_operations ops;
34968 + e1000_phy_operations_no_const ops;
34969
34970 enum e1000_phy_type type;
34971
34972 @@ -906,7 +909,7 @@ struct e1000_phy_info {
34973 };
34974
34975 struct e1000_nvm_info {
34976 - struct e1000_nvm_operations ops;
34977 + e1000_nvm_operations_no_const ops;
34978
34979 enum e1000_nvm_type type;
34980 enum e1000_nvm_override override;
34981 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34982 index f67cbd3..cef9e3d 100644
34983 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34984 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34985 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
34986 s32 (*read_mac_addr)(struct e1000_hw *);
34987 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34988 };
34989 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34990
34991 struct e1000_phy_operations {
34992 s32 (*acquire)(struct e1000_hw *);
34993 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
34994 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34995 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34996 };
34997 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34998
34999 struct e1000_nvm_operations {
35000 s32 (*acquire)(struct e1000_hw *);
35001 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
35002 s32 (*update)(struct e1000_hw *);
35003 s32 (*validate)(struct e1000_hw *);
35004 };
35005 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
35006
35007 struct e1000_info {
35008 s32 (*get_invariants)(struct e1000_hw *);
35009 @@ -350,7 +353,7 @@ struct e1000_info {
35010 extern const struct e1000_info e1000_82575_info;
35011
35012 struct e1000_mac_info {
35013 - struct e1000_mac_operations ops;
35014 + e1000_mac_operations_no_const ops;
35015
35016 u8 addr[6];
35017 u8 perm_addr[6];
35018 @@ -388,7 +391,7 @@ struct e1000_mac_info {
35019 };
35020
35021 struct e1000_phy_info {
35022 - struct e1000_phy_operations ops;
35023 + e1000_phy_operations_no_const ops;
35024
35025 enum e1000_phy_type type;
35026
35027 @@ -423,7 +426,7 @@ struct e1000_phy_info {
35028 };
35029
35030 struct e1000_nvm_info {
35031 - struct e1000_nvm_operations ops;
35032 + e1000_nvm_operations_no_const ops;
35033 enum e1000_nvm_type type;
35034 enum e1000_nvm_override override;
35035
35036 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
35037 s32 (*check_for_ack)(struct e1000_hw *, u16);
35038 s32 (*check_for_rst)(struct e1000_hw *, u16);
35039 };
35040 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35041
35042 struct e1000_mbx_stats {
35043 u32 msgs_tx;
35044 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
35045 };
35046
35047 struct e1000_mbx_info {
35048 - struct e1000_mbx_operations ops;
35049 + e1000_mbx_operations_no_const ops;
35050 struct e1000_mbx_stats stats;
35051 u32 timeout;
35052 u32 usec_delay;
35053 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
35054 index 57db3c6..aa825fc 100644
35055 --- a/drivers/net/ethernet/intel/igbvf/vf.h
35056 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
35057 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
35058 s32 (*read_mac_addr)(struct e1000_hw *);
35059 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
35060 };
35061 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35062
35063 struct e1000_mac_info {
35064 - struct e1000_mac_operations ops;
35065 + e1000_mac_operations_no_const ops;
35066 u8 addr[6];
35067 u8 perm_addr[6];
35068
35069 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
35070 s32 (*check_for_ack)(struct e1000_hw *);
35071 s32 (*check_for_rst)(struct e1000_hw *);
35072 };
35073 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35074
35075 struct e1000_mbx_stats {
35076 u32 msgs_tx;
35077 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
35078 };
35079
35080 struct e1000_mbx_info {
35081 - struct e1000_mbx_operations ops;
35082 + e1000_mbx_operations_no_const ops;
35083 struct e1000_mbx_stats stats;
35084 u32 timeout;
35085 u32 usec_delay;
35086 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35087 index 9b95bef..7e254ee 100644
35088 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35089 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35090 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
35091 s32 (*update_checksum)(struct ixgbe_hw *);
35092 u16 (*calc_checksum)(struct ixgbe_hw *);
35093 };
35094 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
35095
35096 struct ixgbe_mac_operations {
35097 s32 (*init_hw)(struct ixgbe_hw *);
35098 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
35099 /* Manageability interface */
35100 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
35101 };
35102 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35103
35104 struct ixgbe_phy_operations {
35105 s32 (*identify)(struct ixgbe_hw *);
35106 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
35107 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
35108 s32 (*check_overtemp)(struct ixgbe_hw *);
35109 };
35110 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
35111
35112 struct ixgbe_eeprom_info {
35113 - struct ixgbe_eeprom_operations ops;
35114 + ixgbe_eeprom_operations_no_const ops;
35115 enum ixgbe_eeprom_type type;
35116 u32 semaphore_delay;
35117 u16 word_size;
35118 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
35119
35120 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
35121 struct ixgbe_mac_info {
35122 - struct ixgbe_mac_operations ops;
35123 + ixgbe_mac_operations_no_const ops;
35124 enum ixgbe_mac_type type;
35125 u8 addr[ETH_ALEN];
35126 u8 perm_addr[ETH_ALEN];
35127 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
35128 };
35129
35130 struct ixgbe_phy_info {
35131 - struct ixgbe_phy_operations ops;
35132 + ixgbe_phy_operations_no_const ops;
35133 struct mdio_if_info mdio;
35134 enum ixgbe_phy_type type;
35135 u32 id;
35136 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
35137 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35138 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
35139 };
35140 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35141
35142 struct ixgbe_mbx_stats {
35143 u32 msgs_tx;
35144 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
35145 };
35146
35147 struct ixgbe_mbx_info {
35148 - struct ixgbe_mbx_operations ops;
35149 + ixgbe_mbx_operations_no_const ops;
35150 struct ixgbe_mbx_stats stats;
35151 u32 timeout;
35152 u32 usec_delay;
35153 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
35154 index 25c951d..cc7cf33 100644
35155 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35156 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35157 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35158 s32 (*clear_vfta)(struct ixgbe_hw *);
35159 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
35160 };
35161 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35162
35163 enum ixgbe_mac_type {
35164 ixgbe_mac_unknown = 0,
35165 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35166 };
35167
35168 struct ixgbe_mac_info {
35169 - struct ixgbe_mac_operations ops;
35170 + ixgbe_mac_operations_no_const ops;
35171 u8 addr[6];
35172 u8 perm_addr[6];
35173
35174 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35175 s32 (*check_for_ack)(struct ixgbe_hw *);
35176 s32 (*check_for_rst)(struct ixgbe_hw *);
35177 };
35178 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35179
35180 struct ixgbe_mbx_stats {
35181 u32 msgs_tx;
35182 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35183 };
35184
35185 struct ixgbe_mbx_info {
35186 - struct ixgbe_mbx_operations ops;
35187 + ixgbe_mbx_operations_no_const ops;
35188 struct ixgbe_mbx_stats stats;
35189 u32 timeout;
35190 u32 udelay;
35191 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
35192 index 8bf22b6..7f5baaa 100644
35193 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
35194 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
35195 @@ -41,6 +41,7 @@
35196 #include <linux/slab.h>
35197 #include <linux/io-mapping.h>
35198 #include <linux/delay.h>
35199 +#include <linux/sched.h>
35200
35201 #include <linux/mlx4/device.h>
35202 #include <linux/mlx4/doorbell.h>
35203 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35204 index 5046a64..71ca936 100644
35205 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35206 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35207 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35208 void (*link_down)(struct __vxge_hw_device *devh);
35209 void (*crit_err)(struct __vxge_hw_device *devh,
35210 enum vxge_hw_event type, u64 ext_data);
35211 -};
35212 +} __no_const;
35213
35214 /*
35215 * struct __vxge_hw_blockpool_entry - Block private data structure
35216 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35217 index 4a518a3..936b334 100644
35218 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35219 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35220 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35221 struct vxge_hw_mempool_dma *dma_object,
35222 u32 index,
35223 u32 is_last);
35224 -};
35225 +} __no_const;
35226
35227 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35228 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35229 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
35230 index bbacb37..d60887d 100644
35231 --- a/drivers/net/ethernet/realtek/r8169.c
35232 +++ b/drivers/net/ethernet/realtek/r8169.c
35233 @@ -695,17 +695,17 @@ struct rtl8169_private {
35234 struct mdio_ops {
35235 void (*write)(void __iomem *, int, int);
35236 int (*read)(void __iomem *, int);
35237 - } mdio_ops;
35238 + } __no_const mdio_ops;
35239
35240 struct pll_power_ops {
35241 void (*down)(struct rtl8169_private *);
35242 void (*up)(struct rtl8169_private *);
35243 - } pll_power_ops;
35244 + } __no_const pll_power_ops;
35245
35246 struct jumbo_ops {
35247 void (*enable)(struct rtl8169_private *);
35248 void (*disable)(struct rtl8169_private *);
35249 - } jumbo_ops;
35250 + } __no_const jumbo_ops;
35251
35252 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35253 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35254 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
35255 index 5b118cd..858b523 100644
35256 --- a/drivers/net/ethernet/sis/sis190.c
35257 +++ b/drivers/net/ethernet/sis/sis190.c
35258 @@ -1622,7 +1622,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35259 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35260 struct net_device *dev)
35261 {
35262 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35263 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35264 struct sis190_private *tp = netdev_priv(dev);
35265 struct pci_dev *isa_bridge;
35266 u8 reg, tmp8;
35267 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35268 index c07cfe9..81cbf7e 100644
35269 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35270 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35271 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
35272
35273 writel(value, ioaddr + MMC_CNTRL);
35274
35275 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35276 - MMC_CNTRL, value);
35277 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35278 +// MMC_CNTRL, value);
35279 }
35280
35281 /* To mask all all interrupts.*/
35282 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35283 index 6ee593a..3f513b1 100644
35284 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35285 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35286 @@ -1585,7 +1585,7 @@ static const struct file_operations stmmac_rings_status_fops = {
35287 .open = stmmac_sysfs_ring_open,
35288 .read = seq_read,
35289 .llseek = seq_lseek,
35290 - .release = seq_release,
35291 + .release = single_release,
35292 };
35293
35294 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
35295 @@ -1657,7 +1657,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
35296 .open = stmmac_sysfs_dma_cap_open,
35297 .read = seq_read,
35298 .llseek = seq_lseek,
35299 - .release = seq_release,
35300 + .release = single_release,
35301 };
35302
35303 static int stmmac_init_fs(struct net_device *dev)
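Annotation (not part of the patch): the two debugfs file_operations above are opened with single_open() but were released with seq_release(), which frees the seq_file yet leaks the seq_operations that single_open() allocates; single_open() must always be paired with single_release(). A minimal sketch of the matched pair, with a hypothetical show routine:

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *seq, void *v)
	{
		seq_puts(seq, "example\n");
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, inode->i_private);
	}

	static const struct file_operations example_fops = {
		.owner   = THIS_MODULE,
		.open    = example_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,	/* pairs with single_open() */
	};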
35304 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
35305 index dec5836..6d4db7d 100644
35306 --- a/drivers/net/hyperv/hyperv_net.h
35307 +++ b/drivers/net/hyperv/hyperv_net.h
35308 @@ -97,7 +97,7 @@ struct rndis_device {
35309
35310 enum rndis_device_state state;
35311 bool link_state;
35312 - atomic_t new_req_id;
35313 + atomic_unchecked_t new_req_id;
35314
35315 spinlock_t request_lock;
35316 struct list_head req_list;
35317 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
35318 index 133b7fb..d58c559 100644
35319 --- a/drivers/net/hyperv/rndis_filter.c
35320 +++ b/drivers/net/hyperv/rndis_filter.c
35321 @@ -96,7 +96,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35322 * template
35323 */
35324 set = &rndis_msg->msg.set_req;
35325 - set->req_id = atomic_inc_return(&dev->new_req_id);
35326 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35327
35328 /* Add to the request list */
35329 spin_lock_irqsave(&dev->request_lock, flags);
35330 @@ -627,7 +627,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35331
35332 /* Setup the rndis set */
35333 halt = &request->request_msg.msg.halt_req;
35334 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35335 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35336
35337 /* Ignore return since this msg is optional. */
35338 rndis_filter_send_request(dev, request);
35339 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
35340 index 58dc117..f140c77 100644
35341 --- a/drivers/net/macvtap.c
35342 +++ b/drivers/net/macvtap.c
35343 @@ -526,6 +526,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
35344 }
35345 base = (unsigned long)from->iov_base + offset1;
35346 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
35347 + if (i + size >= MAX_SKB_FRAGS)
35348 + return -EFAULT;
35349 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
35350 if ((num_pages != size) ||
35351 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
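Annotation (not part of the patch): the added test above makes sure the computed page count still fits in the caller's fixed-size page array before get_user_pages_fast() is invoked with &page[i]; otherwise a carefully sized iovec could index past the array. A minimal sketch of the bound with hypothetical names:

	#include <linux/errno.h>
	#include <linux/mm.h>

	#define EXAMPLE_MAX_FRAGS 17	/* stands in for MAX_SKB_FRAGS */

	static int example_pin_user_pages(unsigned long base, int size,
					  struct page **pages, int used)
	{
		/* Refuse to index past the end of pages[]. */
		if (used + size > EXAMPLE_MAX_FRAGS)
			return -EFAULT;
		return get_user_pages_fast(base, size, 0, &pages[used]);
	}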
35352 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
35353 index 3ed983c..a1bb418 100644
35354 --- a/drivers/net/ppp/ppp_generic.c
35355 +++ b/drivers/net/ppp/ppp_generic.c
35356 @@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35357 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35358 struct ppp_stats stats;
35359 struct ppp_comp_stats cstats;
35360 - char *vers;
35361
35362 switch (cmd) {
35363 case SIOCGPPPSTATS:
35364 @@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35365 break;
35366
35367 case SIOCGPPPVER:
35368 - vers = PPP_VERSION;
35369 - if (copy_to_user(addr, vers, strlen(vers) + 1))
35370 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35371 break;
35372 err = 0;
35373 break;
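Annotation (not part of the patch): the SIOCGPPPVER change above copies sizeof(PPP_VERSION) bytes of the string literal directly rather than taking a pointer and calling strlen() at run time; sizeof on a string literal is a compile-time constant and already counts the terminating NUL. A short standalone illustration:

	#include <stdio.h>
	#include <string.h>

	#define EXAMPLE_VERSION "2.4.2"

	int main(void)
	{
		/* sizeof counts the trailing NUL, strlen does not. */
		printf("sizeof = %zu, strlen + 1 = %zu\n",
		       sizeof(EXAMPLE_VERSION), strlen(EXAMPLE_VERSION) + 1);
		return 0;
	}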
35374 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
35375 index 515f122..41dd273 100644
35376 --- a/drivers/net/tokenring/abyss.c
35377 +++ b/drivers/net/tokenring/abyss.c
35378 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
35379
35380 static int __init abyss_init (void)
35381 {
35382 - abyss_netdev_ops = tms380tr_netdev_ops;
35383 + pax_open_kernel();
35384 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35385
35386 - abyss_netdev_ops.ndo_open = abyss_open;
35387 - abyss_netdev_ops.ndo_stop = abyss_close;
35388 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35389 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35390 + pax_close_kernel();
35391
35392 return pci_register_driver(&abyss_driver);
35393 }
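Annotation (not part of the patch): the tokenring init paths in this and the next three hunks write into netdev_ops structures that this patch makes read-only, so the writes are bracketed by pax_open_kernel()/pax_close_kernel() and go through a void ** cast that the compiler accepts for the constified fields. A sketch of the pattern with a hypothetical ops struct, assuming the pax_open_kernel()/pax_close_kernel() helpers provided elsewhere in this patch:

	#include <linux/string.h>

	struct example_ops {
		int (*ndo_open)(void);
		int (*ndo_stop)(void);
	};

	static struct example_ops example_ops;		/* constified by the plugin */
	static const struct example_ops template_ops;	/* shared template */

	static int example_open(void)  { return 0; }
	static int example_close(void) { return 0; }

	static void example_init_ops(void)
	{
		pax_open_kernel();	/* temporarily allow writes to r/o data */
		memcpy((void *)&example_ops, &template_ops, sizeof(template_ops));
		*(void **)&example_ops.ndo_open = example_open;
		*(void **)&example_ops.ndo_stop = example_close;
		pax_close_kernel();
	}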
35394 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
35395 index 6153cfd..cf69c1c 100644
35396 --- a/drivers/net/tokenring/madgemc.c
35397 +++ b/drivers/net/tokenring/madgemc.c
35398 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
35399
35400 static int __init madgemc_init (void)
35401 {
35402 - madgemc_netdev_ops = tms380tr_netdev_ops;
35403 - madgemc_netdev_ops.ndo_open = madgemc_open;
35404 - madgemc_netdev_ops.ndo_stop = madgemc_close;
35405 + pax_open_kernel();
35406 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35407 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35408 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35409 + pax_close_kernel();
35410
35411 return mca_register_driver (&madgemc_driver);
35412 }
35413 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
35414 index 8d362e6..f91cc52 100644
35415 --- a/drivers/net/tokenring/proteon.c
35416 +++ b/drivers/net/tokenring/proteon.c
35417 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
35418 struct platform_device *pdev;
35419 int i, num = 0, err = 0;
35420
35421 - proteon_netdev_ops = tms380tr_netdev_ops;
35422 - proteon_netdev_ops.ndo_open = proteon_open;
35423 - proteon_netdev_ops.ndo_stop = tms380tr_close;
35424 + pax_open_kernel();
35425 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35426 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35427 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35428 + pax_close_kernel();
35429
35430 err = platform_driver_register(&proteon_driver);
35431 if (err)
35432 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
35433 index 46db5c5..37c1536 100644
35434 --- a/drivers/net/tokenring/skisa.c
35435 +++ b/drivers/net/tokenring/skisa.c
35436 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
35437 struct platform_device *pdev;
35438 int i, num = 0, err = 0;
35439
35440 - sk_isa_netdev_ops = tms380tr_netdev_ops;
35441 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
35442 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35443 + pax_open_kernel();
35444 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35445 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35446 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35447 + pax_close_kernel();
35448
35449 err = platform_driver_register(&sk_isa_driver);
35450 if (err)
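
[Editor's note] The four tokenring hunks above (abyss, madgemc, proteon, skisa) apply one and the same transformation: instead of filling in a writable copy of tms380tr_netdev_ops at init time, the ops structure is treated as read-only data and is only written between pax_open_kernel()/pax_close_kernel(), with the function-pointer members updated through explicit void-pointer casts. A minimal sketch of the idiom, assuming the helpers introduced elsewhere in this patch; foo_* names are placeholders, not from the tree. The same idiom reappears below for mac80211_hwsim, il3945 (3945-mac.c), max8660 and mc13892.

/* Illustrative sketch only: the open/close-kernel idiom used in the hunks above.
 * pax_open_kernel()/pax_close_kernel() come from the PaX/KERNEXEC support added
 * by this patch; foo_netdev_ops and foo_open/foo_close are placeholder names. */
static struct net_device_ops foo_netdev_ops;	/* kept read-only under this patch */

static int __init foo_init(void)
{
	pax_open_kernel();	/* temporarily allow writes to read-only kernel data */
	memcpy((void *)&foo_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
	*(void **)&foo_netdev_ops.ndo_open = foo_open;	/* cast past the enforced const */
	*(void **)&foo_netdev_ops.ndo_stop = foo_close;
	pax_close_kernel();	/* restore write protection */

	return 0;
}
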
35451 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35452 index e1324b4..e1b0041 100644
35453 --- a/drivers/net/usb/hso.c
35454 +++ b/drivers/net/usb/hso.c
35455 @@ -71,7 +71,7 @@
35456 #include <asm/byteorder.h>
35457 #include <linux/serial_core.h>
35458 #include <linux/serial.h>
35459 -
35460 +#include <asm/local.h>
35461
35462 #define MOD_AUTHOR "Option Wireless"
35463 #define MOD_DESCRIPTION "USB High Speed Option driver"
35464 @@ -257,7 +257,7 @@ struct hso_serial {
35465
35466 /* from usb_serial_port */
35467 struct tty_struct *tty;
35468 - int open_count;
35469 + local_t open_count;
35470 spinlock_t serial_lock;
35471
35472 int (*write_data) (struct hso_serial *serial);
35473 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35474 struct urb *urb;
35475
35476 urb = serial->rx_urb[0];
35477 - if (serial->open_count > 0) {
35478 + if (local_read(&serial->open_count) > 0) {
35479 count = put_rxbuf_data(urb, serial);
35480 if (count == -1)
35481 return;
35482 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35483 DUMP1(urb->transfer_buffer, urb->actual_length);
35484
35485 /* Anyone listening? */
35486 - if (serial->open_count == 0)
35487 + if (local_read(&serial->open_count) == 0)
35488 return;
35489
35490 if (status == 0) {
35491 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35492 spin_unlock_irq(&serial->serial_lock);
35493
35494 /* check for port already opened, if not set the termios */
35495 - serial->open_count++;
35496 - if (serial->open_count == 1) {
35497 + if (local_inc_return(&serial->open_count) == 1) {
35498 serial->rx_state = RX_IDLE;
35499 /* Force default termio settings */
35500 _hso_serial_set_termios(tty, NULL);
35501 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35502 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35503 if (result) {
35504 hso_stop_serial_device(serial->parent);
35505 - serial->open_count--;
35506 + local_dec(&serial->open_count);
35507 kref_put(&serial->parent->ref, hso_serial_ref_free);
35508 }
35509 } else {
35510 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35511
35512 /* reset the rts and dtr */
35513 /* do the actual close */
35514 - serial->open_count--;
35515 + local_dec(&serial->open_count);
35516
35517 - if (serial->open_count <= 0) {
35518 - serial->open_count = 0;
35519 + if (local_read(&serial->open_count) <= 0) {
35520 + local_set(&serial->open_count, 0);
35521 spin_lock_irq(&serial->serial_lock);
35522 if (serial->tty == tty) {
35523 serial->tty->driver_data = NULL;
35524 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35525
35526 /* the actual setup */
35527 spin_lock_irqsave(&serial->serial_lock, flags);
35528 - if (serial->open_count)
35529 + if (local_read(&serial->open_count))
35530 _hso_serial_set_termios(tty, old);
35531 else
35532 tty->termios = old;
35533 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35534 D1("Pending read interrupt on port %d\n", i);
35535 spin_lock(&serial->serial_lock);
35536 if (serial->rx_state == RX_IDLE &&
35537 - serial->open_count > 0) {
35538 + local_read(&serial->open_count) > 0) {
35539 /* Setup and send a ctrl req read on
35540 * port i */
35541 if (!serial->rx_urb_filled[0]) {
35542 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35543 /* Start all serial ports */
35544 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35545 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35546 - if (dev2ser(serial_table[i])->open_count) {
35547 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35548 result =
35549 hso_start_serial_device(serial_table[i], GFP_NOIO);
35550 hso_kick_transmit(dev2ser(serial_table[i]));
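
[Editor's note] The hso changes replace the plain int open_count with a local_t and route every access through the local_* accessors used in the hunks above. A condensed sketch of the resulting open/close flow; the struct and function names below are hypothetical, the accessors are the ones the patch uses.

#include <asm/local.h>

struct example_serial {
	local_t open_count;	/* was: int open_count */
	spinlock_t serial_lock;
};

static int example_open(struct example_serial *serial)
{
	/* first opener performs one-time setup; local_inc_return() keeps the
	 * counter consistent with the URB/interrupt paths that read it */
	if (local_inc_return(&serial->open_count) == 1)
		return 1;	/* caller does the one-time port setup */
	return 0;
}

static void example_close(struct example_serial *serial)
{
	local_dec(&serial->open_count);
	if (local_read(&serial->open_count) <= 0)
		local_set(&serial->open_count, 0);	/* clamp, as in hso_serial_close() */
}
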
35551 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35552 index efc0111..79c8f5b 100644
35553 --- a/drivers/net/wireless/ath/ath.h
35554 +++ b/drivers/net/wireless/ath/ath.h
35555 @@ -119,6 +119,7 @@ struct ath_ops {
35556 void (*write_flush) (void *);
35557 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35558 };
35559 +typedef struct ath_ops __no_const ath_ops_no_const;
35560
35561 struct ath_common;
35562 struct ath_bus_ops;
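
[Editor's note] The ath.h hunk introduces ath_ops_no_const so that struct ath_hw (see the hw.h hunk further down) can keep a writable per-device copy of the register-access callbacks while struct ath_ops itself remains eligible for being made read-only. __no_const is an annotation supplied elsewhere in this patch; the sketch below only shows how the typedef is meant to be consumed. example_hw and example_attach are placeholders; rmw is a real ath_ops member shown above.

/* Sketch: __no_const marks an instance that must stay writable even where
 * the rest of the patch would otherwise treat such ops structs as const. */
typedef struct ath_ops __no_const ath_ops_no_const;

struct example_hw {
	ath_ops_no_const reg_ops;	/* per-device copy, filled in at attach time */
};

static void example_attach(struct example_hw *hw,
			   u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr))
{
	hw->reg_ops.rmw = rmw;	/* legal: reg_ops was exempted from constification */
}
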
35563 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35564 index 7b6417b..ab5db98 100644
35565 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35566 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35567 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35568 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35569 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35570
35571 - ACCESS_ONCE(ads->ds_link) = i->link;
35572 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35573 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
35574 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35575
35576 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35577 ctl6 = SM(i->keytype, AR_EncrType);
35578 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35579
35580 if ((i->is_first || i->is_last) &&
35581 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35582 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35583 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35584 | set11nTries(i->rates, 1)
35585 | set11nTries(i->rates, 2)
35586 | set11nTries(i->rates, 3)
35587 | (i->dur_update ? AR_DurUpdateEna : 0)
35588 | SM(0, AR_BurstDur);
35589
35590 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35591 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35592 | set11nRate(i->rates, 1)
35593 | set11nRate(i->rates, 2)
35594 | set11nRate(i->rates, 3);
35595 } else {
35596 - ACCESS_ONCE(ads->ds_ctl2) = 0;
35597 - ACCESS_ONCE(ads->ds_ctl3) = 0;
35598 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35599 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35600 }
35601
35602 if (!i->is_first) {
35603 - ACCESS_ONCE(ads->ds_ctl0) = 0;
35604 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35605 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35606 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35607 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35608 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35609 return;
35610 }
35611
35612 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35613 break;
35614 }
35615
35616 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35617 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35618 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35619 | SM(i->txpower, AR_XmitPower)
35620 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35621 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35622 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35623 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35624
35625 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35626 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35627 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35628 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35629
35630 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35631 return;
35632
35633 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35634 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35635 | set11nPktDurRTSCTS(i->rates, 1);
35636
35637 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35638 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35639 | set11nPktDurRTSCTS(i->rates, 3);
35640
35641 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35642 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35643 | set11nRateFlags(i->rates, 1)
35644 | set11nRateFlags(i->rates, 2)
35645 | set11nRateFlags(i->rates, 3)
35646 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35647 index 09b8c9d..905339e 100644
35648 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35649 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35650 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35651 (i->qcu << AR_TxQcuNum_S) | 0x17;
35652
35653 checksum += val;
35654 - ACCESS_ONCE(ads->info) = val;
35655 + ACCESS_ONCE_RW(ads->info) = val;
35656
35657 checksum += i->link;
35658 - ACCESS_ONCE(ads->link) = i->link;
35659 + ACCESS_ONCE_RW(ads->link) = i->link;
35660
35661 checksum += i->buf_addr[0];
35662 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35663 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35664 checksum += i->buf_addr[1];
35665 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35666 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35667 checksum += i->buf_addr[2];
35668 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35669 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35670 checksum += i->buf_addr[3];
35671 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35672 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35673
35674 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35675 - ACCESS_ONCE(ads->ctl3) = val;
35676 + ACCESS_ONCE_RW(ads->ctl3) = val;
35677 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35678 - ACCESS_ONCE(ads->ctl5) = val;
35679 + ACCESS_ONCE_RW(ads->ctl5) = val;
35680 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35681 - ACCESS_ONCE(ads->ctl7) = val;
35682 + ACCESS_ONCE_RW(ads->ctl7) = val;
35683 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35684 - ACCESS_ONCE(ads->ctl9) = val;
35685 + ACCESS_ONCE_RW(ads->ctl9) = val;
35686
35687 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35688 - ACCESS_ONCE(ads->ctl10) = checksum;
35689 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
35690
35691 if (i->is_first || i->is_last) {
35692 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35693 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35694 | set11nTries(i->rates, 1)
35695 | set11nTries(i->rates, 2)
35696 | set11nTries(i->rates, 3)
35697 | (i->dur_update ? AR_DurUpdateEna : 0)
35698 | SM(0, AR_BurstDur);
35699
35700 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35701 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35702 | set11nRate(i->rates, 1)
35703 | set11nRate(i->rates, 2)
35704 | set11nRate(i->rates, 3);
35705 } else {
35706 - ACCESS_ONCE(ads->ctl13) = 0;
35707 - ACCESS_ONCE(ads->ctl14) = 0;
35708 + ACCESS_ONCE_RW(ads->ctl13) = 0;
35709 + ACCESS_ONCE_RW(ads->ctl14) = 0;
35710 }
35711
35712 ads->ctl20 = 0;
35713 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35714
35715 ctl17 = SM(i->keytype, AR_EncrType);
35716 if (!i->is_first) {
35717 - ACCESS_ONCE(ads->ctl11) = 0;
35718 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35719 - ACCESS_ONCE(ads->ctl15) = 0;
35720 - ACCESS_ONCE(ads->ctl16) = 0;
35721 - ACCESS_ONCE(ads->ctl17) = ctl17;
35722 - ACCESS_ONCE(ads->ctl18) = 0;
35723 - ACCESS_ONCE(ads->ctl19) = 0;
35724 + ACCESS_ONCE_RW(ads->ctl11) = 0;
35725 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35726 + ACCESS_ONCE_RW(ads->ctl15) = 0;
35727 + ACCESS_ONCE_RW(ads->ctl16) = 0;
35728 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35729 + ACCESS_ONCE_RW(ads->ctl18) = 0;
35730 + ACCESS_ONCE_RW(ads->ctl19) = 0;
35731 return;
35732 }
35733
35734 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35735 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35736 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35737 | SM(i->txpower, AR_XmitPower)
35738 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35739 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35740 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35741 ctl12 |= SM(val, AR_PAPRDChainMask);
35742
35743 - ACCESS_ONCE(ads->ctl12) = ctl12;
35744 - ACCESS_ONCE(ads->ctl17) = ctl17;
35745 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35746 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35747
35748 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35749 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35750 | set11nPktDurRTSCTS(i->rates, 1);
35751
35752 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35753 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35754 | set11nPktDurRTSCTS(i->rates, 3);
35755
35756 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35757 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35758 | set11nRateFlags(i->rates, 1)
35759 | set11nRateFlags(i->rates, 2)
35760 | set11nRateFlags(i->rates, 3)
35761 | SM(i->rtscts_rate, AR_RTSCTSRate);
35762
35763 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35764 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35765 }
35766
35767 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
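
[Editor's note] Both ar9002_mac.c and ar9003_mac.c switch every descriptor store from ACCESS_ONCE() to ACCESS_ONCE_RW(). The macro definitions are not part of these hunks; a plausible sketch, assuming ACCESS_ONCE_RW simply omits the const qualifier that the patch adds to ACCESS_ONCE() elsewhere, would be:

/* Assumption: definitions are not shown in this section of the patch. */
#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))	/* read-only view */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))		/* writable view */

/* usage, as in ar9003_set_txdesc() above: */
ACCESS_ONCE_RW(ads->ctl12) = ctl12;
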
35768 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35769 index c8261d4..8d88929 100644
35770 --- a/drivers/net/wireless/ath/ath9k/hw.h
35771 +++ b/drivers/net/wireless/ath/ath9k/hw.h
35772 @@ -773,7 +773,7 @@ struct ath_hw_private_ops {
35773
35774 /* ANI */
35775 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35776 -};
35777 +} __no_const;
35778
35779 /**
35780 * struct ath_hw_ops - callbacks used by hardware code and driver code
35781 @@ -803,7 +803,7 @@ struct ath_hw_ops {
35782 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35783 struct ath_hw_antcomb_conf *antconf);
35784
35785 -};
35786 +} __no_const;
35787
35788 struct ath_nf_limits {
35789 s16 max;
35790 @@ -823,7 +823,7 @@ enum ath_cal_list {
35791 #define AH_FASTCC 0x4
35792
35793 struct ath_hw {
35794 - struct ath_ops reg_ops;
35795 + ath_ops_no_const reg_ops;
35796
35797 struct ieee80211_hw *hw;
35798 struct ath_common common;
35799 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35800 index af00e2c..ab04d34 100644
35801 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35802 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35803 @@ -545,7 +545,7 @@ struct phy_func_ptr {
35804 void (*carrsuppr)(struct brcms_phy *);
35805 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35806 void (*detach)(struct brcms_phy *);
35807 -};
35808 +} __no_const;
35809
35810 struct brcms_phy {
35811 struct brcms_phy_pub pubpi_ro;
35812 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35813 index a2ec369..36fdf14 100644
35814 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
35815 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35816 @@ -3646,7 +3646,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35817 */
35818 if (il3945_mod_params.disable_hw_scan) {
35819 D_INFO("Disabling hw_scan\n");
35820 - il3945_hw_ops.hw_scan = NULL;
35821 + pax_open_kernel();
35822 + *(void **)&il3945_hw_ops.hw_scan = NULL;
35823 + pax_close_kernel();
35824 }
35825
35826 D_INFO("*** LOAD DRIVER ***\n");
35827 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
35828 index f8fc239..8cade22 100644
35829 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
35830 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
35831 @@ -86,8 +86,8 @@ do { \
35832 } while (0)
35833
35834 #else
35835 -#define IWL_DEBUG(m, level, fmt, args...)
35836 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
35837 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
35838 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
35839 #define iwl_print_hex_dump(m, level, p, len)
35840 #define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
35841 do { \
35842 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35843 index 4b9e730..7603659 100644
35844 --- a/drivers/net/wireless/mac80211_hwsim.c
35845 +++ b/drivers/net/wireless/mac80211_hwsim.c
35846 @@ -1677,9 +1677,11 @@ static int __init init_mac80211_hwsim(void)
35847 return -EINVAL;
35848
35849 if (fake_hw_scan) {
35850 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35851 - mac80211_hwsim_ops.sw_scan_start = NULL;
35852 - mac80211_hwsim_ops.sw_scan_complete = NULL;
35853 + pax_open_kernel();
35854 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35855 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35856 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35857 + pax_close_kernel();
35858 }
35859
35860 spin_lock_init(&hwsim_radio_lock);
35861 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35862 index 3186aa4..b35b09f 100644
35863 --- a/drivers/net/wireless/mwifiex/main.h
35864 +++ b/drivers/net/wireless/mwifiex/main.h
35865 @@ -536,7 +536,7 @@ struct mwifiex_if_ops {
35866 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35867 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35868 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35869 -};
35870 +} __no_const;
35871
35872 struct mwifiex_adapter {
35873 u8 iface_type;
35874 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35875 index a330c69..a81540f 100644
35876 --- a/drivers/net/wireless/rndis_wlan.c
35877 +++ b/drivers/net/wireless/rndis_wlan.c
35878 @@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35879
35880 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35881
35882 - if (rts_threshold < 0 || rts_threshold > 2347)
35883 + if (rts_threshold > 2347)
35884 rts_threshold = 2347;
35885
35886 tmp = cpu_to_le32(rts_threshold);
35887 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35888 index a77f1bb..c608b2b 100644
35889 --- a/drivers/net/wireless/wl1251/wl1251.h
35890 +++ b/drivers/net/wireless/wl1251/wl1251.h
35891 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
35892 void (*reset)(struct wl1251 *wl);
35893 void (*enable_irq)(struct wl1251 *wl);
35894 void (*disable_irq)(struct wl1251 *wl);
35895 -};
35896 +} __no_const;
35897
35898 struct wl1251 {
35899 struct ieee80211_hw *hw;
35900 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35901 index f34b5b2..b5abb9f 100644
35902 --- a/drivers/oprofile/buffer_sync.c
35903 +++ b/drivers/oprofile/buffer_sync.c
35904 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35905 if (cookie == NO_COOKIE)
35906 offset = pc;
35907 if (cookie == INVALID_COOKIE) {
35908 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35909 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35910 offset = pc;
35911 }
35912 if (cookie != last_cookie) {
35913 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35914 /* add userspace sample */
35915
35916 if (!mm) {
35917 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35918 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35919 return 0;
35920 }
35921
35922 cookie = lookup_dcookie(mm, s->eip, &offset);
35923
35924 if (cookie == INVALID_COOKIE) {
35925 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35926 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35927 return 0;
35928 }
35929
35930 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35931 /* ignore backtraces if failed to add a sample */
35932 if (state == sb_bt_start) {
35933 state = sb_bt_ignore;
35934 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35935 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35936 }
35937 }
35938 release_mm(mm);
35939 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35940 index c0cc4e7..44d4e54 100644
35941 --- a/drivers/oprofile/event_buffer.c
35942 +++ b/drivers/oprofile/event_buffer.c
35943 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35944 }
35945
35946 if (buffer_pos == buffer_size) {
35947 - atomic_inc(&oprofile_stats.event_lost_overflow);
35948 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35949 return;
35950 }
35951
35952 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35953 index ed2c3ec..deda85a 100644
35954 --- a/drivers/oprofile/oprof.c
35955 +++ b/drivers/oprofile/oprof.c
35956 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35957 if (oprofile_ops.switch_events())
35958 return;
35959
35960 - atomic_inc(&oprofile_stats.multiplex_counter);
35961 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35962 start_switch_worker();
35963 }
35964
35965 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35966 index 917d28e..d62d981 100644
35967 --- a/drivers/oprofile/oprofile_stats.c
35968 +++ b/drivers/oprofile/oprofile_stats.c
35969 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35970 cpu_buf->sample_invalid_eip = 0;
35971 }
35972
35973 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35974 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35975 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35976 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35977 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35978 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35979 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35980 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35981 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35982 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35983 }
35984
35985
35986 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35987 index 38b6fc0..b5cbfce 100644
35988 --- a/drivers/oprofile/oprofile_stats.h
35989 +++ b/drivers/oprofile/oprofile_stats.h
35990 @@ -13,11 +13,11 @@
35991 #include <linux/atomic.h>
35992
35993 struct oprofile_stat_struct {
35994 - atomic_t sample_lost_no_mm;
35995 - atomic_t sample_lost_no_mapping;
35996 - atomic_t bt_lost_no_mapping;
35997 - atomic_t event_lost_overflow;
35998 - atomic_t multiplex_counter;
35999 + atomic_unchecked_t sample_lost_no_mm;
36000 + atomic_unchecked_t sample_lost_no_mapping;
36001 + atomic_unchecked_t bt_lost_no_mapping;
36002 + atomic_unchecked_t event_lost_overflow;
36003 + atomic_unchecked_t multiplex_counter;
36004 };
36005
36006 extern struct oprofile_stat_struct oprofile_stats;
36007 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
36008 index 2f0aa0f..90fab02 100644
36009 --- a/drivers/oprofile/oprofilefs.c
36010 +++ b/drivers/oprofile/oprofilefs.c
36011 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
36012
36013
36014 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
36015 - char const *name, atomic_t *val)
36016 + char const *name, atomic_unchecked_t *val)
36017 {
36018 return __oprofilefs_create_file(sb, root, name,
36019 &atomic_ro_fops, 0444, val);
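
[Editor's note] The oprofile statistics counters are converted from atomic_t to atomic_unchecked_t, with every operation switched to its *_unchecked counterpart. These are pure event counters, so exempting them from the overflow checking the patch applies to plain atomic_t (presumably the rationale) loses nothing. The same conversion recurs below for the fc_exch_mgr stats, the lpfc trace counters and scsi_host_next_hn. A minimal usage sketch; the counter name is illustrative.

#include <linux/atomic.h>	/* atomic_unchecked_t is added by this patch */

static atomic_unchecked_t example_lost_events = ATOMIC_INIT(0);

static void example_account_loss(void)
{
	/* same API shape as atomic_inc()/atomic_read(), minus the extra
	 * overflow detection the patch applies to plain atomic_t (assumed) */
	atomic_inc_unchecked(&example_lost_events);
}

static void example_reset(void)
{
	atomic_set_unchecked(&example_lost_events, 0);
}
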
36020 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
36021 index 3f56bc0..707d642 100644
36022 --- a/drivers/parport/procfs.c
36023 +++ b/drivers/parport/procfs.c
36024 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
36025
36026 *ppos += len;
36027
36028 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
36029 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
36030 }
36031
36032 #ifdef CONFIG_PARPORT_1284
36033 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
36034
36035 *ppos += len;
36036
36037 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
36038 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
36039 }
36040 #endif /* IEEE1284.3 support. */
36041
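
[Editor's note] Both parport procfs read handlers now refuse to copy more than the size of their on-stack buffer before calling copy_to_user(), so a length that would spill past the buffer becomes -EFAULT instead of leaking adjacent stack memory to userspace. The guard, reduced to its essentials; the buffer size below is illustrative, the result/buffer/len names match the hunks.

char buffer[256];	/* size is illustrative */
size_t len = 0;		/* computed from the sysctl data in the real handlers */

/* reject a length larger than the on-stack buffer rather than letting
 * copy_to_user() expose whatever sits next to it on the stack */
if (len > sizeof(buffer) || copy_to_user(result, buffer, len))
	return -EFAULT;
return 0;
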
36042 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
36043 index 9fff878..ad0ad53 100644
36044 --- a/drivers/pci/hotplug/cpci_hotplug.h
36045 +++ b/drivers/pci/hotplug/cpci_hotplug.h
36046 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
36047 int (*hardware_test) (struct slot* slot, u32 value);
36048 u8 (*get_power) (struct slot* slot);
36049 int (*set_power) (struct slot* slot, int value);
36050 -};
36051 +} __no_const;
36052
36053 struct cpci_hp_controller {
36054 unsigned int irq;
36055 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
36056 index 76ba8a1..20ca857 100644
36057 --- a/drivers/pci/hotplug/cpqphp_nvram.c
36058 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
36059 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
36060
36061 void compaq_nvram_init (void __iomem *rom_start)
36062 {
36063 +
36064 +#ifndef CONFIG_PAX_KERNEXEC
36065 if (rom_start) {
36066 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
36067 }
36068 +#endif
36069 +
36070 dbg("int15 entry = %p\n", compaq_int15_entry_point);
36071
36072 /* initialize our int15 lock */
36073 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
36074 index 2275162..95f1a92 100644
36075 --- a/drivers/pci/pcie/aspm.c
36076 +++ b/drivers/pci/pcie/aspm.c
36077 @@ -27,9 +27,9 @@
36078 #define MODULE_PARAM_PREFIX "pcie_aspm."
36079
36080 /* Note: those are not register definitions */
36081 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
36082 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
36083 -#define ASPM_STATE_L1 (4) /* L1 state */
36084 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
36085 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
36086 +#define ASPM_STATE_L1 (4U) /* L1 state */
36087 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
36088 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36089
36090 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
36091 index 71eac9c..2de27ef 100644
36092 --- a/drivers/pci/probe.c
36093 +++ b/drivers/pci/probe.c
36094 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
36095 u32 l, sz, mask;
36096 u16 orig_cmd;
36097
36098 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
36099 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
36100
36101 if (!dev->mmio_always_on) {
36102 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
36103 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
36104 index 27911b5..5b6db88 100644
36105 --- a/drivers/pci/proc.c
36106 +++ b/drivers/pci/proc.c
36107 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
36108 static int __init pci_proc_init(void)
36109 {
36110 struct pci_dev *dev = NULL;
36111 +
36112 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
36113 +#ifdef CONFIG_GRKERNSEC_PROC_USER
36114 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36115 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36116 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36117 +#endif
36118 +#else
36119 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36120 +#endif
36121 proc_create("devices", 0, proc_bus_pci_dir,
36122 &proc_bus_pci_dev_operations);
36123 proc_initialized = 1;
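
[Editor's note] Under the GRKERNSEC_PROC options, /proc/bus/pci is created with restricted permissions instead of the default world-readable directory: root-only (0500) for GRKERNSEC_PROC_USER, root plus the configured group (0550) for GRKERNSEC_PROC_USERGROUP. The shape of the pattern as used here; the directory name below is a placeholder.

#ifdef CONFIG_GRKERNSEC_PROC_ADD
#ifdef CONFIG_GRKERNSEC_PROC_USER
	dir = proc_mkdir_mode("example", S_IRUSR | S_IXUSR, NULL);			/* 0500 */
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	dir = proc_mkdir_mode("example", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);	/* 0550 */
#endif
#else
	dir = proc_mkdir("example", NULL);	/* stock behaviour */
#endif
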
36124 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
36125 index ea0c607..58c4628 100644
36126 --- a/drivers/platform/x86/thinkpad_acpi.c
36127 +++ b/drivers/platform/x86/thinkpad_acpi.c
36128 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
36129 return 0;
36130 }
36131
36132 -void static hotkey_mask_warn_incomplete_mask(void)
36133 +static void hotkey_mask_warn_incomplete_mask(void)
36134 {
36135 /* log only what the user can fix... */
36136 const u32 wantedmask = hotkey_driver_mask &
36137 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36138 }
36139 }
36140
36141 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36142 - struct tp_nvram_state *newn,
36143 - const u32 event_mask)
36144 -{
36145 -
36146 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36147 do { \
36148 if ((event_mask & (1 << __scancode)) && \
36149 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36150 tpacpi_hotkey_send_key(__scancode); \
36151 } while (0)
36152
36153 - void issue_volchange(const unsigned int oldvol,
36154 - const unsigned int newvol)
36155 - {
36156 - unsigned int i = oldvol;
36157 +static void issue_volchange(const unsigned int oldvol,
36158 + const unsigned int newvol,
36159 + const u32 event_mask)
36160 +{
36161 + unsigned int i = oldvol;
36162
36163 - while (i > newvol) {
36164 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36165 - i--;
36166 - }
36167 - while (i < newvol) {
36168 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36169 - i++;
36170 - }
36171 + while (i > newvol) {
36172 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36173 + i--;
36174 }
36175 + while (i < newvol) {
36176 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36177 + i++;
36178 + }
36179 +}
36180
36181 - void issue_brightnesschange(const unsigned int oldbrt,
36182 - const unsigned int newbrt)
36183 - {
36184 - unsigned int i = oldbrt;
36185 +static void issue_brightnesschange(const unsigned int oldbrt,
36186 + const unsigned int newbrt,
36187 + const u32 event_mask)
36188 +{
36189 + unsigned int i = oldbrt;
36190
36191 - while (i > newbrt) {
36192 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36193 - i--;
36194 - }
36195 - while (i < newbrt) {
36196 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36197 - i++;
36198 - }
36199 + while (i > newbrt) {
36200 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36201 + i--;
36202 + }
36203 + while (i < newbrt) {
36204 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36205 + i++;
36206 }
36207 +}
36208
36209 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36210 + struct tp_nvram_state *newn,
36211 + const u32 event_mask)
36212 +{
36213 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36214 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36215 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36216 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36217 oldn->volume_level != newn->volume_level) {
36218 /* recently muted, or repeated mute keypress, or
36219 * multiple presses ending in mute */
36220 - issue_volchange(oldn->volume_level, newn->volume_level);
36221 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36222 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36223 }
36224 } else {
36225 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36226 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36227 }
36228 if (oldn->volume_level != newn->volume_level) {
36229 - issue_volchange(oldn->volume_level, newn->volume_level);
36230 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36231 } else if (oldn->volume_toggle != newn->volume_toggle) {
36232 /* repeated vol up/down keypress at end of scale ? */
36233 if (newn->volume_level == 0)
36234 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36235 /* handle brightness */
36236 if (oldn->brightness_level != newn->brightness_level) {
36237 issue_brightnesschange(oldn->brightness_level,
36238 - newn->brightness_level);
36239 + newn->brightness_level,
36240 + event_mask);
36241 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
36242 /* repeated key presses that didn't change state */
36243 if (newn->brightness_level == 0)
36244 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36245 && !tp_features.bright_unkfw)
36246 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36247 }
36248 +}
36249
36250 #undef TPACPI_COMPARE_KEY
36251 #undef TPACPI_MAY_SEND_KEY
36252 -}
36253
36254 /*
36255 * Polling driver
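
[Editor's note] The thinkpad_acpi hunk hoists the GCC nested helpers issue_volchange() and issue_brightnesschange() out of hotkey_compare_and_issue_event() and makes them ordinary file-scope static functions, passing the previously captured event_mask explicitly (nested functions are a GNU C extension that sits poorly with the hardening this patch applies). The signature change in isolation:

/* before: nested inside hotkey_compare_and_issue_event(), capturing event_mask */
/* after:  file-scope static, with event_mask passed as a parameter */
static void issue_volchange(const unsigned int oldvol,
			    const unsigned int newvol,
			    const u32 event_mask);

/* call site inside hotkey_compare_and_issue_event(): */
issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
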
36256 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
36257 index b859d16..5cc6b1a 100644
36258 --- a/drivers/pnp/pnpbios/bioscalls.c
36259 +++ b/drivers/pnp/pnpbios/bioscalls.c
36260 @@ -59,7 +59,7 @@ do { \
36261 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36262 } while(0)
36263
36264 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36265 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36266 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36267
36268 /*
36269 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36270
36271 cpu = get_cpu();
36272 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36273 +
36274 + pax_open_kernel();
36275 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36276 + pax_close_kernel();
36277
36278 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36279 spin_lock_irqsave(&pnp_bios_lock, flags);
36280 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36281 :"memory");
36282 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36283
36284 + pax_open_kernel();
36285 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36286 + pax_close_kernel();
36287 +
36288 put_cpu();
36289
36290 /* If we get here and this is set then the PnP BIOS faulted on us. */
36291 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
36292 return status;
36293 }
36294
36295 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
36296 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36297 {
36298 int i;
36299
36300 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36301 pnp_bios_callpoint.offset = header->fields.pm16offset;
36302 pnp_bios_callpoint.segment = PNP_CS16;
36303
36304 + pax_open_kernel();
36305 +
36306 for_each_possible_cpu(i) {
36307 struct desc_struct *gdt = get_cpu_gdt_table(i);
36308 if (!gdt)
36309 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36310 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36311 (unsigned long)__va(header->fields.pm16dseg));
36312 }
36313 +
36314 + pax_close_kernel();
36315 }
36316 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
36317 index b0ecacb..7c9da2e 100644
36318 --- a/drivers/pnp/resource.c
36319 +++ b/drivers/pnp/resource.c
36320 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
36321 return 1;
36322
36323 /* check if the resource is valid */
36324 - if (*irq < 0 || *irq > 15)
36325 + if (*irq > 15)
36326 return 0;
36327
36328 /* check if the resource is reserved */
36329 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
36330 return 1;
36331
36332 /* check if the resource is valid */
36333 - if (*dma < 0 || *dma == 4 || *dma > 7)
36334 + if (*dma == 4 || *dma > 7)
36335 return 0;
36336
36337 /* check if the resource is reserved */
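
[Editor's note] pnp_check_irq() and pnp_check_dma() drop their *irq < 0 and *dma < 0 tests: the values being compared are unsigned here, so those arms were always false. The rndis_wlan rts_threshold hunk above is the same cleanup (rts_threshold is a u32), and the ASPM_STATE_* and PCI_ROM_ADDRESS_MASK hunks make signedness and width explicit for related reasons. In miniature:

u32 rts_threshold = 0;	/* value taken from userspace in set_rts_threshold() */

/* a u32 can never be negative; the old "< 0" arm was dead code and the
 * kind of thing -Wtype-limits complains about */
if (rts_threshold > 2347)
	rts_threshold = 2347;
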
36338 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
36339 index 1ed6ea0..77c0bd2 100644
36340 --- a/drivers/power/bq27x00_battery.c
36341 +++ b/drivers/power/bq27x00_battery.c
36342 @@ -72,7 +72,7 @@
36343 struct bq27x00_device_info;
36344 struct bq27x00_access_methods {
36345 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36346 -};
36347 +} __no_const;
36348
36349 enum bq27x00_chip { BQ27000, BQ27500 };
36350
36351 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
36352 index a838e66..a9e1665 100644
36353 --- a/drivers/regulator/max8660.c
36354 +++ b/drivers/regulator/max8660.c
36355 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
36356 max8660->shadow_regs[MAX8660_OVER1] = 5;
36357 } else {
36358 /* Otherwise devices can be toggled via software */
36359 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
36360 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
36361 + pax_open_kernel();
36362 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36363 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36364 + pax_close_kernel();
36365 }
36366
36367 /*
36368 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
36369 index e8cfc99..072aee2 100644
36370 --- a/drivers/regulator/mc13892-regulator.c
36371 +++ b/drivers/regulator/mc13892-regulator.c
36372 @@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
36373 }
36374 mc13xxx_unlock(mc13892);
36375
36376 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36377 + pax_open_kernel();
36378 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36379 = mc13892_vcam_set_mode;
36380 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36381 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36382 = mc13892_vcam_get_mode;
36383 + pax_close_kernel();
36384
36385 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36386 ARRAY_SIZE(mc13892_regulators));
36387 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36388 index cace6d3..f623fda 100644
36389 --- a/drivers/rtc/rtc-dev.c
36390 +++ b/drivers/rtc/rtc-dev.c
36391 @@ -14,6 +14,7 @@
36392 #include <linux/module.h>
36393 #include <linux/rtc.h>
36394 #include <linux/sched.h>
36395 +#include <linux/grsecurity.h>
36396 #include "rtc-core.h"
36397
36398 static dev_t rtc_devt;
36399 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36400 if (copy_from_user(&tm, uarg, sizeof(tm)))
36401 return -EFAULT;
36402
36403 + gr_log_timechange();
36404 +
36405 return rtc_set_time(rtc, &tm);
36406
36407 case RTC_PIE_ON:
36408 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36409 index ffb5878..e6d785c 100644
36410 --- a/drivers/scsi/aacraid/aacraid.h
36411 +++ b/drivers/scsi/aacraid/aacraid.h
36412 @@ -492,7 +492,7 @@ struct adapter_ops
36413 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36414 /* Administrative operations */
36415 int (*adapter_comm)(struct aac_dev * dev, int comm);
36416 -};
36417 +} __no_const;
36418
36419 /*
36420 * Define which interrupt handler needs to be installed
36421 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36422 index 705e13e..91c873c 100644
36423 --- a/drivers/scsi/aacraid/linit.c
36424 +++ b/drivers/scsi/aacraid/linit.c
36425 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36426 #elif defined(__devinitconst)
36427 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36428 #else
36429 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36430 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36431 #endif
36432 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36433 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36434 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36435 index d5ff142..49c0ebb 100644
36436 --- a/drivers/scsi/aic94xx/aic94xx_init.c
36437 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
36438 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36439 .lldd_control_phy = asd_control_phy,
36440 };
36441
36442 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36443 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36444 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36445 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36446 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36447 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36448 index a796de9..1ef20e1 100644
36449 --- a/drivers/scsi/bfa/bfa.h
36450 +++ b/drivers/scsi/bfa/bfa.h
36451 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
36452 u32 *end);
36453 int cpe_vec_q0;
36454 int rme_vec_q0;
36455 -};
36456 +} __no_const;
36457 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36458
36459 struct bfa_faa_cbfn_s {
36460 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36461 index f0f80e2..8ec946b 100644
36462 --- a/drivers/scsi/bfa/bfa_fcpim.c
36463 +++ b/drivers/scsi/bfa/bfa_fcpim.c
36464 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36465
36466 bfa_iotag_attach(fcp);
36467
36468 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36469 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36470 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36471 (fcp->num_itns * sizeof(struct bfa_itn_s));
36472 memset(fcp->itn_arr, 0,
36473 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36474 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36475 {
36476 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36477 - struct bfa_itn_s *itn;
36478 + bfa_itn_s_no_const *itn;
36479
36480 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36481 itn->isr = isr;
36482 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36483 index 36f26da..38a34a8 100644
36484 --- a/drivers/scsi/bfa/bfa_fcpim.h
36485 +++ b/drivers/scsi/bfa/bfa_fcpim.h
36486 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
36487 struct bfa_itn_s {
36488 bfa_isr_func_t isr;
36489 };
36490 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36491
36492 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36493 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36494 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36495 struct list_head iotag_tio_free_q; /* free IO resources */
36496 struct list_head iotag_unused_q; /* unused IO resources*/
36497 struct bfa_iotag_s *iotag_arr;
36498 - struct bfa_itn_s *itn_arr;
36499 + bfa_itn_s_no_const *itn_arr;
36500 int num_ioim_reqs;
36501 int num_fwtio_reqs;
36502 int num_itns;
36503 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36504 index 546d46b..642fa5b 100644
36505 --- a/drivers/scsi/bfa/bfa_ioc.h
36506 +++ b/drivers/scsi/bfa/bfa_ioc.h
36507 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36508 bfa_ioc_disable_cbfn_t disable_cbfn;
36509 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36510 bfa_ioc_reset_cbfn_t reset_cbfn;
36511 -};
36512 +} __no_const;
36513
36514 /*
36515 * IOC event notification mechanism.
36516 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36517 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36518 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36519 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36520 -};
36521 +} __no_const;
36522
36523 /*
36524 * Queue element to wait for room in request queue. FIFO order is
36525 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36526 index 351dc0b..951dc32 100644
36527 --- a/drivers/scsi/hosts.c
36528 +++ b/drivers/scsi/hosts.c
36529 @@ -42,7 +42,7 @@
36530 #include "scsi_logging.h"
36531
36532
36533 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36534 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36535
36536
36537 static void scsi_host_cls_release(struct device *dev)
36538 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36539 * subtract one because we increment first then return, but we need to
36540 * know what the next host number was before increment
36541 */
36542 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36543 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36544 shost->dma_channel = 0xff;
36545
36546 /* These three are default values which can be overridden */
36547 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36548 index b96962c..0c82ec2 100644
36549 --- a/drivers/scsi/hpsa.c
36550 +++ b/drivers/scsi/hpsa.c
36551 @@ -507,7 +507,7 @@ static inline u32 next_command(struct ctlr_info *h)
36552 u32 a;
36553
36554 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36555 - return h->access.command_completed(h);
36556 + return h->access->command_completed(h);
36557
36558 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36559 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36560 @@ -2991,7 +2991,7 @@ static void start_io(struct ctlr_info *h)
36561 while (!list_empty(&h->reqQ)) {
36562 c = list_entry(h->reqQ.next, struct CommandList, list);
36563 /* can't do anything if fifo is full */
36564 - if ((h->access.fifo_full(h))) {
36565 + if ((h->access->fifo_full(h))) {
36566 dev_warn(&h->pdev->dev, "fifo full\n");
36567 break;
36568 }
36569 @@ -3001,7 +3001,7 @@ static void start_io(struct ctlr_info *h)
36570 h->Qdepth--;
36571
36572 /* Tell the controller execute command */
36573 - h->access.submit_command(h, c);
36574 + h->access->submit_command(h, c);
36575
36576 /* Put job onto the completed Q */
36577 addQ(&h->cmpQ, c);
36578 @@ -3010,17 +3010,17 @@ static void start_io(struct ctlr_info *h)
36579
36580 static inline unsigned long get_next_completion(struct ctlr_info *h)
36581 {
36582 - return h->access.command_completed(h);
36583 + return h->access->command_completed(h);
36584 }
36585
36586 static inline bool interrupt_pending(struct ctlr_info *h)
36587 {
36588 - return h->access.intr_pending(h);
36589 + return h->access->intr_pending(h);
36590 }
36591
36592 static inline long interrupt_not_for_us(struct ctlr_info *h)
36593 {
36594 - return (h->access.intr_pending(h) == 0) ||
36595 + return (h->access->intr_pending(h) == 0) ||
36596 (h->interrupts_enabled == 0);
36597 }
36598
36599 @@ -3919,7 +3919,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36600 if (prod_index < 0)
36601 return -ENODEV;
36602 h->product_name = products[prod_index].product_name;
36603 - h->access = *(products[prod_index].access);
36604 + h->access = products[prod_index].access;
36605
36606 if (hpsa_board_disabled(h->pdev)) {
36607 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36608 @@ -4164,7 +4164,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36609
36610 assert_spin_locked(&lockup_detector_lock);
36611 remove_ctlr_from_lockup_detector_list(h);
36612 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36613 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36614 spin_lock_irqsave(&h->lock, flags);
36615 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36616 spin_unlock_irqrestore(&h->lock, flags);
36617 @@ -4344,7 +4344,7 @@ reinit_after_soft_reset:
36618 }
36619
36620 /* make sure the board interrupts are off */
36621 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36622 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36623
36624 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36625 goto clean2;
36626 @@ -4378,7 +4378,7 @@ reinit_after_soft_reset:
36627 * fake ones to scoop up any residual completions.
36628 */
36629 spin_lock_irqsave(&h->lock, flags);
36630 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36631 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36632 spin_unlock_irqrestore(&h->lock, flags);
36633 free_irq(h->intr[h->intr_mode], h);
36634 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36635 @@ -4397,9 +4397,9 @@ reinit_after_soft_reset:
36636 dev_info(&h->pdev->dev, "Board READY.\n");
36637 dev_info(&h->pdev->dev,
36638 "Waiting for stale completions to drain.\n");
36639 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36640 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36641 msleep(10000);
36642 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36643 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36644
36645 rc = controller_reset_failed(h->cfgtable);
36646 if (rc)
36647 @@ -4420,7 +4420,7 @@ reinit_after_soft_reset:
36648 }
36649
36650 /* Turn the interrupts on so we can service requests */
36651 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36652 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36653
36654 hpsa_hba_inquiry(h);
36655 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36656 @@ -4472,7 +4472,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36657 * To write all data in the battery backed cache to disks
36658 */
36659 hpsa_flush_cache(h);
36660 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36661 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36662 free_irq(h->intr[h->intr_mode], h);
36663 #ifdef CONFIG_PCI_MSI
36664 if (h->msix_vector)
36665 @@ -4636,7 +4636,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36666 return;
36667 }
36668 /* Change the access methods to the performant access methods */
36669 - h->access = SA5_performant_access;
36670 + h->access = &SA5_performant_access;
36671 h->transMethod = CFGTBL_Trans_Performant;
36672 }
36673
36674 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36675 index 91edafb..a9b88ec 100644
36676 --- a/drivers/scsi/hpsa.h
36677 +++ b/drivers/scsi/hpsa.h
36678 @@ -73,7 +73,7 @@ struct ctlr_info {
36679 unsigned int msix_vector;
36680 unsigned int msi_vector;
36681 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36682 - struct access_method access;
36683 + struct access_method *access;
36684
36685 /* queue and queue Info */
36686 struct list_head reqQ;
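
[Editor's note] hpsa stops copying the selected struct access_method into struct ctlr_info by value and stores a pointer instead, so the function-pointer table itself can stay in read-only memory; every call site changes from h->access.fn(...) to h->access->fn(...). Condensed from the hunks above:

struct ctlr_info {
	/* was: struct access_method access; (a writable per-controller copy) */
	struct access_method *access;
};

/* selection, as in hpsa_pci_init() / hpsa_enter_performant_mode(): */
h->access = products[prod_index].access;	/* was: *(products[prod_index].access) */
h->access = &SA5_performant_access;

/* call sites: */
h->access->set_intr_mask(h, HPSA_INTR_OFF);
return h->access->command_completed(h);
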
36687 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36688 index f2df059..a3a9930 100644
36689 --- a/drivers/scsi/ips.h
36690 +++ b/drivers/scsi/ips.h
36691 @@ -1027,7 +1027,7 @@ typedef struct {
36692 int (*intr)(struct ips_ha *);
36693 void (*enableint)(struct ips_ha *);
36694 uint32_t (*statupd)(struct ips_ha *);
36695 -} ips_hw_func_t;
36696 +} __no_const ips_hw_func_t;
36697
36698 typedef struct ips_ha {
36699 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36700 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36701 index 4d70d96..84d0573 100644
36702 --- a/drivers/scsi/libfc/fc_exch.c
36703 +++ b/drivers/scsi/libfc/fc_exch.c
36704 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
36705 * all together if not used XXX
36706 */
36707 struct {
36708 - atomic_t no_free_exch;
36709 - atomic_t no_free_exch_xid;
36710 - atomic_t xid_not_found;
36711 - atomic_t xid_busy;
36712 - atomic_t seq_not_found;
36713 - atomic_t non_bls_resp;
36714 + atomic_unchecked_t no_free_exch;
36715 + atomic_unchecked_t no_free_exch_xid;
36716 + atomic_unchecked_t xid_not_found;
36717 + atomic_unchecked_t xid_busy;
36718 + atomic_unchecked_t seq_not_found;
36719 + atomic_unchecked_t non_bls_resp;
36720 } stats;
36721 };
36722
36723 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36724 /* allocate memory for exchange */
36725 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36726 if (!ep) {
36727 - atomic_inc(&mp->stats.no_free_exch);
36728 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36729 goto out;
36730 }
36731 memset(ep, 0, sizeof(*ep));
36732 @@ -780,7 +780,7 @@ out:
36733 return ep;
36734 err:
36735 spin_unlock_bh(&pool->lock);
36736 - atomic_inc(&mp->stats.no_free_exch_xid);
36737 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36738 mempool_free(ep, mp->ep_pool);
36739 return NULL;
36740 }
36741 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36742 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36743 ep = fc_exch_find(mp, xid);
36744 if (!ep) {
36745 - atomic_inc(&mp->stats.xid_not_found);
36746 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36747 reject = FC_RJT_OX_ID;
36748 goto out;
36749 }
36750 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36751 ep = fc_exch_find(mp, xid);
36752 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36753 if (ep) {
36754 - atomic_inc(&mp->stats.xid_busy);
36755 + atomic_inc_unchecked(&mp->stats.xid_busy);
36756 reject = FC_RJT_RX_ID;
36757 goto rel;
36758 }
36759 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36760 }
36761 xid = ep->xid; /* get our XID */
36762 } else if (!ep) {
36763 - atomic_inc(&mp->stats.xid_not_found);
36764 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36765 reject = FC_RJT_RX_ID; /* XID not found */
36766 goto out;
36767 }
36768 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36769 } else {
36770 sp = &ep->seq;
36771 if (sp->id != fh->fh_seq_id) {
36772 - atomic_inc(&mp->stats.seq_not_found);
36773 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36774 if (f_ctl & FC_FC_END_SEQ) {
36775 /*
36776 * Update sequence_id based on incoming last
36777 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36778
36779 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36780 if (!ep) {
36781 - atomic_inc(&mp->stats.xid_not_found);
36782 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36783 goto out;
36784 }
36785 if (ep->esb_stat & ESB_ST_COMPLETE) {
36786 - atomic_inc(&mp->stats.xid_not_found);
36787 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36788 goto rel;
36789 }
36790 if (ep->rxid == FC_XID_UNKNOWN)
36791 ep->rxid = ntohs(fh->fh_rx_id);
36792 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36793 - atomic_inc(&mp->stats.xid_not_found);
36794 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36795 goto rel;
36796 }
36797 if (ep->did != ntoh24(fh->fh_s_id) &&
36798 ep->did != FC_FID_FLOGI) {
36799 - atomic_inc(&mp->stats.xid_not_found);
36800 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36801 goto rel;
36802 }
36803 sof = fr_sof(fp);
36804 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36805 sp->ssb_stat |= SSB_ST_RESP;
36806 sp->id = fh->fh_seq_id;
36807 } else if (sp->id != fh->fh_seq_id) {
36808 - atomic_inc(&mp->stats.seq_not_found);
36809 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36810 goto rel;
36811 }
36812
36813 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36814 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36815
36816 if (!sp)
36817 - atomic_inc(&mp->stats.xid_not_found);
36818 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36819 else
36820 - atomic_inc(&mp->stats.non_bls_resp);
36821 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36822
36823 fc_frame_free(fp);
36824 }
36825 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36826 index db9238f..4378ed2 100644
36827 --- a/drivers/scsi/libsas/sas_ata.c
36828 +++ b/drivers/scsi/libsas/sas_ata.c
36829 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
36830 .postreset = ata_std_postreset,
36831 .error_handler = ata_std_error_handler,
36832 .post_internal_cmd = sas_ata_post_internal,
36833 - .qc_defer = ata_std_qc_defer,
36834 + .qc_defer = ata_std_qc_defer,
36835 .qc_prep = ata_noop_qc_prep,
36836 .qc_issue = sas_ata_qc_issue,
36837 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36838 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36839 index 825f930..ce42672 100644
36840 --- a/drivers/scsi/lpfc/lpfc.h
36841 +++ b/drivers/scsi/lpfc/lpfc.h
36842 @@ -413,7 +413,7 @@ struct lpfc_vport {
36843 struct dentry *debug_nodelist;
36844 struct dentry *vport_debugfs_root;
36845 struct lpfc_debugfs_trc *disc_trc;
36846 - atomic_t disc_trc_cnt;
36847 + atomic_unchecked_t disc_trc_cnt;
36848 #endif
36849 uint8_t stat_data_enabled;
36850 uint8_t stat_data_blocked;
36851 @@ -821,8 +821,8 @@ struct lpfc_hba {
36852 struct timer_list fabric_block_timer;
36853 unsigned long bit_flags;
36854 #define FABRIC_COMANDS_BLOCKED 0
36855 - atomic_t num_rsrc_err;
36856 - atomic_t num_cmd_success;
36857 + atomic_unchecked_t num_rsrc_err;
36858 + atomic_unchecked_t num_cmd_success;
36859 unsigned long last_rsrc_error_time;
36860 unsigned long last_ramp_down_time;
36861 unsigned long last_ramp_up_time;
36862 @@ -852,7 +852,7 @@ struct lpfc_hba {
36863
36864 struct dentry *debug_slow_ring_trc;
36865 struct lpfc_debugfs_trc *slow_ring_trc;
36866 - atomic_t slow_ring_trc_cnt;
36867 + atomic_unchecked_t slow_ring_trc_cnt;
36868 /* iDiag debugfs sub-directory */
36869 struct dentry *idiag_root;
36870 struct dentry *idiag_pci_cfg;
36871 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36872 index 3587a3f..d45b81b 100644
36873 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
36874 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36875 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36876
36877 #include <linux/debugfs.h>
36878
36879 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36880 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36881 static unsigned long lpfc_debugfs_start_time = 0L;
36882
36883 /* iDiag */
36884 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36885 lpfc_debugfs_enable = 0;
36886
36887 len = 0;
36888 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36889 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36890 (lpfc_debugfs_max_disc_trc - 1);
36891 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36892 dtp = vport->disc_trc + i;
36893 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36894 lpfc_debugfs_enable = 0;
36895
36896 len = 0;
36897 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36898 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36899 (lpfc_debugfs_max_slow_ring_trc - 1);
36900 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36901 dtp = phba->slow_ring_trc + i;
36902 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36903 !vport || !vport->disc_trc)
36904 return;
36905
36906 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36907 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36908 (lpfc_debugfs_max_disc_trc - 1);
36909 dtp = vport->disc_trc + index;
36910 dtp->fmt = fmt;
36911 dtp->data1 = data1;
36912 dtp->data2 = data2;
36913 dtp->data3 = data3;
36914 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36915 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36916 dtp->jif = jiffies;
36917 #endif
36918 return;
36919 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36920 !phba || !phba->slow_ring_trc)
36921 return;
36922
36923 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36924 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36925 (lpfc_debugfs_max_slow_ring_trc - 1);
36926 dtp = phba->slow_ring_trc + index;
36927 dtp->fmt = fmt;
36928 dtp->data1 = data1;
36929 dtp->data2 = data2;
36930 dtp->data3 = data3;
36931 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36932 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36933 dtp->jif = jiffies;
36934 #endif
36935 return;
36936 @@ -4040,7 +4040,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36937 "slow_ring buffer\n");
36938 goto debug_failed;
36939 }
36940 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36941 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36942 memset(phba->slow_ring_trc, 0,
36943 (sizeof(struct lpfc_debugfs_trc) *
36944 lpfc_debugfs_max_slow_ring_trc));
36945 @@ -4086,7 +4086,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36946 "buffer\n");
36947 goto debug_failed;
36948 }
36949 - atomic_set(&vport->disc_trc_cnt, 0);
36950 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36951
36952 snprintf(name, sizeof(name), "discovery_trace");
36953 vport->debug_disc_trc =
36954 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36955 index dfea2da..8e17227 100644
36956 --- a/drivers/scsi/lpfc/lpfc_init.c
36957 +++ b/drivers/scsi/lpfc/lpfc_init.c
36958 @@ -10145,8 +10145,10 @@ lpfc_init(void)
36959 printk(LPFC_COPYRIGHT "\n");
36960
36961 if (lpfc_enable_npiv) {
36962 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36963 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36964 + pax_open_kernel();
36965 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36966 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36967 + pax_close_kernel();
36968 }
36969 lpfc_transport_template =
36970 fc_attach_transport(&lpfc_transport_functions);
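The lpfc_init.c hunk shows the second recurring pattern: when a normally read-only ops/template object must be patched at runtime, the write is wrapped in pax_open_kernel()/pax_close_kernel() and goes through a *(void **)& cast so the store compiles against the read-only type. A sketch under the assumption that the template type is constified by the PaX plugin (names are illustrative):

/*
 * Sketch of the pax_open_kernel() pattern, assuming the template type
 * below ends up read-only at runtime under the PaX constification
 * plugin. The *(void **)& cast lets the assignment compile; the
 * open/close pair makes the page writable only for the duration of
 * the two stores.
 */
#include <linux/kernel.h>

struct demo_template {
	int (*vport_create)(void);
	int (*vport_delete)(void);
};

static struct demo_template demo_tmpl;	/* constified: all members are function pointers */

static int demo_vport_create(void) { return 0; }
static int demo_vport_delete(void) { return 0; }

static void demo_enable_npiv(void)
{
	pax_open_kernel();
	*(void **)&demo_tmpl.vport_create = demo_vport_create;
	*(void **)&demo_tmpl.vport_delete = demo_vport_delete;
	pax_close_kernel();
}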
36971 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36972 index c60f5d0..751535c 100644
36973 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36974 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36975 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36976 uint32_t evt_posted;
36977
36978 spin_lock_irqsave(&phba->hbalock, flags);
36979 - atomic_inc(&phba->num_rsrc_err);
36980 + atomic_inc_unchecked(&phba->num_rsrc_err);
36981 phba->last_rsrc_error_time = jiffies;
36982
36983 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36984 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36985 unsigned long flags;
36986 struct lpfc_hba *phba = vport->phba;
36987 uint32_t evt_posted;
36988 - atomic_inc(&phba->num_cmd_success);
36989 + atomic_inc_unchecked(&phba->num_cmd_success);
36990
36991 if (vport->cfg_lun_queue_depth <= queue_depth)
36992 return;
36993 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36994 unsigned long num_rsrc_err, num_cmd_success;
36995 int i;
36996
36997 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36998 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36999 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37000 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37001
37002 vports = lpfc_create_vport_work_array(phba);
37003 if (vports != NULL)
37004 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
37005 }
37006 }
37007 lpfc_destroy_vport_work_array(phba, vports);
37008 - atomic_set(&phba->num_rsrc_err, 0);
37009 - atomic_set(&phba->num_cmd_success, 0);
37010 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37011 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37012 }
37013
37014 /**
37015 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
37016 }
37017 }
37018 lpfc_destroy_vport_work_array(phba, vports);
37019 - atomic_set(&phba->num_rsrc_err, 0);
37020 - atomic_set(&phba->num_cmd_success, 0);
37021 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37022 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37023 }
37024
37025 /**
37026 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
37027 index ea8a0b4..812a124 100644
37028 --- a/drivers/scsi/pmcraid.c
37029 +++ b/drivers/scsi/pmcraid.c
37030 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
37031 res->scsi_dev = scsi_dev;
37032 scsi_dev->hostdata = res;
37033 res->change_detected = 0;
37034 - atomic_set(&res->read_failures, 0);
37035 - atomic_set(&res->write_failures, 0);
37036 + atomic_set_unchecked(&res->read_failures, 0);
37037 + atomic_set_unchecked(&res->write_failures, 0);
37038 rc = 0;
37039 }
37040 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37041 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
37042
37043 /* If this was a SCSI read/write command keep count of errors */
37044 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37045 - atomic_inc(&res->read_failures);
37046 + atomic_inc_unchecked(&res->read_failures);
37047 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37048 - atomic_inc(&res->write_failures);
37049 + atomic_inc_unchecked(&res->write_failures);
37050
37051 if (!RES_IS_GSCSI(res->cfg_entry) &&
37052 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37053 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
37054 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37055 * hrrq_id assigned here in queuecommand
37056 */
37057 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37058 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37059 pinstance->num_hrrq;
37060 cmd->cmd_done = pmcraid_io_done;
37061
37062 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
37063 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37064 * hrrq_id assigned here in queuecommand
37065 */
37066 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37067 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37068 pinstance->num_hrrq;
37069
37070 if (request_size) {
37071 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
37072
37073 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37074 /* add resources only after host is added into system */
37075 - if (!atomic_read(&pinstance->expose_resources))
37076 + if (!atomic_read_unchecked(&pinstance->expose_resources))
37077 return;
37078
37079 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
37080 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
37081 init_waitqueue_head(&pinstance->reset_wait_q);
37082
37083 atomic_set(&pinstance->outstanding_cmds, 0);
37084 - atomic_set(&pinstance->last_message_id, 0);
37085 - atomic_set(&pinstance->expose_resources, 0);
37086 + atomic_set_unchecked(&pinstance->last_message_id, 0);
37087 + atomic_set_unchecked(&pinstance->expose_resources, 0);
37088
37089 INIT_LIST_HEAD(&pinstance->free_res_q);
37090 INIT_LIST_HEAD(&pinstance->used_res_q);
37091 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
37092 /* Schedule worker thread to handle CCN and take care of adding and
37093 * removing devices to OS
37094 */
37095 - atomic_set(&pinstance->expose_resources, 1);
37096 + atomic_set_unchecked(&pinstance->expose_resources, 1);
37097 schedule_work(&pinstance->worker_q);
37098 return rc;
37099
37100 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
37101 index ca496c7..9c791d5 100644
37102 --- a/drivers/scsi/pmcraid.h
37103 +++ b/drivers/scsi/pmcraid.h
37104 @@ -748,7 +748,7 @@ struct pmcraid_instance {
37105 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
37106
37107 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37108 - atomic_t last_message_id;
37109 + atomic_unchecked_t last_message_id;
37110
37111 /* configuration table */
37112 struct pmcraid_config_table *cfg_table;
37113 @@ -777,7 +777,7 @@ struct pmcraid_instance {
37114 atomic_t outstanding_cmds;
37115
37116 /* should add/delete resources to mid-layer now ?*/
37117 - atomic_t expose_resources;
37118 + atomic_unchecked_t expose_resources;
37119
37120
37121
37122 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
37123 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37124 };
37125 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37126 - atomic_t read_failures; /* count of failed READ commands */
37127 - atomic_t write_failures; /* count of failed WRITE commands */
37128 + atomic_unchecked_t read_failures; /* count of failed READ commands */
37129 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37130
37131 /* To indicate add/delete/modify during CCN */
37132 u8 change_detected;
37133 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
37134 index af1003f..be55a75 100644
37135 --- a/drivers/scsi/qla2xxx/qla_def.h
37136 +++ b/drivers/scsi/qla2xxx/qla_def.h
37137 @@ -2247,7 +2247,7 @@ struct isp_operations {
37138 int (*start_scsi) (srb_t *);
37139 int (*abort_isp) (struct scsi_qla_host *);
37140 int (*iospace_config)(struct qla_hw_data*);
37141 -};
37142 +} __no_const;
37143
37144 /* MSI-X Support *************************************************************/
37145
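The qla_def.h hunk tags struct isp_operations with __no_const; the same annotation appears later for the rtl8712 _io_ops, the usbip eh_ops and the wlan-ng usbctlx_completor. The idea, assuming the grsecurity constification plugin is active: structures made up of function pointers are forced const by default, and __no_const opts a type out when the driver has to fill its hooks in at runtime. Illustrative sketch:

/*
 * Sketch of the __no_const annotation, assuming the constification
 * plugin: without the annotation this all-function-pointer struct
 * would be forced const, and the probe-time assignment below would
 * not compile.
 */
struct demo_isp_ops {
	int (*start_scsi)(void *sp);
	int (*abort_isp)(void *ha);
} __no_const;

static struct demo_isp_ops demo_ops;	/* stays writable despite the plugin */

static int demo_start_scsi(void *sp) { return 0; }

static void demo_bind_ops(void)
{
	demo_ops.start_scsi = demo_start_scsi;	/* plain store, no pax_open_kernel() needed */
}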
37146 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
37147 index bfe6854..ceac088 100644
37148 --- a/drivers/scsi/qla4xxx/ql4_def.h
37149 +++ b/drivers/scsi/qla4xxx/ql4_def.h
37150 @@ -261,7 +261,7 @@ struct ddb_entry {
37151 * (4000 only) */
37152 atomic_t relogin_timer; /* Max Time to wait for
37153 * relogin to complete */
37154 - atomic_t relogin_retry_count; /* Num of times relogin has been
37155 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37156 * retried */
37157 uint32_t default_time2wait; /* Default Min time between
37158 * relogins (+aens) */
37159 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
37160 index ce6d3b7..73fac54 100644
37161 --- a/drivers/scsi/qla4xxx/ql4_os.c
37162 +++ b/drivers/scsi/qla4xxx/ql4_os.c
37163 @@ -2178,12 +2178,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
37164 */
37165 if (!iscsi_is_session_online(cls_sess)) {
37166 /* Reset retry relogin timer */
37167 - atomic_inc(&ddb_entry->relogin_retry_count);
37168 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37169 DEBUG2(ql4_printk(KERN_INFO, ha,
37170 "%s: index[%d] relogin timed out-retrying"
37171 " relogin (%d), retry (%d)\n", __func__,
37172 ddb_entry->fw_ddb_index,
37173 - atomic_read(&ddb_entry->relogin_retry_count),
37174 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37175 ddb_entry->default_time2wait + 4));
37176 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37177 atomic_set(&ddb_entry->retry_relogin_timer,
37178 @@ -3953,7 +3953,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
37179
37180 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37181 atomic_set(&ddb_entry->relogin_timer, 0);
37182 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37183 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37184 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
37185 ddb_entry->default_relogin_timeout =
37186 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
37187 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37188 index 2aeb2e9..46e3925 100644
37189 --- a/drivers/scsi/scsi.c
37190 +++ b/drivers/scsi/scsi.c
37191 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
37192 unsigned long timeout;
37193 int rtn = 0;
37194
37195 - atomic_inc(&cmd->device->iorequest_cnt);
37196 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37197
37198 /* check if the device is still usable */
37199 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37200 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
37201 index b2c95db..227d74e 100644
37202 --- a/drivers/scsi/scsi_lib.c
37203 +++ b/drivers/scsi/scsi_lib.c
37204 @@ -1411,7 +1411,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
37205 shost = sdev->host;
37206 scsi_init_cmd_errh(cmd);
37207 cmd->result = DID_NO_CONNECT << 16;
37208 - atomic_inc(&cmd->device->iorequest_cnt);
37209 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37210
37211 /*
37212 * SCSI request completion path will do scsi_device_unbusy(),
37213 @@ -1437,9 +1437,9 @@ static void scsi_softirq_done(struct request *rq)
37214
37215 INIT_LIST_HEAD(&cmd->eh_entry);
37216
37217 - atomic_inc(&cmd->device->iodone_cnt);
37218 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
37219 if (cmd->result)
37220 - atomic_inc(&cmd->device->ioerr_cnt);
37221 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37222
37223 disposition = scsi_decide_disposition(cmd);
37224 if (disposition != SUCCESS &&
37225 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
37226 index 04c2a27..9d8bd66 100644
37227 --- a/drivers/scsi/scsi_sysfs.c
37228 +++ b/drivers/scsi/scsi_sysfs.c
37229 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
37230 char *buf) \
37231 { \
37232 struct scsi_device *sdev = to_scsi_device(dev); \
37233 - unsigned long long count = atomic_read(&sdev->field); \
37234 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
37235 return snprintf(buf, 20, "0x%llx\n", count); \
37236 } \
37237 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37238 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
37239 index 84a1fdf..693b0d6 100644
37240 --- a/drivers/scsi/scsi_tgt_lib.c
37241 +++ b/drivers/scsi/scsi_tgt_lib.c
37242 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
37243 int err;
37244
37245 dprintk("%lx %u\n", uaddr, len);
37246 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37247 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37248 if (err) {
37249 /*
37250 * TODO: need to fixup sg_tablesize, max_segment_size,
37251 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
37252 index f59d4a0..1d89407 100644
37253 --- a/drivers/scsi/scsi_transport_fc.c
37254 +++ b/drivers/scsi/scsi_transport_fc.c
37255 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
37256 * Netlink Infrastructure
37257 */
37258
37259 -static atomic_t fc_event_seq;
37260 +static atomic_unchecked_t fc_event_seq;
37261
37262 /**
37263 * fc_get_event_number - Obtain the next sequential FC event number
37264 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
37265 u32
37266 fc_get_event_number(void)
37267 {
37268 - return atomic_add_return(1, &fc_event_seq);
37269 + return atomic_add_return_unchecked(1, &fc_event_seq);
37270 }
37271 EXPORT_SYMBOL(fc_get_event_number);
37272
37273 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
37274 {
37275 int error;
37276
37277 - atomic_set(&fc_event_seq, 0);
37278 + atomic_set_unchecked(&fc_event_seq, 0);
37279
37280 error = transport_class_register(&fc_host_class);
37281 if (error)
37282 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
37283 char *cp;
37284
37285 *val = simple_strtoul(buf, &cp, 0);
37286 - if ((*cp && (*cp != '\n')) || (*val < 0))
37287 + if (*cp && (*cp != '\n'))
37288 return -EINVAL;
37289 /*
37290 * Check for overflow; dev_loss_tmo is u32
37291 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
37292 index e3e3c7d..ebdab62 100644
37293 --- a/drivers/scsi/scsi_transport_iscsi.c
37294 +++ b/drivers/scsi/scsi_transport_iscsi.c
37295 @@ -79,7 +79,7 @@ struct iscsi_internal {
37296 struct transport_container session_cont;
37297 };
37298
37299 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37300 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37301 static struct workqueue_struct *iscsi_eh_timer_workq;
37302
37303 static DEFINE_IDA(iscsi_sess_ida);
37304 @@ -1063,7 +1063,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
37305 int err;
37306
37307 ihost = shost->shost_data;
37308 - session->sid = atomic_add_return(1, &iscsi_session_nr);
37309 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37310
37311 if (target_id == ISCSI_MAX_TARGET) {
37312 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
37313 @@ -2680,7 +2680,7 @@ static __init int iscsi_transport_init(void)
37314 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37315 ISCSI_TRANSPORT_VERSION);
37316
37317 - atomic_set(&iscsi_session_nr, 0);
37318 + atomic_set_unchecked(&iscsi_session_nr, 0);
37319
37320 err = class_register(&iscsi_transport_class);
37321 if (err)
37322 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
37323 index 21a045e..ec89e03 100644
37324 --- a/drivers/scsi/scsi_transport_srp.c
37325 +++ b/drivers/scsi/scsi_transport_srp.c
37326 @@ -33,7 +33,7 @@
37327 #include "scsi_transport_srp_internal.h"
37328
37329 struct srp_host_attrs {
37330 - atomic_t next_port_id;
37331 + atomic_unchecked_t next_port_id;
37332 };
37333 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37334
37335 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
37336 struct Scsi_Host *shost = dev_to_shost(dev);
37337 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37338
37339 - atomic_set(&srp_host->next_port_id, 0);
37340 + atomic_set_unchecked(&srp_host->next_port_id, 0);
37341 return 0;
37342 }
37343
37344 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
37345 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37346 rport->roles = ids->roles;
37347
37348 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37349 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37350 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37351
37352 transport_setup_device(&rport->dev);
37353 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37354 index eacd46b..e3f4d62 100644
37355 --- a/drivers/scsi/sg.c
37356 +++ b/drivers/scsi/sg.c
37357 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37358 sdp->disk->disk_name,
37359 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37360 NULL,
37361 - (char *)arg);
37362 + (char __user *)arg);
37363 case BLKTRACESTART:
37364 return blk_trace_startstop(sdp->device->request_queue, 1);
37365 case BLKTRACESTOP:
37366 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
37367 const struct file_operations * fops;
37368 };
37369
37370 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37371 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37372 {"allow_dio", &adio_fops},
37373 {"debug", &debug_fops},
37374 {"def_reserved_size", &dressz_fops},
37375 @@ -2332,7 +2332,7 @@ sg_proc_init(void)
37376 if (!sg_proc_sgp)
37377 return 1;
37378 for (k = 0; k < num_leaves; ++k) {
37379 - struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37380 + const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37381 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
37382 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
37383 }
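The sg.c hunk is plain const-correctness: the /proc leaf table is only ever read, so both the array and the iteration pointer become const and the table can live in read-only data. A sketch with illustrative names (the extern fops stand in for sg.c's adio_fops and friends, assumed to exist elsewhere):

/*
 * Sketch of the constified /proc table; names are illustrative and the
 * extern file_operations are assumed to be defined elsewhere in the
 * driver, as adio_fops and debug_fops are in sg.c.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

extern const struct file_operations demo_adio_fops;
extern const struct file_operations demo_debug_fops;

struct demo_proc_leaf {
	const char *name;
	const struct file_operations *fops;
};

static const struct demo_proc_leaf demo_leaves[] = {	/* can live in .rodata */
	{ "allow_dio", &demo_adio_fops },
	{ "debug",     &demo_debug_fops },
};

static void demo_proc_init(struct proc_dir_entry *parent)
{
	int k;

	for (k = 0; k < ARRAY_SIZE(demo_leaves); ++k) {
		const struct demo_proc_leaf *leaf = &demo_leaves[k];
		umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;

		proc_create(leaf->name, mask, parent, leaf->fops);
	}
}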
37384 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
37385 index f64250e..1ee3049 100644
37386 --- a/drivers/spi/spi-dw-pci.c
37387 +++ b/drivers/spi/spi-dw-pci.c
37388 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
37389 #define spi_resume NULL
37390 #endif
37391
37392 -static const struct pci_device_id pci_ids[] __devinitdata = {
37393 +static const struct pci_device_id pci_ids[] __devinitconst = {
37394 /* Intel MID platform SPI controller 0 */
37395 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
37396 {},
37397 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37398 index b2ccdea..84cde75 100644
37399 --- a/drivers/spi/spi.c
37400 +++ b/drivers/spi/spi.c
37401 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
37402 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37403
37404 /* portable code must never pass more than 32 bytes */
37405 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37406 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37407
37408 static u8 *buf;
37409
37410 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37411 index 400df8c..065d4f4 100644
37412 --- a/drivers/staging/octeon/ethernet-rx.c
37413 +++ b/drivers/staging/octeon/ethernet-rx.c
37414 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37415 /* Increment RX stats for virtual ports */
37416 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37417 #ifdef CONFIG_64BIT
37418 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37419 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37420 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37421 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37422 #else
37423 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37424 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37425 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37426 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37427 #endif
37428 }
37429 netif_receive_skb(skb);
37430 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37431 dev->name);
37432 */
37433 #ifdef CONFIG_64BIT
37434 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37435 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37436 #else
37437 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37438 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37439 #endif
37440 dev_kfree_skb_irq(skb);
37441 }
37442 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37443 index 9112cd8..92f8d51 100644
37444 --- a/drivers/staging/octeon/ethernet.c
37445 +++ b/drivers/staging/octeon/ethernet.c
37446 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37447 * since the RX tasklet also increments it.
37448 */
37449 #ifdef CONFIG_64BIT
37450 - atomic64_add(rx_status.dropped_packets,
37451 - (atomic64_t *)&priv->stats.rx_dropped);
37452 + atomic64_add_unchecked(rx_status.dropped_packets,
37453 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37454 #else
37455 - atomic_add(rx_status.dropped_packets,
37456 - (atomic_t *)&priv->stats.rx_dropped);
37457 + atomic_add_unchecked(rx_status.dropped_packets,
37458 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37459 #endif
37460 }
37461
37462 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37463 index 86308a0..feaa925 100644
37464 --- a/drivers/staging/rtl8712/rtl871x_io.h
37465 +++ b/drivers/staging/rtl8712/rtl871x_io.h
37466 @@ -108,7 +108,7 @@ struct _io_ops {
37467 u8 *pmem);
37468 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37469 u8 *pmem);
37470 -};
37471 +} __no_const;
37472
37473 struct io_req {
37474 struct list_head list;
37475 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37476 index c7b5e8b..783d6cb 100644
37477 --- a/drivers/staging/sbe-2t3e3/netdev.c
37478 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37479 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37480 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37481
37482 if (rlen)
37483 - if (copy_to_user(data, &resp, rlen))
37484 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37485 return -EFAULT;
37486
37487 return 0;
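The sbe-2t3e3 hunk adds a bounds check before copy_to_user(): rlen comes back from t3e3_if_config() and is no longer trusted to fit the on-stack response. A minimal sketch of the same check, with illustrative types:

/*
 * Sketch of the added bounds check: the reply length reported by the
 * lower layer is clamped against the real size of the kernel response
 * buffer before anything is copied to user space.
 */
#include <linux/errno.h>
#include <linux/uaccess.h>

struct demo_resp {
	unsigned char raw[256];
};

static int demo_copy_resp(void __user *data, const struct demo_resp *resp,
			  unsigned int rlen)
{
	if (!rlen)
		return 0;
	if (rlen > sizeof(*resp))	/* reject oversized replies outright */
		return -EFAULT;
	return copy_to_user(data, resp, rlen) ? -EFAULT : 0;
}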
37488 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37489 index 42cdafe..2769103 100644
37490 --- a/drivers/staging/speakup/speakup_soft.c
37491 +++ b/drivers/staging/speakup/speakup_soft.c
37492 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37493 break;
37494 } else if (!initialized) {
37495 if (*init) {
37496 - ch = *init;
37497 init++;
37498 } else {
37499 initialized = 1;
37500 }
37501 + ch = *init;
37502 } else {
37503 ch = synth_buffer_getc();
37504 }
37505 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37506 index b8f8c48..1fc5025 100644
37507 --- a/drivers/staging/usbip/usbip_common.h
37508 +++ b/drivers/staging/usbip/usbip_common.h
37509 @@ -289,7 +289,7 @@ struct usbip_device {
37510 void (*shutdown)(struct usbip_device *);
37511 void (*reset)(struct usbip_device *);
37512 void (*unusable)(struct usbip_device *);
37513 - } eh_ops;
37514 + } __no_const eh_ops;
37515 };
37516
37517 /* usbip_common.c */
37518 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37519 index 88b3298..3783eee 100644
37520 --- a/drivers/staging/usbip/vhci.h
37521 +++ b/drivers/staging/usbip/vhci.h
37522 @@ -88,7 +88,7 @@ struct vhci_hcd {
37523 unsigned resuming:1;
37524 unsigned long re_timeout;
37525
37526 - atomic_t seqnum;
37527 + atomic_unchecked_t seqnum;
37528
37529 /*
37530 * NOTE:
37531 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37532 index 2ee97e2..0420b86 100644
37533 --- a/drivers/staging/usbip/vhci_hcd.c
37534 +++ b/drivers/staging/usbip/vhci_hcd.c
37535 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
37536 return;
37537 }
37538
37539 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37540 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37541 if (priv->seqnum == 0xffff)
37542 dev_info(&urb->dev->dev, "seqnum max\n");
37543
37544 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37545 return -ENOMEM;
37546 }
37547
37548 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37549 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37550 if (unlink->seqnum == 0xffff)
37551 pr_info("seqnum max\n");
37552
37553 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
37554 vdev->rhport = rhport;
37555 }
37556
37557 - atomic_set(&vhci->seqnum, 0);
37558 + atomic_set_unchecked(&vhci->seqnum, 0);
37559 spin_lock_init(&vhci->lock);
37560
37561 hcd->power_budget = 0; /* no limit */
37562 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37563 index 3f511b4..d3dbc1e 100644
37564 --- a/drivers/staging/usbip/vhci_rx.c
37565 +++ b/drivers/staging/usbip/vhci_rx.c
37566 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37567 if (!urb) {
37568 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37569 pr_info("max seqnum %d\n",
37570 - atomic_read(&the_controller->seqnum));
37571 + atomic_read_unchecked(&the_controller->seqnum));
37572 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37573 return;
37574 }
37575 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37576 index 7735027..30eed13 100644
37577 --- a/drivers/staging/vt6655/hostap.c
37578 +++ b/drivers/staging/vt6655/hostap.c
37579 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37580 *
37581 */
37582
37583 +static net_device_ops_no_const apdev_netdev_ops;
37584 +
37585 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37586 {
37587 PSDevice apdev_priv;
37588 struct net_device *dev = pDevice->dev;
37589 int ret;
37590 - const struct net_device_ops apdev_netdev_ops = {
37591 - .ndo_start_xmit = pDevice->tx_80211,
37592 - };
37593
37594 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37595
37596 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37597 *apdev_priv = *pDevice;
37598 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37599
37600 + /* only half broken now */
37601 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37602 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37603
37604 pDevice->apdev->type = ARPHRD_IEEE80211;
37605 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37606 index 51b5adf..098e320 100644
37607 --- a/drivers/staging/vt6656/hostap.c
37608 +++ b/drivers/staging/vt6656/hostap.c
37609 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37610 *
37611 */
37612
37613 +static net_device_ops_no_const apdev_netdev_ops;
37614 +
37615 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37616 {
37617 PSDevice apdev_priv;
37618 struct net_device *dev = pDevice->dev;
37619 int ret;
37620 - const struct net_device_ops apdev_netdev_ops = {
37621 - .ndo_start_xmit = pDevice->tx_80211,
37622 - };
37623
37624 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37625
37626 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37627 *apdev_priv = *pDevice;
37628 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37629
37630 + /* only half broken now */
37631 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37632 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37633
37634 pDevice->apdev->type = ARPHRD_IEEE80211;
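Both vt6655 and vt6656 hostap.c get the same rework: the const, on-stack net_device_ops cannot survive once the structure type is constified, so a single static net_device_ops_no_const is patched at enable time instead (the patch's own "only half broken now" comment concedes that this one instance is shared by every device). Sketch, assuming the net_device_ops_no_const typedef this patch introduces:

/*
 * Sketch of the shared writable ops instance whose transmit hook is
 * filled in when hostapd mode is enabled; names are illustrative.
 */
#include <linux/netdevice.h>

static net_device_ops_no_const demo_apdev_ops;

static void demo_enable_hostapd(struct net_device *apdev,
				netdev_tx_t (*tx_80211)(struct sk_buff *skb,
							struct net_device *dev))
{
	demo_apdev_ops.ndo_start_xmit = tx_80211;	/* per-device hook, shared instance */
	apdev->netdev_ops = &demo_apdev_ops;
}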
37635 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37636 index 7843dfd..3db105f 100644
37637 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37638 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37639 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37640
37641 struct usbctlx_completor {
37642 int (*complete) (struct usbctlx_completor *);
37643 -};
37644 +} __no_const;
37645
37646 static int
37647 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37648 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37649 index 1ca66ea..76f1343 100644
37650 --- a/drivers/staging/zcache/tmem.c
37651 +++ b/drivers/staging/zcache/tmem.c
37652 @@ -39,7 +39,7 @@
37653 * A tmem host implementation must use this function to register callbacks
37654 * for memory allocation.
37655 */
37656 -static struct tmem_hostops tmem_hostops;
37657 +static tmem_hostops_no_const tmem_hostops;
37658
37659 static void tmem_objnode_tree_init(void);
37660
37661 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37662 * A tmem host implementation must use this function to register
37663 * callbacks for a page-accessible memory (PAM) implementation
37664 */
37665 -static struct tmem_pamops tmem_pamops;
37666 +static tmem_pamops_no_const tmem_pamops;
37667
37668 void tmem_register_pamops(struct tmem_pamops *m)
37669 {
37670 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37671 index ed147c4..94fc3c6 100644
37672 --- a/drivers/staging/zcache/tmem.h
37673 +++ b/drivers/staging/zcache/tmem.h
37674 @@ -180,6 +180,7 @@ struct tmem_pamops {
37675 void (*new_obj)(struct tmem_obj *);
37676 int (*replace_in_obj)(void *, struct tmem_obj *);
37677 };
37678 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37679 extern void tmem_register_pamops(struct tmem_pamops *m);
37680
37681 /* memory allocation methods provided by the host implementation */
37682 @@ -189,6 +190,7 @@ struct tmem_hostops {
37683 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37684 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37685 };
37686 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37687 extern void tmem_register_hostops(struct tmem_hostops *m);
37688
37689 /* core tmem accessor functions */
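The tmem.h/tmem.c pair shows the typedef flavour of the same opt-out: the struct types stay eligible for constification, and only the two static registration copies that tmem_register_hostops()/tmem_register_pamops() write into are declared through a __no_const typedef. Illustrative sketch:

/*
 * Sketch of the typedef variant: the struct type itself remains
 * constifiable, and only the writable registration copy uses the
 * __no_const typedef. Names are illustrative.
 */
struct demo_hostops {
	void *(*objnode_alloc)(void);
	void (*objnode_free)(void *);
};
typedef struct demo_hostops __no_const demo_hostops_no_const;

static demo_hostops_no_const demo_hostops;	/* the one writable instance */

static void demo_register_hostops(struct demo_hostops *m)
{
	demo_hostops = *m;	/* struct copy into the writable registration slot */
}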
37690 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
37691 index 97c74ee..7f6d77d 100644
37692 --- a/drivers/target/iscsi/iscsi_target.c
37693 +++ b/drivers/target/iscsi/iscsi_target.c
37694 @@ -1361,7 +1361,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
37695 * outstanding_r2ts reaches zero, go ahead and send the delayed
37696 * TASK_ABORTED status.
37697 */
37698 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
37699 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
37700 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
37701 if (--cmd->outstanding_r2ts < 1) {
37702 iscsit_stop_dataout_timer(cmd);
37703 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37704 index dcb0618..97e3d85 100644
37705 --- a/drivers/target/target_core_tmr.c
37706 +++ b/drivers/target/target_core_tmr.c
37707 @@ -260,7 +260,7 @@ static void core_tmr_drain_task_list(
37708 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37709 cmd->t_task_list_num,
37710 atomic_read(&cmd->t_task_cdbs_left),
37711 - atomic_read(&cmd->t_task_cdbs_sent),
37712 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37713 atomic_read(&cmd->t_transport_active),
37714 atomic_read(&cmd->t_transport_stop),
37715 atomic_read(&cmd->t_transport_sent));
37716 @@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
37717 pr_debug("LUN_RESET: got t_transport_active = 1 for"
37718 " task: %p, t_fe_count: %d dev: %p\n", task,
37719 fe_count, dev);
37720 - atomic_set(&cmd->t_transport_aborted, 1);
37721 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37722 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37723
37724 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37725 @@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
37726 }
37727 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
37728 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
37729 - atomic_set(&cmd->t_transport_aborted, 1);
37730 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37731 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37732
37733 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37734 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37735 index cd5cd95..5249d30 100644
37736 --- a/drivers/target/target_core_transport.c
37737 +++ b/drivers/target/target_core_transport.c
37738 @@ -1330,7 +1330,7 @@ struct se_device *transport_add_device_to_core_hba(
37739 spin_lock_init(&dev->se_port_lock);
37740 spin_lock_init(&dev->se_tmr_lock);
37741 spin_lock_init(&dev->qf_cmd_lock);
37742 - atomic_set(&dev->dev_ordered_id, 0);
37743 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37744
37745 se_dev_set_default_attribs(dev, dev_limits);
37746
37747 @@ -1517,7 +1517,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37748 * Used to determine when ORDERED commands should go from
37749 * Dormant to Active status.
37750 */
37751 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37752 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37753 smp_mb__after_atomic_inc();
37754 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37755 cmd->se_ordered_id, cmd->sam_task_attr,
37756 @@ -1862,7 +1862,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
37757 " t_transport_active: %d t_transport_stop: %d"
37758 " t_transport_sent: %d\n", cmd->t_task_list_num,
37759 atomic_read(&cmd->t_task_cdbs_left),
37760 - atomic_read(&cmd->t_task_cdbs_sent),
37761 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37762 atomic_read(&cmd->t_task_cdbs_ex_left),
37763 atomic_read(&cmd->t_transport_active),
37764 atomic_read(&cmd->t_transport_stop),
37765 @@ -2121,9 +2121,9 @@ check_depth:
37766 cmd = task->task_se_cmd;
37767 spin_lock_irqsave(&cmd->t_state_lock, flags);
37768 task->task_flags |= (TF_ACTIVE | TF_SENT);
37769 - atomic_inc(&cmd->t_task_cdbs_sent);
37770 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37771
37772 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
37773 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37774 cmd->t_task_list_num)
37775 atomic_set(&cmd->t_transport_sent, 1);
37776
37777 @@ -4348,7 +4348,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
37778 atomic_set(&cmd->transport_lun_stop, 0);
37779 }
37780 if (!atomic_read(&cmd->t_transport_active) ||
37781 - atomic_read(&cmd->t_transport_aborted)) {
37782 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
37783 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37784 return false;
37785 }
37786 @@ -4597,7 +4597,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
37787 {
37788 int ret = 0;
37789
37790 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
37791 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
37792 if (!send_status ||
37793 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
37794 return 1;
37795 @@ -4634,7 +4634,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
37796 */
37797 if (cmd->data_direction == DMA_TO_DEVICE) {
37798 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
37799 - atomic_inc(&cmd->t_transport_aborted);
37800 + atomic_inc_unchecked(&cmd->t_transport_aborted);
37801 smp_mb__after_atomic_inc();
37802 }
37803 }
37804 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37805 index b9040be..e3f5aab 100644
37806 --- a/drivers/tty/hvc/hvcs.c
37807 +++ b/drivers/tty/hvc/hvcs.c
37808 @@ -83,6 +83,7 @@
37809 #include <asm/hvcserver.h>
37810 #include <asm/uaccess.h>
37811 #include <asm/vio.h>
37812 +#include <asm/local.h>
37813
37814 /*
37815 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37816 @@ -270,7 +271,7 @@ struct hvcs_struct {
37817 unsigned int index;
37818
37819 struct tty_struct *tty;
37820 - int open_count;
37821 + local_t open_count;
37822
37823 /*
37824 * Used to tell the driver kernel_thread what operations need to take
37825 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37826
37827 spin_lock_irqsave(&hvcsd->lock, flags);
37828
37829 - if (hvcsd->open_count > 0) {
37830 + if (local_read(&hvcsd->open_count) > 0) {
37831 spin_unlock_irqrestore(&hvcsd->lock, flags);
37832 printk(KERN_INFO "HVCS: vterm state unchanged. "
37833 "The hvcs device node is still in use.\n");
37834 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37835 if ((retval = hvcs_partner_connect(hvcsd)))
37836 goto error_release;
37837
37838 - hvcsd->open_count = 1;
37839 + local_set(&hvcsd->open_count, 1);
37840 hvcsd->tty = tty;
37841 tty->driver_data = hvcsd;
37842
37843 @@ -1179,7 +1180,7 @@ fast_open:
37844
37845 spin_lock_irqsave(&hvcsd->lock, flags);
37846 kref_get(&hvcsd->kref);
37847 - hvcsd->open_count++;
37848 + local_inc(&hvcsd->open_count);
37849 hvcsd->todo_mask |= HVCS_SCHED_READ;
37850 spin_unlock_irqrestore(&hvcsd->lock, flags);
37851
37852 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37853 hvcsd = tty->driver_data;
37854
37855 spin_lock_irqsave(&hvcsd->lock, flags);
37856 - if (--hvcsd->open_count == 0) {
37857 + if (local_dec_and_test(&hvcsd->open_count)) {
37858
37859 vio_disable_interrupts(hvcsd->vdev);
37860
37861 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37862 free_irq(irq, hvcsd);
37863 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37864 return;
37865 - } else if (hvcsd->open_count < 0) {
37866 + } else if (local_read(&hvcsd->open_count) < 0) {
37867 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37868 " is missmanaged.\n",
37869 - hvcsd->vdev->unit_address, hvcsd->open_count);
37870 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37871 }
37872
37873 spin_unlock_irqrestore(&hvcsd->lock, flags);
37874 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37875
37876 spin_lock_irqsave(&hvcsd->lock, flags);
37877 /* Preserve this so that we know how many kref refs to put */
37878 - temp_open_count = hvcsd->open_count;
37879 + temp_open_count = local_read(&hvcsd->open_count);
37880
37881 /*
37882 * Don't kref put inside the spinlock because the destruction
37883 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37884 hvcsd->tty->driver_data = NULL;
37885 hvcsd->tty = NULL;
37886
37887 - hvcsd->open_count = 0;
37888 + local_set(&hvcsd->open_count, 0);
37889
37890 /* This will drop any buffered data on the floor which is OK in a hangup
37891 * scenario. */
37892 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
37893 * the middle of a write operation? This is a crummy place to do this
37894 * but we want to keep it all in the spinlock.
37895 */
37896 - if (hvcsd->open_count <= 0) {
37897 + if (local_read(&hvcsd->open_count) <= 0) {
37898 spin_unlock_irqrestore(&hvcsd->lock, flags);
37899 return -ENODEV;
37900 }
37901 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37902 {
37903 struct hvcs_struct *hvcsd = tty->driver_data;
37904
37905 - if (!hvcsd || hvcsd->open_count <= 0)
37906 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37907 return 0;
37908
37909 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
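hvcs.c (and the ipwireless tty driver below) convert the per-device open_count from a plain int to local_t, so every increment, decrement and test goes through the <asm/local.h> helpers instead of raw arithmetic. Sketch of the resulting pattern with an illustrative structure:

/*
 * Sketch of the local_t open-count pattern; the helper calls are the
 * same ones the hunks above use.
 */
#include <asm/local.h>

struct demo_port {
	local_t open_count;
};

static void demo_port_open(struct demo_port *p)
{
	local_inc(&p->open_count);
}

static int demo_port_close(struct demo_port *p)
{
	/* true exactly once, when the last opener goes away */
	return local_dec_and_test(&p->open_count);
}

static int demo_port_is_open(struct demo_port *p)
{
	return local_read(&p->open_count) > 0;
}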
37910 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37911 index ef92869..f4ebd88 100644
37912 --- a/drivers/tty/ipwireless/tty.c
37913 +++ b/drivers/tty/ipwireless/tty.c
37914 @@ -29,6 +29,7 @@
37915 #include <linux/tty_driver.h>
37916 #include <linux/tty_flip.h>
37917 #include <linux/uaccess.h>
37918 +#include <asm/local.h>
37919
37920 #include "tty.h"
37921 #include "network.h"
37922 @@ -51,7 +52,7 @@ struct ipw_tty {
37923 int tty_type;
37924 struct ipw_network *network;
37925 struct tty_struct *linux_tty;
37926 - int open_count;
37927 + local_t open_count;
37928 unsigned int control_lines;
37929 struct mutex ipw_tty_mutex;
37930 int tx_bytes_queued;
37931 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37932 mutex_unlock(&tty->ipw_tty_mutex);
37933 return -ENODEV;
37934 }
37935 - if (tty->open_count == 0)
37936 + if (local_read(&tty->open_count) == 0)
37937 tty->tx_bytes_queued = 0;
37938
37939 - tty->open_count++;
37940 + local_inc(&tty->open_count);
37941
37942 tty->linux_tty = linux_tty;
37943 linux_tty->driver_data = tty;
37944 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37945
37946 static void do_ipw_close(struct ipw_tty *tty)
37947 {
37948 - tty->open_count--;
37949 -
37950 - if (tty->open_count == 0) {
37951 + if (local_dec_return(&tty->open_count) == 0) {
37952 struct tty_struct *linux_tty = tty->linux_tty;
37953
37954 if (linux_tty != NULL) {
37955 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37956 return;
37957
37958 mutex_lock(&tty->ipw_tty_mutex);
37959 - if (tty->open_count == 0) {
37960 + if (local_read(&tty->open_count) == 0) {
37961 mutex_unlock(&tty->ipw_tty_mutex);
37962 return;
37963 }
37964 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37965 return;
37966 }
37967
37968 - if (!tty->open_count) {
37969 + if (!local_read(&tty->open_count)) {
37970 mutex_unlock(&tty->ipw_tty_mutex);
37971 return;
37972 }
37973 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37974 return -ENODEV;
37975
37976 mutex_lock(&tty->ipw_tty_mutex);
37977 - if (!tty->open_count) {
37978 + if (!local_read(&tty->open_count)) {
37979 mutex_unlock(&tty->ipw_tty_mutex);
37980 return -EINVAL;
37981 }
37982 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37983 if (!tty)
37984 return -ENODEV;
37985
37986 - if (!tty->open_count)
37987 + if (!local_read(&tty->open_count))
37988 return -EINVAL;
37989
37990 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37991 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37992 if (!tty)
37993 return 0;
37994
37995 - if (!tty->open_count)
37996 + if (!local_read(&tty->open_count))
37997 return 0;
37998
37999 return tty->tx_bytes_queued;
38000 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
38001 if (!tty)
38002 return -ENODEV;
38003
38004 - if (!tty->open_count)
38005 + if (!local_read(&tty->open_count))
38006 return -EINVAL;
38007
38008 return get_control_lines(tty);
38009 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
38010 if (!tty)
38011 return -ENODEV;
38012
38013 - if (!tty->open_count)
38014 + if (!local_read(&tty->open_count))
38015 return -EINVAL;
38016
38017 return set_control_lines(tty, set, clear);
38018 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
38019 if (!tty)
38020 return -ENODEV;
38021
38022 - if (!tty->open_count)
38023 + if (!local_read(&tty->open_count))
38024 return -EINVAL;
38025
38026 /* FIXME: Exactly how is the tty object locked here .. */
38027 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
38028 against a parallel ioctl etc */
38029 mutex_lock(&ttyj->ipw_tty_mutex);
38030 }
38031 - while (ttyj->open_count)
38032 + while (local_read(&ttyj->open_count))
38033 do_ipw_close(ttyj);
38034 ipwireless_disassociate_network_ttys(network,
38035 ttyj->channel_idx);
38036 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
38037 index fc7bbba..9527e93 100644
38038 --- a/drivers/tty/n_gsm.c
38039 +++ b/drivers/tty/n_gsm.c
38040 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
38041 kref_init(&dlci->ref);
38042 mutex_init(&dlci->mutex);
38043 dlci->fifo = &dlci->_fifo;
38044 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
38045 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
38046 kfree(dlci);
38047 return NULL;
38048 }
38049 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
38050 index d2256d0..97476fa 100644
38051 --- a/drivers/tty/n_tty.c
38052 +++ b/drivers/tty/n_tty.c
38053 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
38054 {
38055 *ops = tty_ldisc_N_TTY;
38056 ops->owner = NULL;
38057 - ops->refcount = ops->flags = 0;
38058 + atomic_set(&ops->refcount, 0);
38059 + ops->flags = 0;
38060 }
38061 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
38062 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
38063 index d8653ab..f8afd9d 100644
38064 --- a/drivers/tty/pty.c
38065 +++ b/drivers/tty/pty.c
38066 @@ -765,8 +765,10 @@ static void __init unix98_pty_init(void)
38067 register_sysctl_table(pty_root_table);
38068
38069 /* Now create the /dev/ptmx special device */
38070 + pax_open_kernel();
38071 tty_default_fops(&ptmx_fops);
38072 - ptmx_fops.open = ptmx_open;
38073 + *(void **)&ptmx_fops.open = ptmx_open;
38074 + pax_close_kernel();
38075
38076 cdev_init(&ptmx_cdev, &ptmx_fops);
38077 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
38078 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
38079 index 2b42a01..32a2ed3 100644
38080 --- a/drivers/tty/serial/kgdboc.c
38081 +++ b/drivers/tty/serial/kgdboc.c
38082 @@ -24,8 +24,9 @@
38083 #define MAX_CONFIG_LEN 40
38084
38085 static struct kgdb_io kgdboc_io_ops;
38086 +static struct kgdb_io kgdboc_io_ops_console;
38087
38088 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
38089 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
38090 static int configured = -1;
38091
38092 static char config[MAX_CONFIG_LEN];
38093 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
38094 kgdboc_unregister_kbd();
38095 if (configured == 1)
38096 kgdb_unregister_io_module(&kgdboc_io_ops);
38097 + else if (configured == 2)
38098 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
38099 }
38100
38101 static int configure_kgdboc(void)
38102 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
38103 int err;
38104 char *cptr = config;
38105 struct console *cons;
38106 + int is_console = 0;
38107
38108 err = kgdboc_option_setup(config);
38109 if (err || !strlen(config) || isspace(config[0]))
38110 goto noconfig;
38111
38112 err = -ENODEV;
38113 - kgdboc_io_ops.is_console = 0;
38114 kgdb_tty_driver = NULL;
38115
38116 kgdboc_use_kms = 0;
38117 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
38118 int idx;
38119 if (cons->device && cons->device(cons, &idx) == p &&
38120 idx == tty_line) {
38121 - kgdboc_io_ops.is_console = 1;
38122 + is_console = 1;
38123 break;
38124 }
38125 cons = cons->next;
38126 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
38127 kgdb_tty_line = tty_line;
38128
38129 do_register:
38130 - err = kgdb_register_io_module(&kgdboc_io_ops);
38131 + if (is_console) {
38132 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
38133 + configured = 2;
38134 + } else {
38135 + err = kgdb_register_io_module(&kgdboc_io_ops);
38136 + configured = 1;
38137 + }
38138 if (err)
38139 goto noconfig;
38140
38141 - configured = 1;
38142 -
38143 return 0;
38144
38145 noconfig:
38146 @@ -213,7 +220,7 @@ noconfig:
38147 static int __init init_kgdboc(void)
38148 {
38149 /* Already configured? */
38150 - if (configured == 1)
38151 + if (configured >= 1)
38152 return 0;
38153
38154 return configure_kgdboc();
38155 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38156 if (config[len - 1] == '\n')
38157 config[len - 1] = '\0';
38158
38159 - if (configured == 1)
38160 + if (configured >= 1)
38161 cleanup_kgdboc();
38162
38163 /* Go and configure with the new params. */
38164 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
38165 .post_exception = kgdboc_post_exp_handler,
38166 };
38167
38168 +static struct kgdb_io kgdboc_io_ops_console = {
38169 + .name = "kgdboc",
38170 + .read_char = kgdboc_get_char,
38171 + .write_char = kgdboc_put_char,
38172 + .pre_exception = kgdboc_pre_exp_handler,
38173 + .post_exception = kgdboc_post_exp_handler,
38174 + .is_console = 1
38175 +};
38176 +
38177 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38178 /* This is only available if kgdboc is a built in for early debugging */
38179 static int __init kgdboc_early_init(char *opt)
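kgdboc can no longer flip .is_console on one kgdb_io structure at runtime once that structure is read-only, so the patch keeps two fully initialized instances and records which one was registered (configured == 1 or 2) so cleanup_kgdboc() can unregister the right one. Condensed sketch of that flow (I/O hooks omitted, names illustrative; kgdb_register_io_module()/kgdb_unregister_io_module() are the real kgdb API used above):

/*
 * Dual-ops sketch: one plain instance, one console instance, and a
 * state variable remembering which was registered.
 */
#include <linux/kgdb.h>

static struct kgdb_io demo_io_ops = {
	.name = "demo_kgdboc",
	/* .read_char / .write_char / exception hooks would go here */
};

static struct kgdb_io demo_io_ops_console = {
	.name = "demo_kgdboc",
	.is_console = 1,
};

/* -1 = not initialized, 0 = unconfigured, 1 = plain, 2 = console */
static int demo_configured = -1;

static int demo_register(int is_console)
{
	int err;

	if (is_console) {
		err = kgdb_register_io_module(&demo_io_ops_console);
		demo_configured = 2;
	} else {
		err = kgdb_register_io_module(&demo_io_ops);
		demo_configured = 1;
	}
	return err;
}

static void demo_cleanup(void)
{
	if (demo_configured == 1)
		kgdb_unregister_io_module(&demo_io_ops);
	else if (demo_configured == 2)
		kgdb_unregister_io_module(&demo_io_ops_console);
}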
38180 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
38181 index 7867b7c..b3c119d 100644
38182 --- a/drivers/tty/sysrq.c
38183 +++ b/drivers/tty/sysrq.c
38184 @@ -862,7 +862,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
38185 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
38186 size_t count, loff_t *ppos)
38187 {
38188 - if (count) {
38189 + if (count && capable(CAP_SYS_ADMIN)) {
38190 char c;
38191
38192 if (get_user(c, buf))
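The sysrq.c hunk simply refuses writes to /proc/sysrq-trigger from callers without CAP_SYS_ADMIN. A minimal sketch of the gated write handler, mirroring the check above (illustrative handler name; the dispatch into __handle_sysrq is left as a comment since only the permission check is the point here):

/*
 * Sketch of the capability-gated sysrq trigger write.
 */
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_write_sysrq_trigger(struct file *file,
					const char __user *buf,
					size_t count, loff_t *ppos)
{
	if (count && capable(CAP_SYS_ADMIN)) {
		char c;

		if (get_user(c, buf))
			return -EFAULT;
		/* __handle_sysrq(c, true); */
	}
	return count;
}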
38193 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38194 index e41b9bb..84002fb 100644
38195 --- a/drivers/tty/tty_io.c
38196 +++ b/drivers/tty/tty_io.c
38197 @@ -3291,7 +3291,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38198
38199 void tty_default_fops(struct file_operations *fops)
38200 {
38201 - *fops = tty_fops;
38202 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38203 }
38204
38205 /*
38206 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38207 index 24b95db..9c078d0 100644
38208 --- a/drivers/tty/tty_ldisc.c
38209 +++ b/drivers/tty/tty_ldisc.c
38210 @@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38211 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38212 struct tty_ldisc_ops *ldo = ld->ops;
38213
38214 - ldo->refcount--;
38215 + atomic_dec(&ldo->refcount);
38216 module_put(ldo->owner);
38217 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38218
38219 @@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38220 spin_lock_irqsave(&tty_ldisc_lock, flags);
38221 tty_ldiscs[disc] = new_ldisc;
38222 new_ldisc->num = disc;
38223 - new_ldisc->refcount = 0;
38224 + atomic_set(&new_ldisc->refcount, 0);
38225 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38226
38227 return ret;
38228 @@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
38229 return -EINVAL;
38230
38231 spin_lock_irqsave(&tty_ldisc_lock, flags);
38232 - if (tty_ldiscs[disc]->refcount)
38233 + if (atomic_read(&tty_ldiscs[disc]->refcount))
38234 ret = -EBUSY;
38235 else
38236 tty_ldiscs[disc] = NULL;
38237 @@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38238 if (ldops) {
38239 ret = ERR_PTR(-EAGAIN);
38240 if (try_module_get(ldops->owner)) {
38241 - ldops->refcount++;
38242 + atomic_inc(&ldops->refcount);
38243 ret = ldops;
38244 }
38245 }
38246 @@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38247 unsigned long flags;
38248
38249 spin_lock_irqsave(&tty_ldisc_lock, flags);
38250 - ldops->refcount--;
38251 + atomic_dec(&ldops->refcount);
38252 module_put(ldops->owner);
38253 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38254 }
38255 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38256 index a605549..6bd3c96 100644
38257 --- a/drivers/tty/vt/keyboard.c
38258 +++ b/drivers/tty/vt/keyboard.c
38259 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38260 kbd->kbdmode == VC_OFF) &&
38261 value != KVAL(K_SAK))
38262 return; /* SAK is allowed even in raw mode */
38263 +
38264 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38265 + {
38266 + void *func = fn_handler[value];
38267 + if (func == fn_show_state || func == fn_show_ptregs ||
38268 + func == fn_show_mem)
38269 + return;
38270 + }
38271 +#endif
38272 +
38273 fn_handler[value](vc);
38274 }
38275
38276 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
38277 index 65447c5..0526f0a 100644
38278 --- a/drivers/tty/vt/vt_ioctl.c
38279 +++ b/drivers/tty/vt/vt_ioctl.c
38280 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38281 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38282 return -EFAULT;
38283
38284 - if (!capable(CAP_SYS_TTY_CONFIG))
38285 - perm = 0;
38286 -
38287 switch (cmd) {
38288 case KDGKBENT:
38289 key_map = key_maps[s];
38290 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38291 val = (i ? K_HOLE : K_NOSUCHMAP);
38292 return put_user(val, &user_kbe->kb_value);
38293 case KDSKBENT:
38294 + if (!capable(CAP_SYS_TTY_CONFIG))
38295 + perm = 0;
38296 +
38297 if (!perm)
38298 return -EPERM;
38299 if (!i && v == K_NOSUCHMAP) {
38300 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38301 int i, j, k;
38302 int ret;
38303
38304 - if (!capable(CAP_SYS_TTY_CONFIG))
38305 - perm = 0;
38306 -
38307 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38308 if (!kbs) {
38309 ret = -ENOMEM;
38310 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38311 kfree(kbs);
38312 return ((p && *p) ? -EOVERFLOW : 0);
38313 case KDSKBSENT:
38314 + if (!capable(CAP_SYS_TTY_CONFIG))
38315 + perm = 0;
38316 +
38317 if (!perm) {
38318 ret = -EPERM;
38319 goto reterr;
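In vt_ioctl.c the CAP_SYS_TTY_CONFIG test moves out of the common path of do_kdsk_ioctl()/do_kdgkb_ioctl() and into the KDSKBENT/KDSKBSENT set cases, so reading a keymap entry no longer requires the capability while modifying one still does. Sketch of the resulting dispatch shape (illustrative command values):

/*
 * Sketch of the reshaped permission check: only the modifying case
 * demands CAP_SYS_TTY_CONFIG.
 */
#include <linux/capability.h>
#include <linux/errno.h>

enum { DEMO_GET_ENTRY, DEMO_SET_ENTRY };

static int demo_kbd_ioctl(unsigned int cmd, int perm)
{
	switch (cmd) {
	case DEMO_GET_ENTRY:
		return 0;			/* reads need no extra capability */
	case DEMO_SET_ENTRY:
		if (!capable(CAP_SYS_TTY_CONFIG))
			perm = 0;
		if (!perm)
			return -EPERM;
		return 0;			/* write of the entry would follow */
	default:
		return -ENOIOCTLCMD;
	}
}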
38320 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38321 index a783d53..cb30d94 100644
38322 --- a/drivers/uio/uio.c
38323 +++ b/drivers/uio/uio.c
38324 @@ -25,6 +25,7 @@
38325 #include <linux/kobject.h>
38326 #include <linux/cdev.h>
38327 #include <linux/uio_driver.h>
38328 +#include <asm/local.h>
38329
38330 #define UIO_MAX_DEVICES (1U << MINORBITS)
38331
38332 @@ -32,10 +33,10 @@ struct uio_device {
38333 struct module *owner;
38334 struct device *dev;
38335 int minor;
38336 - atomic_t event;
38337 + atomic_unchecked_t event;
38338 struct fasync_struct *async_queue;
38339 wait_queue_head_t wait;
38340 - int vma_count;
38341 + local_t vma_count;
38342 struct uio_info *info;
38343 struct kobject *map_dir;
38344 struct kobject *portio_dir;
38345 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38346 struct device_attribute *attr, char *buf)
38347 {
38348 struct uio_device *idev = dev_get_drvdata(dev);
38349 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38350 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38351 }
38352
38353 static struct device_attribute uio_class_attributes[] = {
38354 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38355 {
38356 struct uio_device *idev = info->uio_dev;
38357
38358 - atomic_inc(&idev->event);
38359 + atomic_inc_unchecked(&idev->event);
38360 wake_up_interruptible(&idev->wait);
38361 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38362 }
38363 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38364 }
38365
38366 listener->dev = idev;
38367 - listener->event_count = atomic_read(&idev->event);
38368 + listener->event_count = atomic_read_unchecked(&idev->event);
38369 filep->private_data = listener;
38370
38371 if (idev->info->open) {
38372 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38373 return -EIO;
38374
38375 poll_wait(filep, &idev->wait, wait);
38376 - if (listener->event_count != atomic_read(&idev->event))
38377 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38378 return POLLIN | POLLRDNORM;
38379 return 0;
38380 }
38381 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38382 do {
38383 set_current_state(TASK_INTERRUPTIBLE);
38384
38385 - event_count = atomic_read(&idev->event);
38386 + event_count = atomic_read_unchecked(&idev->event);
38387 if (event_count != listener->event_count) {
38388 if (copy_to_user(buf, &event_count, count))
38389 retval = -EFAULT;
38390 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38391 static void uio_vma_open(struct vm_area_struct *vma)
38392 {
38393 struct uio_device *idev = vma->vm_private_data;
38394 - idev->vma_count++;
38395 + local_inc(&idev->vma_count);
38396 }
38397
38398 static void uio_vma_close(struct vm_area_struct *vma)
38399 {
38400 struct uio_device *idev = vma->vm_private_data;
38401 - idev->vma_count--;
38402 + local_dec(&idev->vma_count);
38403 }
38404
38405 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38406 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
38407 idev->owner = owner;
38408 idev->info = info;
38409 init_waitqueue_head(&idev->wait);
38410 - atomic_set(&idev->event, 0);
38411 + atomic_set_unchecked(&idev->event, 0);
38412
38413 ret = uio_get_minor(idev);
38414 if (ret)
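
The uio.c hunks split the two counters by intent: the sysfs-visible event counter becomes atomic_unchecked_t (a wrap-tolerant counter that opts out of PAX_REFCOUNT overflow detection, since an event statistic may legitimately wrap), and vma_count becomes local_t for the mmap open/close bookkeeping. A rough userspace sketch of the two roles; both are modelled with C11 atomics because atomic_unchecked_t and local_t are kernel-only types with no exact userspace counterpart:

/* Sketch of the two counter roles in the uio hunks above. */
#include <stdatomic.h>
#include <stdio.h>

struct uio_dev_sketch {
        atomic_uint event;      /* statistic: wraparound is acceptable */
        atomic_int  vma_count;  /* bookkeeping for open mappings       */
};

static void notify(struct uio_dev_sketch *d)    { atomic_fetch_add(&d->event, 1); }
static void vma_open(struct uio_dev_sketch *d)  { atomic_fetch_add(&d->vma_count, 1); }
static void vma_close(struct uio_dev_sketch *d) { atomic_fetch_sub(&d->vma_count, 1); }

int main(void)
{
        struct uio_dev_sketch d = { 0, 0 };

        notify(&d);
        vma_open(&d);
        vma_close(&d);
        printf("events=%u vmas=%d\n",
               atomic_load(&d.event), atomic_load(&d.vma_count));
        return 0;
}
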
38415 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38416 index 98b89fe..aff824e 100644
38417 --- a/drivers/usb/atm/cxacru.c
38418 +++ b/drivers/usb/atm/cxacru.c
38419 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38420 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38421 if (ret < 2)
38422 return -EINVAL;
38423 - if (index < 0 || index > 0x7f)
38424 + if (index > 0x7f)
38425 return -EINVAL;
38426 pos += tmp;
38427
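
The cxacru hunk drops the "index < 0" half of the range check, presumably because the parsed value is unsigned in this driver, which makes the lower-bound test always false and leaves only the upper bound meaningful. A two-line illustration of why an unsigned "< 0" comparison is dead code:

/* Why the "< 0" half of the check is dead when the value is unsigned. */
#include <stdio.h>

int main(void)
{
        unsigned int index = 0xffffffffu;   /* "negative" bit pattern */

        if (index < 0)                      /* always false for unsigned */
                puts("never printed");
        if (index > 0x7f)
                puts("upper-bound check still catches it");
        return 0;
}
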
38428 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38429 index d3448ca..d2864ca 100644
38430 --- a/drivers/usb/atm/usbatm.c
38431 +++ b/drivers/usb/atm/usbatm.c
38432 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38433 if (printk_ratelimit())
38434 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38435 __func__, vpi, vci);
38436 - atomic_inc(&vcc->stats->rx_err);
38437 + atomic_inc_unchecked(&vcc->stats->rx_err);
38438 return;
38439 }
38440
38441 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38442 if (length > ATM_MAX_AAL5_PDU) {
38443 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38444 __func__, length, vcc);
38445 - atomic_inc(&vcc->stats->rx_err);
38446 + atomic_inc_unchecked(&vcc->stats->rx_err);
38447 goto out;
38448 }
38449
38450 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38451 if (sarb->len < pdu_length) {
38452 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38453 __func__, pdu_length, sarb->len, vcc);
38454 - atomic_inc(&vcc->stats->rx_err);
38455 + atomic_inc_unchecked(&vcc->stats->rx_err);
38456 goto out;
38457 }
38458
38459 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38460 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38461 __func__, vcc);
38462 - atomic_inc(&vcc->stats->rx_err);
38463 + atomic_inc_unchecked(&vcc->stats->rx_err);
38464 goto out;
38465 }
38466
38467 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38468 if (printk_ratelimit())
38469 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38470 __func__, length);
38471 - atomic_inc(&vcc->stats->rx_drop);
38472 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38473 goto out;
38474 }
38475
38476 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38477
38478 vcc->push(vcc, skb);
38479
38480 - atomic_inc(&vcc->stats->rx);
38481 + atomic_inc_unchecked(&vcc->stats->rx);
38482 out:
38483 skb_trim(sarb, 0);
38484 }
38485 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38486 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38487
38488 usbatm_pop(vcc, skb);
38489 - atomic_inc(&vcc->stats->tx);
38490 + atomic_inc_unchecked(&vcc->stats->tx);
38491
38492 skb = skb_dequeue(&instance->sndqueue);
38493 }
38494 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38495 if (!left--)
38496 return sprintf(page,
38497 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38498 - atomic_read(&atm_dev->stats.aal5.tx),
38499 - atomic_read(&atm_dev->stats.aal5.tx_err),
38500 - atomic_read(&atm_dev->stats.aal5.rx),
38501 - atomic_read(&atm_dev->stats.aal5.rx_err),
38502 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38503 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38504 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38505 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38506 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38507 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38508
38509 if (!left--) {
38510 if (instance->disconnected)
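
The usbatm hunks switch every vcc->stats update and the proc read-out to the *_unchecked atomic variants; these AAL5 rx/tx counters are pure statistics that are allowed to wrap, so they are explicitly exempted from PAX_REFCOUNT overflow checking. A small userspace sketch of such wrap-tolerant statistics, modelled with C11 atomics:

/* Sketch of the wrap-tolerant statistics pattern in the usbatm hunks. */
#include <stdatomic.h>
#include <stdio.h>

struct aal5_stats_sketch {
        atomic_uint tx, tx_err, rx, rx_err, rx_drop;
};

static void report(struct aal5_stats_sketch *s)
{
        printf("AAL5: tx %u ( %u err ), rx %u ( %u err, %u drop )\n",
               atomic_load(&s->tx), atomic_load(&s->tx_err),
               atomic_load(&s->rx), atomic_load(&s->rx_err),
               atomic_load(&s->rx_drop));
}

int main(void)
{
        struct aal5_stats_sketch s = { 1, 0, 2, 0, 0 };

        atomic_fetch_add(&s.rx, 1);     /* e.g. one more cell received */
        report(&s);
        return 0;
}
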
38511 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38512 index d956965..4179a77 100644
38513 --- a/drivers/usb/core/devices.c
38514 +++ b/drivers/usb/core/devices.c
38515 @@ -126,7 +126,7 @@ static const char format_endpt[] =
38516 * time it gets called.
38517 */
38518 static struct device_connect_event {
38519 - atomic_t count;
38520 + atomic_unchecked_t count;
38521 wait_queue_head_t wait;
38522 } device_event = {
38523 .count = ATOMIC_INIT(1),
38524 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38525
38526 void usbfs_conn_disc_event(void)
38527 {
38528 - atomic_add(2, &device_event.count);
38529 + atomic_add_unchecked(2, &device_event.count);
38530 wake_up(&device_event.wait);
38531 }
38532
38533 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38534
38535 poll_wait(file, &device_event.wait, wait);
38536
38537 - event_count = atomic_read(&device_event.count);
38538 + event_count = atomic_read_unchecked(&device_event.count);
38539 if (file->f_version != event_count) {
38540 file->f_version = event_count;
38541 return POLLIN | POLLRDNORM;
38542 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38543 index 1fc8f12..20647c1 100644
38544 --- a/drivers/usb/early/ehci-dbgp.c
38545 +++ b/drivers/usb/early/ehci-dbgp.c
38546 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38547
38548 #ifdef CONFIG_KGDB
38549 static struct kgdb_io kgdbdbgp_io_ops;
38550 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38551 +static struct kgdb_io kgdbdbgp_io_ops_console;
38552 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38553 #else
38554 #define dbgp_kgdb_mode (0)
38555 #endif
38556 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38557 .write_char = kgdbdbgp_write_char,
38558 };
38559
38560 +static struct kgdb_io kgdbdbgp_io_ops_console = {
38561 + .name = "kgdbdbgp",
38562 + .read_char = kgdbdbgp_read_char,
38563 + .write_char = kgdbdbgp_write_char,
38564 + .is_console = 1
38565 +};
38566 +
38567 static int kgdbdbgp_wait_time;
38568
38569 static int __init kgdbdbgp_parse_config(char *str)
38570 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38571 ptr++;
38572 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38573 }
38574 - kgdb_register_io_module(&kgdbdbgp_io_ops);
38575 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38576 + if (early_dbgp_console.index != -1)
38577 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38578 + else
38579 + kgdb_register_io_module(&kgdbdbgp_io_ops);
38580
38581 return 0;
38582 }
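
Rather than flipping is_console on the single kgdbdbgp_io_ops structure at registration time, the ehci-dbgp hunk defines a second, console-flavoured copy and registers whichever variant matches the early console setup, which keeps both structures write-free after initialisation. A sketch of that "select a prebuilt constant configuration instead of patching one in place" pattern, with illustrative names:

/* Sketch of choosing between const configurations instead of patching one. */
#include <stdbool.h>
#include <stdio.h>

struct io_ops_sketch {
        const char *name;
        bool is_console;
};

static const struct io_ops_sketch dbgp_ops         = { "dbgp", false };
static const struct io_ops_sketch dbgp_ops_console = { "dbgp", true  };

static const struct io_ops_sketch *registered;

static void register_io(int console_index)
{
        /* choose the pre-built variant; neither structure is ever written */
        registered = (console_index != -1) ? &dbgp_ops_console : &dbgp_ops;
}

int main(void)
{
        register_io(0);
        printf("%s console=%d\n", registered->name, registered->is_console);
        return 0;
}
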
38583 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38584 index d6bea3e..60b250e 100644
38585 --- a/drivers/usb/wusbcore/wa-hc.h
38586 +++ b/drivers/usb/wusbcore/wa-hc.h
38587 @@ -192,7 +192,7 @@ struct wahc {
38588 struct list_head xfer_delayed_list;
38589 spinlock_t xfer_list_lock;
38590 struct work_struct xfer_work;
38591 - atomic_t xfer_id_count;
38592 + atomic_unchecked_t xfer_id_count;
38593 };
38594
38595
38596 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38597 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38598 spin_lock_init(&wa->xfer_list_lock);
38599 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38600 - atomic_set(&wa->xfer_id_count, 1);
38601 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38602 }
38603
38604 /**
38605 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38606 index 57c01ab..8a05959 100644
38607 --- a/drivers/usb/wusbcore/wa-xfer.c
38608 +++ b/drivers/usb/wusbcore/wa-xfer.c
38609 @@ -296,7 +296,7 @@ out:
38610 */
38611 static void wa_xfer_id_init(struct wa_xfer *xfer)
38612 {
38613 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38614 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38615 }
38616
38617 /*
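
The wusbcore hunks turn xfer_id_count into an unchecked atomic because it only hands out transfer IDs; atomic_add_return_unchecked returns the next ID, and wraparound of the generator is harmless. A minimal sketch of that ID-allocator pattern using C11 atomics (next_xfer_id is an illustrative name):

/* Minimal sketch of the transfer-ID allocator pattern above:
 * a monotonically increasing atomic counter whose wraparound is harmless. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint xfer_id_count = 1;

static unsigned int next_xfer_id(void)
{
        /* fetch_add returns the old value; +1 mirrors atomic_add_return */
        return atomic_fetch_add(&xfer_id_count, 1) + 1;
}

int main(void)
{
        unsigned int a = next_xfer_id();
        unsigned int b = next_xfer_id();

        printf("ids: %u %u\n", a, b);
        return 0;
}
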
38618 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38619 index c14c42b..f955cc2 100644
38620 --- a/drivers/vhost/vhost.c
38621 +++ b/drivers/vhost/vhost.c
38622 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38623 return 0;
38624 }
38625
38626 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38627 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38628 {
38629 struct file *eventfp, *filep = NULL,
38630 *pollstart = NULL, *pollstop = NULL;
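
The vhost change only widens the ioctl parameter from int to unsigned int so the helper matches the unsigned command numbers it is compared against; ioctl commands with the direction bits set occupy the top bit and would otherwise be sign-converted. A short illustration of the hazard (the command value below is made up):

/* Why ioctl command numbers want an unsigned parameter: commands with the
 * direction bits set use the high bit and become negative when forced
 * through int (conversion is implementation-defined, negative on the
 * usual two's-complement targets). */
#include <stdio.h>

int main(void)
{
        unsigned int cmd = 0xc008af00u;     /* hypothetical _IOWR-style value */
        int as_int = (int)cmd;

        printf("unsigned: %u  as int: %d\n", cmd, as_int);
        return 0;
}
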
38631 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38632 index b0b2ac3..89a4399 100644
38633 --- a/drivers/video/aty/aty128fb.c
38634 +++ b/drivers/video/aty/aty128fb.c
38635 @@ -148,7 +148,7 @@ enum {
38636 };
38637
38638 /* Must match above enum */
38639 -static const char *r128_family[] __devinitdata = {
38640 +static const char *r128_family[] __devinitconst = {
38641 "AGP",
38642 "PCI",
38643 "PRO AGP",
38644 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38645 index 5c3960d..15cf8fc 100644
38646 --- a/drivers/video/fbcmap.c
38647 +++ b/drivers/video/fbcmap.c
38648 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38649 rc = -ENODEV;
38650 goto out;
38651 }
38652 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38653 - !info->fbops->fb_setcmap)) {
38654 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38655 rc = -EINVAL;
38656 goto out1;
38657 }
38658 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38659 index c6ce416..3b9b642 100644
38660 --- a/drivers/video/fbmem.c
38661 +++ b/drivers/video/fbmem.c
38662 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38663 image->dx += image->width + 8;
38664 }
38665 } else if (rotate == FB_ROTATE_UD) {
38666 - for (x = 0; x < num && image->dx >= 0; x++) {
38667 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38668 info->fbops->fb_imageblit(info, image);
38669 image->dx -= image->width + 8;
38670 }
38671 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38672 image->dy += image->height + 8;
38673 }
38674 } else if (rotate == FB_ROTATE_CCW) {
38675 - for (x = 0; x < num && image->dy >= 0; x++) {
38676 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38677 info->fbops->fb_imageblit(info, image);
38678 image->dy -= image->height + 8;
38679 }
38680 @@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38681 return -EFAULT;
38682 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38683 return -EINVAL;
38684 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38685 + if (con2fb.framebuffer >= FB_MAX)
38686 return -EINVAL;
38687 if (!registered_fb[con2fb.framebuffer])
38688 request_module("fb%d", con2fb.framebuffer);
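
In fb_do_show_logo() the dx/dy members are unsigned, so the original ">= 0" loop conditions were always true; once the subtraction stepped below zero the value wrapped to a huge positive number instead of terminating the loop. The added (__s32) casts make the wrapped value read as negative so the loop stops. A small demonstration of the wrap and the cast (the always-true comparison is kept on purpose):

/* Demonstrates the unsigned-wrap issue fixed in the fb_do_show_logo hunks:
 * "u >= 0" is always true for unsigned u, but "(int32_t)u >= 0" goes false
 * once the subtraction wraps below zero (typical two's-complement result). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t dx = 10;

        dx -= 16;                               /* wraps to a huge value */
        printf("dx=%u  unsigned>=0:%d  signed>=0:%d\n",
               dx, dx >= 0, (int32_t)dx >= 0);
        return 0;
}
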
38689 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38690 index 5a5d092..265c5ed 100644
38691 --- a/drivers/video/geode/gx1fb_core.c
38692 +++ b/drivers/video/geode/gx1fb_core.c
38693 @@ -29,7 +29,7 @@ static int crt_option = 1;
38694 static char panel_option[32] = "";
38695
38696 /* Modes relevant to the GX1 (taken from modedb.c) */
38697 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
38698 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
38699 /* 640x480-60 VESA */
38700 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38701 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38702 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38703 index 0fad23f..0e9afa4 100644
38704 --- a/drivers/video/gxt4500.c
38705 +++ b/drivers/video/gxt4500.c
38706 @@ -156,7 +156,7 @@ struct gxt4500_par {
38707 static char *mode_option;
38708
38709 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38710 -static const struct fb_videomode defaultmode __devinitdata = {
38711 +static const struct fb_videomode defaultmode __devinitconst = {
38712 .refresh = 60,
38713 .xres = 1280,
38714 .yres = 1024,
38715 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38716 return 0;
38717 }
38718
38719 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38720 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38721 .id = "IBM GXT4500P",
38722 .type = FB_TYPE_PACKED_PIXELS,
38723 .visual = FB_VISUAL_PSEUDOCOLOR,
38724 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38725 index 7672d2e..b56437f 100644
38726 --- a/drivers/video/i810/i810_accel.c
38727 +++ b/drivers/video/i810/i810_accel.c
38728 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38729 }
38730 }
38731 printk("ringbuffer lockup!!!\n");
38732 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38733 i810_report_error(mmio);
38734 par->dev_flags |= LOCKUP;
38735 info->pixmap.scan_align = 1;
38736 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38737 index b83f361..2b05a91 100644
38738 --- a/drivers/video/i810/i810_main.c
38739 +++ b/drivers/video/i810/i810_main.c
38740 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38741 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38742
38743 /* PCI */
38744 -static const char *i810_pci_list[] __devinitdata = {
38745 +static const char *i810_pci_list[] __devinitconst = {
38746 "Intel(R) 810 Framebuffer Device" ,
38747 "Intel(R) 810-DC100 Framebuffer Device" ,
38748 "Intel(R) 810E Framebuffer Device" ,
38749 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38750 index de36693..3c63fc2 100644
38751 --- a/drivers/video/jz4740_fb.c
38752 +++ b/drivers/video/jz4740_fb.c
38753 @@ -136,7 +136,7 @@ struct jzfb {
38754 uint32_t pseudo_palette[16];
38755 };
38756
38757 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38758 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38759 .id = "JZ4740 FB",
38760 .type = FB_TYPE_PACKED_PIXELS,
38761 .visual = FB_VISUAL_TRUECOLOR,
38762 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38763 index 3c14e43..eafa544 100644
38764 --- a/drivers/video/logo/logo_linux_clut224.ppm
38765 +++ b/drivers/video/logo/logo_linux_clut224.ppm
38766 @@ -1,1604 +1,1123 @@
38767 P3
38768 -# Standard 224-color Linux logo
38769 80 80
38770 255
[ raw PPM pixel rows elided: the remainder of this hunk removes the RGB triplets of the old 224-color Linux logo and inserts the replacement bitmap; the numeric rows carry no reviewable information beyond the logo image being swapped. ]
39899 -253 253 253 253 253 253 231 231 231 250 250 250
39900 -253 253 253 253 253 253 253 253 253 253 253 253
39901 -253 253 253 253 253 253 253 253 253 253 253 253
39902 -253 253 253 253 253 253 253 253 253 253 253 253
39903 -253 253 253 253 253 253 253 253 253 228 184 62
39904 -241 196 14 241 208 19 232 195 16 38 30 10
39905 - 2 2 6 2 2 6 2 2 6 2 2 6
39906 - 2 2 6 6 6 6 30 30 30 26 26 26
39907 -203 166 17 154 142 90 66 66 66 26 26 26
39908 - 6 6 6 0 0 0 0 0 0 0 0 0
39909 - 0 0 0 0 0 0 0 0 0 0 0 0
39910 - 0 0 0 0 0 0 0 0 0 0 0 0
39911 - 0 0 0 0 0 0 0 0 0 0 0 0
39912 - 0 0 0 0 0 0 0 0 0 0 0 0
39913 - 6 6 6 18 18 18 38 38 38 58 58 58
39914 - 78 78 78 86 86 86 101 101 101 123 123 123
39915 -175 146 61 210 150 10 234 174 13 246 186 14
39916 -246 190 14 246 190 14 246 190 14 238 190 10
39917 -102 78 10 2 2 6 46 46 46 198 198 198
39918 -253 253 253 253 253 253 253 253 253 253 253 253
39919 -253 253 253 253 253 253 234 234 234 242 242 242
39920 -253 253 253 253 253 253 253 253 253 253 253 253
39921 -253 253 253 253 253 253 253 253 253 253 253 253
39922 -253 253 253 253 253 253 253 253 253 253 253 253
39923 -253 253 253 253 253 253 253 253 253 224 178 62
39924 -242 186 14 241 196 14 210 166 10 22 18 6
39925 - 2 2 6 2 2 6 2 2 6 2 2 6
39926 - 2 2 6 2 2 6 6 6 6 121 92 8
39927 -238 202 15 232 195 16 82 82 82 34 34 34
39928 - 10 10 10 0 0 0 0 0 0 0 0 0
39929 - 0 0 0 0 0 0 0 0 0 0 0 0
39930 - 0 0 0 0 0 0 0 0 0 0 0 0
39931 - 0 0 0 0 0 0 0 0 0 0 0 0
39932 - 0 0 0 0 0 0 0 0 0 0 0 0
39933 - 14 14 14 38 38 38 70 70 70 154 122 46
39934 -190 142 34 200 144 11 197 138 11 197 138 11
39935 -213 154 11 226 170 11 242 186 14 246 190 14
39936 -246 190 14 246 190 14 246 190 14 246 190 14
39937 -225 175 15 46 32 6 2 2 6 22 22 22
39938 -158 158 158 250 250 250 253 253 253 253 253 253
39939 -253 253 253 253 253 253 253 253 253 253 253 253
39940 -253 253 253 253 253 253 253 253 253 253 253 253
39941 -253 253 253 253 253 253 253 253 253 253 253 253
39942 -253 253 253 253 253 253 253 253 253 253 253 253
39943 -253 253 253 250 250 250 242 242 242 224 178 62
39944 -239 182 13 236 186 11 213 154 11 46 32 6
39945 - 2 2 6 2 2 6 2 2 6 2 2 6
39946 - 2 2 6 2 2 6 61 42 6 225 175 15
39947 -238 190 10 236 186 11 112 100 78 42 42 42
39948 - 14 14 14 0 0 0 0 0 0 0 0 0
39949 - 0 0 0 0 0 0 0 0 0 0 0 0
39950 - 0 0 0 0 0 0 0 0 0 0 0 0
39951 - 0 0 0 0 0 0 0 0 0 0 0 0
39952 - 0 0 0 0 0 0 0 0 0 6 6 6
39953 - 22 22 22 54 54 54 154 122 46 213 154 11
39954 -226 170 11 230 174 11 226 170 11 226 170 11
39955 -236 178 12 242 186 14 246 190 14 246 190 14
39956 -246 190 14 246 190 14 246 190 14 246 190 14
39957 -241 196 14 184 144 12 10 10 10 2 2 6
39958 - 6 6 6 116 116 116 242 242 242 253 253 253
39959 -253 253 253 253 253 253 253 253 253 253 253 253
39960 -253 253 253 253 253 253 253 253 253 253 253 253
39961 -253 253 253 253 253 253 253 253 253 253 253 253
39962 -253 253 253 253 253 253 253 253 253 253 253 253
39963 -253 253 253 231 231 231 198 198 198 214 170 54
39964 -236 178 12 236 178 12 210 150 10 137 92 6
39965 - 18 14 6 2 2 6 2 2 6 2 2 6
39966 - 6 6 6 70 47 6 200 144 11 236 178 12
39967 -239 182 13 239 182 13 124 112 88 58 58 58
39968 - 22 22 22 6 6 6 0 0 0 0 0 0
39969 - 0 0 0 0 0 0 0 0 0 0 0 0
39970 - 0 0 0 0 0 0 0 0 0 0 0 0
39971 - 0 0 0 0 0 0 0 0 0 0 0 0
39972 - 0 0 0 0 0 0 0 0 0 10 10 10
39973 - 30 30 30 70 70 70 180 133 36 226 170 11
39974 -239 182 13 242 186 14 242 186 14 246 186 14
39975 -246 190 14 246 190 14 246 190 14 246 190 14
39976 -246 190 14 246 190 14 246 190 14 246 190 14
39977 -246 190 14 232 195 16 98 70 6 2 2 6
39978 - 2 2 6 2 2 6 66 66 66 221 221 221
39979 -253 253 253 253 253 253 253 253 253 253 253 253
39980 -253 253 253 253 253 253 253 253 253 253 253 253
39981 -253 253 253 253 253 253 253 253 253 253 253 253
39982 -253 253 253 253 253 253 253 253 253 253 253 253
39983 -253 253 253 206 206 206 198 198 198 214 166 58
39984 -230 174 11 230 174 11 216 158 10 192 133 9
39985 -163 110 8 116 81 8 102 78 10 116 81 8
39986 -167 114 7 197 138 11 226 170 11 239 182 13
39987 -242 186 14 242 186 14 162 146 94 78 78 78
39988 - 34 34 34 14 14 14 6 6 6 0 0 0
39989 - 0 0 0 0 0 0 0 0 0 0 0 0
39990 - 0 0 0 0 0 0 0 0 0 0 0 0
39991 - 0 0 0 0 0 0 0 0 0 0 0 0
39992 - 0 0 0 0 0 0 0 0 0 6 6 6
39993 - 30 30 30 78 78 78 190 142 34 226 170 11
39994 -239 182 13 246 190 14 246 190 14 246 190 14
39995 -246 190 14 246 190 14 246 190 14 246 190 14
39996 -246 190 14 246 190 14 246 190 14 246 190 14
39997 -246 190 14 241 196 14 203 166 17 22 18 6
39998 - 2 2 6 2 2 6 2 2 6 38 38 38
39999 -218 218 218 253 253 253 253 253 253 253 253 253
40000 -253 253 253 253 253 253 253 253 253 253 253 253
40001 -253 253 253 253 253 253 253 253 253 253 253 253
40002 -253 253 253 253 253 253 253 253 253 253 253 253
40003 -250 250 250 206 206 206 198 198 198 202 162 69
40004 -226 170 11 236 178 12 224 166 10 210 150 10
40005 -200 144 11 197 138 11 192 133 9 197 138 11
40006 -210 150 10 226 170 11 242 186 14 246 190 14
40007 -246 190 14 246 186 14 225 175 15 124 112 88
40008 - 62 62 62 30 30 30 14 14 14 6 6 6
40009 - 0 0 0 0 0 0 0 0 0 0 0 0
40010 - 0 0 0 0 0 0 0 0 0 0 0 0
40011 - 0 0 0 0 0 0 0 0 0 0 0 0
40012 - 0 0 0 0 0 0 0 0 0 10 10 10
40013 - 30 30 30 78 78 78 174 135 50 224 166 10
40014 -239 182 13 246 190 14 246 190 14 246 190 14
40015 -246 190 14 246 190 14 246 190 14 246 190 14
40016 -246 190 14 246 190 14 246 190 14 246 190 14
40017 -246 190 14 246 190 14 241 196 14 139 102 15
40018 - 2 2 6 2 2 6 2 2 6 2 2 6
40019 - 78 78 78 250 250 250 253 253 253 253 253 253
40020 -253 253 253 253 253 253 253 253 253 253 253 253
40021 -253 253 253 253 253 253 253 253 253 253 253 253
40022 -253 253 253 253 253 253 253 253 253 253 253 253
40023 -250 250 250 214 214 214 198 198 198 190 150 46
40024 -219 162 10 236 178 12 234 174 13 224 166 10
40025 -216 158 10 213 154 11 213 154 11 216 158 10
40026 -226 170 11 239 182 13 246 190 14 246 190 14
40027 -246 190 14 246 190 14 242 186 14 206 162 42
40028 -101 101 101 58 58 58 30 30 30 14 14 14
40029 - 6 6 6 0 0 0 0 0 0 0 0 0
40030 - 0 0 0 0 0 0 0 0 0 0 0 0
40031 - 0 0 0 0 0 0 0 0 0 0 0 0
40032 - 0 0 0 0 0 0 0 0 0 10 10 10
40033 - 30 30 30 74 74 74 174 135 50 216 158 10
40034 -236 178 12 246 190 14 246 190 14 246 190 14
40035 -246 190 14 246 190 14 246 190 14 246 190 14
40036 -246 190 14 246 190 14 246 190 14 246 190 14
40037 -246 190 14 246 190 14 241 196 14 226 184 13
40038 - 61 42 6 2 2 6 2 2 6 2 2 6
40039 - 22 22 22 238 238 238 253 253 253 253 253 253
40040 -253 253 253 253 253 253 253 253 253 253 253 253
40041 -253 253 253 253 253 253 253 253 253 253 253 253
40042 -253 253 253 253 253 253 253 253 253 253 253 253
40043 -253 253 253 226 226 226 187 187 187 180 133 36
40044 -216 158 10 236 178 12 239 182 13 236 178 12
40045 -230 174 11 226 170 11 226 170 11 230 174 11
40046 -236 178 12 242 186 14 246 190 14 246 190 14
40047 -246 190 14 246 190 14 246 186 14 239 182 13
40048 -206 162 42 106 106 106 66 66 66 34 34 34
40049 - 14 14 14 6 6 6 0 0 0 0 0 0
40050 - 0 0 0 0 0 0 0 0 0 0 0 0
40051 - 0 0 0 0 0 0 0 0 0 0 0 0
40052 - 0 0 0 0 0 0 0 0 0 6 6 6
40053 - 26 26 26 70 70 70 163 133 67 213 154 11
40054 -236 178 12 246 190 14 246 190 14 246 190 14
40055 -246 190 14 246 190 14 246 190 14 246 190 14
40056 -246 190 14 246 190 14 246 190 14 246 190 14
40057 -246 190 14 246 190 14 246 190 14 241 196 14
40058 -190 146 13 18 14 6 2 2 6 2 2 6
40059 - 46 46 46 246 246 246 253 253 253 253 253 253
40060 -253 253 253 253 253 253 253 253 253 253 253 253
40061 -253 253 253 253 253 253 253 253 253 253 253 253
40062 -253 253 253 253 253 253 253 253 253 253 253 253
40063 -253 253 253 221 221 221 86 86 86 156 107 11
40064 -216 158 10 236 178 12 242 186 14 246 186 14
40065 -242 186 14 239 182 13 239 182 13 242 186 14
40066 -242 186 14 246 186 14 246 190 14 246 190 14
40067 -246 190 14 246 190 14 246 190 14 246 190 14
40068 -242 186 14 225 175 15 142 122 72 66 66 66
40069 - 30 30 30 10 10 10 0 0 0 0 0 0
40070 - 0 0 0 0 0 0 0 0 0 0 0 0
40071 - 0 0 0 0 0 0 0 0 0 0 0 0
40072 - 0 0 0 0 0 0 0 0 0 6 6 6
40073 - 26 26 26 70 70 70 163 133 67 210 150 10
40074 -236 178 12 246 190 14 246 190 14 246 190 14
40075 -246 190 14 246 190 14 246 190 14 246 190 14
40076 -246 190 14 246 190 14 246 190 14 246 190 14
40077 -246 190 14 246 190 14 246 190 14 246 190 14
40078 -232 195 16 121 92 8 34 34 34 106 106 106
40079 -221 221 221 253 253 253 253 253 253 253 253 253
40080 -253 253 253 253 253 253 253 253 253 253 253 253
40081 -253 253 253 253 253 253 253 253 253 253 253 253
40082 -253 253 253 253 253 253 253 253 253 253 253 253
40083 -242 242 242 82 82 82 18 14 6 163 110 8
40084 -216 158 10 236 178 12 242 186 14 246 190 14
40085 -246 190 14 246 190 14 246 190 14 246 190 14
40086 -246 190 14 246 190 14 246 190 14 246 190 14
40087 -246 190 14 246 190 14 246 190 14 246 190 14
40088 -246 190 14 246 190 14 242 186 14 163 133 67
40089 - 46 46 46 18 18 18 6 6 6 0 0 0
40090 - 0 0 0 0 0 0 0 0 0 0 0 0
40091 - 0 0 0 0 0 0 0 0 0 0 0 0
40092 - 0 0 0 0 0 0 0 0 0 10 10 10
40093 - 30 30 30 78 78 78 163 133 67 210 150 10
40094 -236 178 12 246 186 14 246 190 14 246 190 14
40095 -246 190 14 246 190 14 246 190 14 246 190 14
40096 -246 190 14 246 190 14 246 190 14 246 190 14
40097 -246 190 14 246 190 14 246 190 14 246 190 14
40098 -241 196 14 215 174 15 190 178 144 253 253 253
40099 -253 253 253 253 253 253 253 253 253 253 253 253
40100 -253 253 253 253 253 253 253 253 253 253 253 253
40101 -253 253 253 253 253 253 253 253 253 253 253 253
40102 -253 253 253 253 253 253 253 253 253 218 218 218
40103 - 58 58 58 2 2 6 22 18 6 167 114 7
40104 -216 158 10 236 178 12 246 186 14 246 190 14
40105 -246 190 14 246 190 14 246 190 14 246 190 14
40106 -246 190 14 246 190 14 246 190 14 246 190 14
40107 -246 190 14 246 190 14 246 190 14 246 190 14
40108 -246 190 14 246 186 14 242 186 14 190 150 46
40109 - 54 54 54 22 22 22 6 6 6 0 0 0
40110 - 0 0 0 0 0 0 0 0 0 0 0 0
40111 - 0 0 0 0 0 0 0 0 0 0 0 0
40112 - 0 0 0 0 0 0 0 0 0 14 14 14
40113 - 38 38 38 86 86 86 180 133 36 213 154 11
40114 -236 178 12 246 186 14 246 190 14 246 190 14
40115 -246 190 14 246 190 14 246 190 14 246 190 14
40116 -246 190 14 246 190 14 246 190 14 246 190 14
40117 -246 190 14 246 190 14 246 190 14 246 190 14
40118 -246 190 14 232 195 16 190 146 13 214 214 214
40119 -253 253 253 253 253 253 253 253 253 253 253 253
40120 -253 253 253 253 253 253 253 253 253 253 253 253
40121 -253 253 253 253 253 253 253 253 253 253 253 253
40122 -253 253 253 250 250 250 170 170 170 26 26 26
40123 - 2 2 6 2 2 6 37 26 9 163 110 8
40124 -219 162 10 239 182 13 246 186 14 246 190 14
40125 -246 190 14 246 190 14 246 190 14 246 190 14
40126 -246 190 14 246 190 14 246 190 14 246 190 14
40127 -246 190 14 246 190 14 246 190 14 246 190 14
40128 -246 186 14 236 178 12 224 166 10 142 122 72
40129 - 46 46 46 18 18 18 6 6 6 0 0 0
40130 - 0 0 0 0 0 0 0 0 0 0 0 0
40131 - 0 0 0 0 0 0 0 0 0 0 0 0
40132 - 0 0 0 0 0 0 6 6 6 18 18 18
40133 - 50 50 50 109 106 95 192 133 9 224 166 10
40134 -242 186 14 246 190 14 246 190 14 246 190 14
40135 -246 190 14 246 190 14 246 190 14 246 190 14
40136 -246 190 14 246 190 14 246 190 14 246 190 14
40137 -246 190 14 246 190 14 246 190 14 246 190 14
40138 -242 186 14 226 184 13 210 162 10 142 110 46
40139 -226 226 226 253 253 253 253 253 253 253 253 253
40140 -253 253 253 253 253 253 253 253 253 253 253 253
40141 -253 253 253 253 253 253 253 253 253 253 253 253
40142 -198 198 198 66 66 66 2 2 6 2 2 6
40143 - 2 2 6 2 2 6 50 34 6 156 107 11
40144 -219 162 10 239 182 13 246 186 14 246 190 14
40145 -246 190 14 246 190 14 246 190 14 246 190 14
40146 -246 190 14 246 190 14 246 190 14 246 190 14
40147 -246 190 14 246 190 14 246 190 14 242 186 14
40148 -234 174 13 213 154 11 154 122 46 66 66 66
40149 - 30 30 30 10 10 10 0 0 0 0 0 0
40150 - 0 0 0 0 0 0 0 0 0 0 0 0
40151 - 0 0 0 0 0 0 0 0 0 0 0 0
40152 - 0 0 0 0 0 0 6 6 6 22 22 22
40153 - 58 58 58 154 121 60 206 145 10 234 174 13
40154 -242 186 14 246 186 14 246 190 14 246 190 14
40155 -246 190 14 246 190 14 246 190 14 246 190 14
40156 -246 190 14 246 190 14 246 190 14 246 190 14
40157 -246 190 14 246 190 14 246 190 14 246 190 14
40158 -246 186 14 236 178 12 210 162 10 163 110 8
40159 - 61 42 6 138 138 138 218 218 218 250 250 250
40160 -253 253 253 253 253 253 253 253 253 250 250 250
40161 -242 242 242 210 210 210 144 144 144 66 66 66
40162 - 6 6 6 2 2 6 2 2 6 2 2 6
40163 - 2 2 6 2 2 6 61 42 6 163 110 8
40164 -216 158 10 236 178 12 246 190 14 246 190 14
40165 -246 190 14 246 190 14 246 190 14 246 190 14
40166 -246 190 14 246 190 14 246 190 14 246 190 14
40167 -246 190 14 239 182 13 230 174 11 216 158 10
40168 -190 142 34 124 112 88 70 70 70 38 38 38
40169 - 18 18 18 6 6 6 0 0 0 0 0 0
40170 - 0 0 0 0 0 0 0 0 0 0 0 0
40171 - 0 0 0 0 0 0 0 0 0 0 0 0
40172 - 0 0 0 0 0 0 6 6 6 22 22 22
40173 - 62 62 62 168 124 44 206 145 10 224 166 10
40174 -236 178 12 239 182 13 242 186 14 242 186 14
40175 -246 186 14 246 190 14 246 190 14 246 190 14
40176 -246 190 14 246 190 14 246 190 14 246 190 14
40177 -246 190 14 246 190 14 246 190 14 246 190 14
40178 -246 190 14 236 178 12 216 158 10 175 118 6
40179 - 80 54 7 2 2 6 6 6 6 30 30 30
40180 - 54 54 54 62 62 62 50 50 50 38 38 38
40181 - 14 14 14 2 2 6 2 2 6 2 2 6
40182 - 2 2 6 2 2 6 2 2 6 2 2 6
40183 - 2 2 6 6 6 6 80 54 7 167 114 7
40184 -213 154 11 236 178 12 246 190 14 246 190 14
40185 -246 190 14 246 190 14 246 190 14 246 190 14
40186 -246 190 14 242 186 14 239 182 13 239 182 13
40187 -230 174 11 210 150 10 174 135 50 124 112 88
40188 - 82 82 82 54 54 54 34 34 34 18 18 18
40189 - 6 6 6 0 0 0 0 0 0 0 0 0
40190 - 0 0 0 0 0 0 0 0 0 0 0 0
40191 - 0 0 0 0 0 0 0 0 0 0 0 0
40192 - 0 0 0 0 0 0 6 6 6 18 18 18
40193 - 50 50 50 158 118 36 192 133 9 200 144 11
40194 -216 158 10 219 162 10 224 166 10 226 170 11
40195 -230 174 11 236 178 12 239 182 13 239 182 13
40196 -242 186 14 246 186 14 246 190 14 246 190 14
40197 -246 190 14 246 190 14 246 190 14 246 190 14
40198 -246 186 14 230 174 11 210 150 10 163 110 8
40199 -104 69 6 10 10 10 2 2 6 2 2 6
40200 - 2 2 6 2 2 6 2 2 6 2 2 6
40201 - 2 2 6 2 2 6 2 2 6 2 2 6
40202 - 2 2 6 2 2 6 2 2 6 2 2 6
40203 - 2 2 6 6 6 6 91 60 6 167 114 7
40204 -206 145 10 230 174 11 242 186 14 246 190 14
40205 -246 190 14 246 190 14 246 186 14 242 186 14
40206 -239 182 13 230 174 11 224 166 10 213 154 11
40207 -180 133 36 124 112 88 86 86 86 58 58 58
40208 - 38 38 38 22 22 22 10 10 10 6 6 6
40209 - 0 0 0 0 0 0 0 0 0 0 0 0
40210 - 0 0 0 0 0 0 0 0 0 0 0 0
40211 - 0 0 0 0 0 0 0 0 0 0 0 0
40212 - 0 0 0 0 0 0 0 0 0 14 14 14
40213 - 34 34 34 70 70 70 138 110 50 158 118 36
40214 -167 114 7 180 123 7 192 133 9 197 138 11
40215 -200 144 11 206 145 10 213 154 11 219 162 10
40216 -224 166 10 230 174 11 239 182 13 242 186 14
40217 -246 186 14 246 186 14 246 186 14 246 186 14
40218 -239 182 13 216 158 10 185 133 11 152 99 6
40219 -104 69 6 18 14 6 2 2 6 2 2 6
40220 - 2 2 6 2 2 6 2 2 6 2 2 6
40221 - 2 2 6 2 2 6 2 2 6 2 2 6
40222 - 2 2 6 2 2 6 2 2 6 2 2 6
40223 - 2 2 6 6 6 6 80 54 7 152 99 6
40224 -192 133 9 219 162 10 236 178 12 239 182 13
40225 -246 186 14 242 186 14 239 182 13 236 178 12
40226 -224 166 10 206 145 10 192 133 9 154 121 60
40227 - 94 94 94 62 62 62 42 42 42 22 22 22
40228 - 14 14 14 6 6 6 0 0 0 0 0 0
40229 - 0 0 0 0 0 0 0 0 0 0 0 0
40230 - 0 0 0 0 0 0 0 0 0 0 0 0
40231 - 0 0 0 0 0 0 0 0 0 0 0 0
40232 - 0 0 0 0 0 0 0 0 0 6 6 6
40233 - 18 18 18 34 34 34 58 58 58 78 78 78
40234 -101 98 89 124 112 88 142 110 46 156 107 11
40235 -163 110 8 167 114 7 175 118 6 180 123 7
40236 -185 133 11 197 138 11 210 150 10 219 162 10
40237 -226 170 11 236 178 12 236 178 12 234 174 13
40238 -219 162 10 197 138 11 163 110 8 130 83 6
40239 - 91 60 6 10 10 10 2 2 6 2 2 6
40240 - 18 18 18 38 38 38 38 38 38 38 38 38
40241 - 38 38 38 38 38 38 38 38 38 38 38 38
40242 - 38 38 38 38 38 38 26 26 26 2 2 6
40243 - 2 2 6 6 6 6 70 47 6 137 92 6
40244 -175 118 6 200 144 11 219 162 10 230 174 11
40245 -234 174 13 230 174 11 219 162 10 210 150 10
40246 -192 133 9 163 110 8 124 112 88 82 82 82
40247 - 50 50 50 30 30 30 14 14 14 6 6 6
40248 - 0 0 0 0 0 0 0 0 0 0 0 0
40249 - 0 0 0 0 0 0 0 0 0 0 0 0
40250 - 0 0 0 0 0 0 0 0 0 0 0 0
40251 - 0 0 0 0 0 0 0 0 0 0 0 0
40252 - 0 0 0 0 0 0 0 0 0 0 0 0
40253 - 6 6 6 14 14 14 22 22 22 34 34 34
40254 - 42 42 42 58 58 58 74 74 74 86 86 86
40255 -101 98 89 122 102 70 130 98 46 121 87 25
40256 -137 92 6 152 99 6 163 110 8 180 123 7
40257 -185 133 11 197 138 11 206 145 10 200 144 11
40258 -180 123 7 156 107 11 130 83 6 104 69 6
40259 - 50 34 6 54 54 54 110 110 110 101 98 89
40260 - 86 86 86 82 82 82 78 78 78 78 78 78
40261 - 78 78 78 78 78 78 78 78 78 78 78 78
40262 - 78 78 78 82 82 82 86 86 86 94 94 94
40263 -106 106 106 101 101 101 86 66 34 124 80 6
40264 -156 107 11 180 123 7 192 133 9 200 144 11
40265 -206 145 10 200 144 11 192 133 9 175 118 6
40266 -139 102 15 109 106 95 70 70 70 42 42 42
40267 - 22 22 22 10 10 10 0 0 0 0 0 0
40268 - 0 0 0 0 0 0 0 0 0 0 0 0
40269 - 0 0 0 0 0 0 0 0 0 0 0 0
40270 - 0 0 0 0 0 0 0 0 0 0 0 0
40271 - 0 0 0 0 0 0 0 0 0 0 0 0
40272 - 0 0 0 0 0 0 0 0 0 0 0 0
40273 - 0 0 0 0 0 0 6 6 6 10 10 10
40274 - 14 14 14 22 22 22 30 30 30 38 38 38
40275 - 50 50 50 62 62 62 74 74 74 90 90 90
40276 -101 98 89 112 100 78 121 87 25 124 80 6
40277 -137 92 6 152 99 6 152 99 6 152 99 6
40278 -138 86 6 124 80 6 98 70 6 86 66 30
40279 -101 98 89 82 82 82 58 58 58 46 46 46
40280 - 38 38 38 34 34 34 34 34 34 34 34 34
40281 - 34 34 34 34 34 34 34 34 34 34 34 34
40282 - 34 34 34 34 34 34 38 38 38 42 42 42
40283 - 54 54 54 82 82 82 94 86 76 91 60 6
40284 -134 86 6 156 107 11 167 114 7 175 118 6
40285 -175 118 6 167 114 7 152 99 6 121 87 25
40286 -101 98 89 62 62 62 34 34 34 18 18 18
40287 - 6 6 6 0 0 0 0 0 0 0 0 0
40288 - 0 0 0 0 0 0 0 0 0 0 0 0
40289 - 0 0 0 0 0 0 0 0 0 0 0 0
40290 - 0 0 0 0 0 0 0 0 0 0 0 0
40291 - 0 0 0 0 0 0 0 0 0 0 0 0
40292 - 0 0 0 0 0 0 0 0 0 0 0 0
40293 - 0 0 0 0 0 0 0 0 0 0 0 0
40294 - 0 0 0 6 6 6 6 6 6 10 10 10
40295 - 18 18 18 22 22 22 30 30 30 42 42 42
40296 - 50 50 50 66 66 66 86 86 86 101 98 89
40297 -106 86 58 98 70 6 104 69 6 104 69 6
40298 -104 69 6 91 60 6 82 62 34 90 90 90
40299 - 62 62 62 38 38 38 22 22 22 14 14 14
40300 - 10 10 10 10 10 10 10 10 10 10 10 10
40301 - 10 10 10 10 10 10 6 6 6 10 10 10
40302 - 10 10 10 10 10 10 10 10 10 14 14 14
40303 - 22 22 22 42 42 42 70 70 70 89 81 66
40304 - 80 54 7 104 69 6 124 80 6 137 92 6
40305 -134 86 6 116 81 8 100 82 52 86 86 86
40306 - 58 58 58 30 30 30 14 14 14 6 6 6
40307 - 0 0 0 0 0 0 0 0 0 0 0 0
40308 - 0 0 0 0 0 0 0 0 0 0 0 0
40309 - 0 0 0 0 0 0 0 0 0 0 0 0
40310 - 0 0 0 0 0 0 0 0 0 0 0 0
40311 - 0 0 0 0 0 0 0 0 0 0 0 0
40312 - 0 0 0 0 0 0 0 0 0 0 0 0
40313 - 0 0 0 0 0 0 0 0 0 0 0 0
40314 - 0 0 0 0 0 0 0 0 0 0 0 0
40315 - 0 0 0 6 6 6 10 10 10 14 14 14
40316 - 18 18 18 26 26 26 38 38 38 54 54 54
40317 - 70 70 70 86 86 86 94 86 76 89 81 66
40318 - 89 81 66 86 86 86 74 74 74 50 50 50
40319 - 30 30 30 14 14 14 6 6 6 0 0 0
40320 - 0 0 0 0 0 0 0 0 0 0 0 0
40321 - 0 0 0 0 0 0 0 0 0 0 0 0
40322 - 0 0 0 0 0 0 0 0 0 0 0 0
40323 - 6 6 6 18 18 18 34 34 34 58 58 58
40324 - 82 82 82 89 81 66 89 81 66 89 81 66
40325 - 94 86 66 94 86 76 74 74 74 50 50 50
40326 - 26 26 26 14 14 14 6 6 6 0 0 0
40327 - 0 0 0 0 0 0 0 0 0 0 0 0
40328 - 0 0 0 0 0 0 0 0 0 0 0 0
40329 - 0 0 0 0 0 0 0 0 0 0 0 0
40330 - 0 0 0 0 0 0 0 0 0 0 0 0
40331 - 0 0 0 0 0 0 0 0 0 0 0 0
40332 - 0 0 0 0 0 0 0 0 0 0 0 0
40333 - 0 0 0 0 0 0 0 0 0 0 0 0
40334 - 0 0 0 0 0 0 0 0 0 0 0 0
40335 - 0 0 0 0 0 0 0 0 0 0 0 0
40336 - 6 6 6 6 6 6 14 14 14 18 18 18
40337 - 30 30 30 38 38 38 46 46 46 54 54 54
40338 - 50 50 50 42 42 42 30 30 30 18 18 18
40339 - 10 10 10 0 0 0 0 0 0 0 0 0
40340 - 0 0 0 0 0 0 0 0 0 0 0 0
40341 - 0 0 0 0 0 0 0 0 0 0 0 0
40342 - 0 0 0 0 0 0 0 0 0 0 0 0
40343 - 0 0 0 6 6 6 14 14 14 26 26 26
40344 - 38 38 38 50 50 50 58 58 58 58 58 58
40345 - 54 54 54 42 42 42 30 30 30 18 18 18
40346 - 10 10 10 0 0 0 0 0 0 0 0 0
40347 - 0 0 0 0 0 0 0 0 0 0 0 0
40348 - 0 0 0 0 0 0 0 0 0 0 0 0
40349 - 0 0 0 0 0 0 0 0 0 0 0 0
40350 - 0 0 0 0 0 0 0 0 0 0 0 0
40351 - 0 0 0 0 0 0 0 0 0 0 0 0
40352 - 0 0 0 0 0 0 0 0 0 0 0 0
40353 - 0 0 0 0 0 0 0 0 0 0 0 0
40354 - 0 0 0 0 0 0 0 0 0 0 0 0
40355 - 0 0 0 0 0 0 0 0 0 0 0 0
40356 - 0 0 0 0 0 0 0 0 0 6 6 6
40357 - 6 6 6 10 10 10 14 14 14 18 18 18
40358 - 18 18 18 14 14 14 10 10 10 6 6 6
40359 - 0 0 0 0 0 0 0 0 0 0 0 0
40360 - 0 0 0 0 0 0 0 0 0 0 0 0
40361 - 0 0 0 0 0 0 0 0 0 0 0 0
40362 - 0 0 0 0 0 0 0 0 0 0 0 0
40363 - 0 0 0 0 0 0 0 0 0 6 6 6
40364 - 14 14 14 18 18 18 22 22 22 22 22 22
40365 - 18 18 18 14 14 14 10 10 10 6 6 6
40366 - 0 0 0 0 0 0 0 0 0 0 0 0
40367 - 0 0 0 0 0 0 0 0 0 0 0 0
40368 - 0 0 0 0 0 0 0 0 0 0 0 0
40369 - 0 0 0 0 0 0 0 0 0 0 0 0
40370 - 0 0 0 0 0 0 0 0 0 0 0 0
40371 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40372 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40381 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40382 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40383 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40384 +4 4 4 4 4 4
40385 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40386 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40388 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40389 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40390 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40391 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40395 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40396 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40397 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40398 +4 4 4 4 4 4
40399 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40400 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40402 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40405 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40408 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40409 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40410 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40411 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40412 +4 4 4 4 4 4
40413 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40414 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40422 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40424 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40425 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40426 +4 4 4 4 4 4
40427 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40428 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40436 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40438 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40439 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40440 +4 4 4 4 4 4
40441 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40453 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40454 +4 4 4 4 4 4
40455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40459 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40460 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40464 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40465 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40466 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40467 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40468 +4 4 4 4 4 4
40469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40472 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40473 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40474 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40475 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40478 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40479 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40480 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40481 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40482 +4 4 4 4 4 4
40483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40486 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40487 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40488 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40489 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40491 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40492 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40493 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40494 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40495 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40496 +4 4 4 4 4 4
40497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40500 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40501 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40502 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40503 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40505 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40506 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40507 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40508 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40509 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40510 +4 4 4 4 4 4
40511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40514 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40515 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40516 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40517 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40518 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40519 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40520 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40521 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40522 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40523 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40524 +4 4 4 4 4 4
40525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40528 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40529 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40530 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40531 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40532 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40533 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40534 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40535 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40536 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40537 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40538 +4 4 4 4 4 4
40539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40541 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40542 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40543 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40544 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40545 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40546 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40547 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40548 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40549 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40550 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40551 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40552 +4 4 4 4 4 4
40553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40555 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40556 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40557 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40558 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40559 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40560 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40561 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40562 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40563 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40564 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40565 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40566 +4 4 4 4 4 4
40567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40569 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40570 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40571 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40572 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40573 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40574 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40575 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40576 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40577 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40578 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40579 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40580 +4 4 4 4 4 4
40581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40583 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40584 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40585 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40586 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40587 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40588 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40589 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40590 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40591 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40592 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40593 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40594 +4 4 4 4 4 4
40595 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40596 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40597 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40598 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40599 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40600 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40601 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40602 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40603 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40604 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40605 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40606 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40607 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40608 +4 4 4 4 4 4
40609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40610 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40611 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40612 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40613 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40614 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40615 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40616 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40617 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40618 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40619 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40620 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40621 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40622 +0 0 0 4 4 4
40623 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40624 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40625 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40626 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40627 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40628 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40629 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40630 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40631 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40632 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40633 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40634 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40635 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40636 +2 0 0 0 0 0
40637 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40638 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40639 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40640 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40641 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40642 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40643 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40644 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40645 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40646 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40647 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40648 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40649 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40650 +37 38 37 0 0 0
40651 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40652 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40653 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40654 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40655 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40656 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40657 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40658 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40659 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40660 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40661 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40662 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40663 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40664 +85 115 134 4 0 0
40665 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40666 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40667 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40668 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40669 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40670 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40671 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40672 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40673 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40674 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40675 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40676 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40677 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40678 +60 73 81 4 0 0
40679 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40680 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40681 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40682 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40683 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40684 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40685 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40686 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40687 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40688 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40689 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40690 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40691 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40692 +16 19 21 4 0 0
40693 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40694 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40695 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40696 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40697 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40698 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40699 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40700 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40701 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40702 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40703 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40704 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40705 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40706 +4 0 0 4 3 3
40707 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40708 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40709 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40711 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40712 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40713 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40714 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40715 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40716 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40717 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40718 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40719 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40720 +3 2 2 4 4 4
40721 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40722 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40723 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40724 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40725 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40726 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40727 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40728 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40729 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40730 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40731 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40732 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40733 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40734 +4 4 4 4 4 4
40735 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40736 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40737 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40738 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40739 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40740 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40741 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40742 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40743 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40744 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40745 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40746 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40747 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40748 +4 4 4 4 4 4
40749 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40750 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40751 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40752 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40753 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40754 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40755 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40756 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40757 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40758 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40759 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40760 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40761 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40762 +5 5 5 5 5 5
40763 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40764 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40765 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40766 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40767 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40768 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40769 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40770 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40771 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40772 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40773 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40774 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40775 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40776 +5 5 5 4 4 4
40777 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40778 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40779 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40780 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40781 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40782 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40783 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40784 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40785 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40786 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40787 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40788 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40789 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40790 +4 4 4 4 4 4
40791 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40792 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40793 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40794 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40795 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40796 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40797 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40798 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40799 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40800 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40801 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40802 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40803 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40804 +4 4 4 4 4 4
40805 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40806 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40807 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40808 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40809 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40810 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40811 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40812 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40813 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40814 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40815 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40816 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40817 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40818 +4 4 4 4 4 4
40819 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40820 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40821 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40822 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40823 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40824 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40825 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40826 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40827 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40828 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40829 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40830 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40831 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40832 +4 4 4 4 4 4
40833 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40834 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40835 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40836 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40837 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40838 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40839 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40840 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40841 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40842 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40843 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40846 +4 4 4 4 4 4
40847 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40848 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40849 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40850 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40851 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40852 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40853 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40854 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40855 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40856 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40857 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40860 +4 4 4 4 4 4
40861 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40862 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40863 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40864 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40865 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40866 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40867 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40868 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40869 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40870 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40871 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40874 +4 4 4 4 4 4
40875 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40876 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40877 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40878 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40879 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40880 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40881 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40882 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40883 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40884 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40885 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40888 +4 4 4 4 4 4
40889 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40890 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40891 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40892 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40893 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40894 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40895 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40896 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40897 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40898 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40899 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40902 +4 4 4 4 4 4
40903 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40904 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40905 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40906 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40907 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40908 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40909 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40910 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40911 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40912 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40913 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40916 +4 4 4 4 4 4
40917 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40918 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40919 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40920 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40921 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40922 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40923 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40924 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40925 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40926 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40927 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40930 +4 4 4 4 4 4
40931 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40932 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40933 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40934 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40935 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40936 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40937 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40938 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40939 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40940 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40941 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40944 +4 4 4 4 4 4
40945 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40946 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40947 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40948 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40949 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40950 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40951 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40952 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40953 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40954 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40955 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40958 +4 4 4 4 4 4
40959 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40960 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40961 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40962 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40963 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40964 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40965 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40966 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40967 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40968 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40969 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40972 +4 4 4 4 4 4
40973 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40974 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40975 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40976 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40977 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40978 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40979 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40980 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40981 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40982 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40983 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40986 +4 4 4 4 4 4
40987 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40988 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40989 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40990 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40991 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40992 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40993 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40994 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40995 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40996 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40997 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41000 +4 4 4 4 4 4
41001 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
41002 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
41003 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41004 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
41005 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
41006 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
41007 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
41008 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
41009 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
41010 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41011 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41014 +4 4 4 4 4 4
41015 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41016 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
41017 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
41018 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
41019 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
41020 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
41021 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
41022 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
41023 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41024 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41025 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41028 +4 4 4 4 4 4
41029 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41030 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
41031 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41032 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
41033 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
41034 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
41035 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
41036 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
41037 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41038 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41039 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41042 +4 4 4 4 4 4
41043 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41044 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
41045 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
41046 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
41047 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
41048 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
41049 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41050 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
41051 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41052 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41053 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41056 +4 4 4 4 4 4
41057 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41058 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
41059 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
41060 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41061 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
41062 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
41063 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41064 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
41065 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41066 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41067 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41070 +4 4 4 4 4 4
41071 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41072 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
41073 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
41074 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
41075 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
41076 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
41077 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
41078 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
41079 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
41080 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41081 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41084 +4 4 4 4 4 4
41085 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41086 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
41087 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
41088 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
41089 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
41090 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
41091 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
41092 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
41093 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
41094 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41095 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41098 +4 4 4 4 4 4
41099 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
41100 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
41101 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
41102 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
41103 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41104 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
41105 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
41106 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
41107 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
41108 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41109 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41112 +4 4 4 4 4 4
41113 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41114 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
41115 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
41116 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
41117 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
41118 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
41119 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
41120 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
41121 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
41122 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41123 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41126 +4 4 4 4 4 4
41127 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41128 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41129 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41130 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41131 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41132 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41133 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41134 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41135 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41136 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41137 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41140 +4 4 4 4 4 4
41141 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41142 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41143 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41144 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41145 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41146 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41147 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41148 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41149 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41150 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41151 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41154 +4 4 4 4 4 4
41155 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41156 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41157 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41158 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41159 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41160 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41161 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41162 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41163 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41164 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41165 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41168 +4 4 4 4 4 4
41169 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41170 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41171 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41172 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41173 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41174 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41175 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41176 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41177 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41178 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41179 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41182 +4 4 4 4 4 4
41183 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41184 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41185 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41186 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41187 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41188 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41189 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41190 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41191 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41192 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41196 +4 4 4 4 4 4
41197 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41198 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41199 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41200 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41201 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41202 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41203 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41204 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41205 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41206 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41210 +4 4 4 4 4 4
41211 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41212 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41213 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41214 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41215 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41216 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41217 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41218 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41219 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41220 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41224 +4 4 4 4 4 4
41225 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41226 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41227 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41228 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41229 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41230 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41231 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41232 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41233 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41234 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41238 +4 4 4 4 4 4
41239 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41240 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41241 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41242 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41243 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41244 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41245 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41246 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41247 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41252 +4 4 4 4 4 4
41253 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41254 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
41255 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
41256 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
41257 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
41258 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
41259 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
41260 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
41261 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
41262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41266 +4 4 4 4 4 4
41267 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41268 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
41269 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
41270 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
41271 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
41272 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
41273 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
41274 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
41275 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41280 +4 4 4 4 4 4
41281 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
41282 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
41283 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41284 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
41285 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
41286 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
41287 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
41288 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
41289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41294 +4 4 4 4 4 4
41295 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41296 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41297 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41298 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41299 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41300 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41301 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41302 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41308 +4 4 4 4 4 4
41309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41310 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41311 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41312 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41313 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41314 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41315 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41316 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41322 +4 4 4 4 4 4
41323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41324 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41325 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41326 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41327 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41328 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41329 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41330 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41336 +4 4 4 4 4 4
41337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41338 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41339 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41340 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41341 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41342 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41343 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41344 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41350 +4 4 4 4 4 4
41351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41353 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41354 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41355 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41356 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41357 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41358 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41364 +4 4 4 4 4 4
41365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41368 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41369 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41370 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41371 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41372 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41378 +4 4 4 4 4 4
41379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41381 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41382 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41383 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41384 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41385 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41386 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41388 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41389 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41390 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41391 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41392 +4 4 4 4 4 4
41393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41395 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41396 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41397 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41398 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41399 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41400 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41402 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41405 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41406 +4 4 4 4 4 4
41407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41408 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41409 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41410 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41411 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41412 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41413 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41414 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41420 +4 4 4 4 4 4
41421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41422 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41424 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41425 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41426 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41427 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41428 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41434 +4 4 4 4 4 4
41435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41436 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41438 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41439 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41440 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41441 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41448 +4 4 4 4 4 4
41449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41453 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41454 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41455 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41459 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41462 +4 4 4 4 4 4
41463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41466 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41467 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41468 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41472 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41473 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41476 +4 4 4 4 4 4
41477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41480 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41481 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41482 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41486 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41487 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41490 +4 4 4 4 4 4
41491 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41492 index a40c05e..785c583 100644
41493 --- a/drivers/video/udlfb.c
41494 +++ b/drivers/video/udlfb.c
41495 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41496 dlfb_urb_completion(urb);
41497
41498 error:
41499 - atomic_add(bytes_sent, &dev->bytes_sent);
41500 - atomic_add(bytes_identical, &dev->bytes_identical);
41501 - atomic_add(width*height*2, &dev->bytes_rendered);
41502 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41503 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41504 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41505 end_cycles = get_cycles();
41506 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41507 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41508 >> 10)), /* Kcycles */
41509 &dev->cpu_kcycles_used);
41510
41511 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41512 dlfb_urb_completion(urb);
41513
41514 error:
41515 - atomic_add(bytes_sent, &dev->bytes_sent);
41516 - atomic_add(bytes_identical, &dev->bytes_identical);
41517 - atomic_add(bytes_rendered, &dev->bytes_rendered);
41518 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41519 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41520 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41521 end_cycles = get_cycles();
41522 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41523 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41524 >> 10)), /* Kcycles */
41525 &dev->cpu_kcycles_used);
41526 }
41527 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41528 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41529 struct dlfb_data *dev = fb_info->par;
41530 return snprintf(buf, PAGE_SIZE, "%u\n",
41531 - atomic_read(&dev->bytes_rendered));
41532 + atomic_read_unchecked(&dev->bytes_rendered));
41533 }
41534
41535 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41536 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41537 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41538 struct dlfb_data *dev = fb_info->par;
41539 return snprintf(buf, PAGE_SIZE, "%u\n",
41540 - atomic_read(&dev->bytes_identical));
41541 + atomic_read_unchecked(&dev->bytes_identical));
41542 }
41543
41544 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41545 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41546 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41547 struct dlfb_data *dev = fb_info->par;
41548 return snprintf(buf, PAGE_SIZE, "%u\n",
41549 - atomic_read(&dev->bytes_sent));
41550 + atomic_read_unchecked(&dev->bytes_sent));
41551 }
41552
41553 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41554 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41555 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41556 struct dlfb_data *dev = fb_info->par;
41557 return snprintf(buf, PAGE_SIZE, "%u\n",
41558 - atomic_read(&dev->cpu_kcycles_used));
41559 + atomic_read_unchecked(&dev->cpu_kcycles_used));
41560 }
41561
41562 static ssize_t edid_show(
41563 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41564 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41565 struct dlfb_data *dev = fb_info->par;
41566
41567 - atomic_set(&dev->bytes_rendered, 0);
41568 - atomic_set(&dev->bytes_identical, 0);
41569 - atomic_set(&dev->bytes_sent, 0);
41570 - atomic_set(&dev->cpu_kcycles_used, 0);
41571 + atomic_set_unchecked(&dev->bytes_rendered, 0);
41572 + atomic_set_unchecked(&dev->bytes_identical, 0);
41573 + atomic_set_unchecked(&dev->bytes_sent, 0);
41574 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41575
41576 return count;
41577 }
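
The udlfb hunks above switch the driver's performance counters (bytes_sent, bytes_identical, bytes_rendered, cpu_kcycles_used) from atomic_t to the *_unchecked variants. Under PAX_REFCOUNT, plain atomic_t arithmetic is instrumented to catch overflows (the reference-count-wrap class of bugs); counters that are pure statistics and may legitimately wrap are moved to atomic_add_unchecked()/atomic_read_unchecked() so they never trip that detector. A minimal userspace sketch of the distinction, with checked_add() and unchecked_add() as made-up stand-ins for the kernel API:

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for a PAX_REFCOUNT-style checked counter: overflow is a bug. */
	static void checked_add(int *counter, int delta)
	{
		int result;

		if (__builtin_add_overflow(*counter, delta, &result)) {
			/* the real feature logs the event and stops the offender */
			fprintf(stderr, "counter overflow detected\n");
			abort();
		}
		*counter = result;
	}

	/* Stand-in for atomic_add_unchecked(): wrapping is fine for statistics. */
	static void unchecked_add(unsigned int *counter, unsigned int delta)
	{
		*counter += delta;	/* unsigned wrap-around is well defined */
	}

	int main(void)
	{
		unsigned int bytes_rendered = 0;
		int refs = 1;

		unchecked_add(&bytes_rendered, 640 * 480 * 2);
		checked_add(&refs, 1);
		printf("bytes_rendered=%u refs=%d\n", bytes_rendered, refs);
		return 0;
	}
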
41578 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41579 index 8408543..d6f20f1 100644
41580 --- a/drivers/video/uvesafb.c
41581 +++ b/drivers/video/uvesafb.c
41582 @@ -19,6 +19,7 @@
41583 #include <linux/io.h>
41584 #include <linux/mutex.h>
41585 #include <linux/slab.h>
41586 +#include <linux/moduleloader.h>
41587 #include <video/edid.h>
41588 #include <video/uvesafb.h>
41589 #ifdef CONFIG_X86
41590 @@ -73,7 +74,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
41591 struct uvesafb_task *utask;
41592 struct uvesafb_ktask *task;
41593
41594 - if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
41595 + if (!capable(CAP_SYS_ADMIN))
41596 return;
41597
41598 if (msg->seq >= UVESAFB_TASKS_MAX)
41599 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
41600 NULL,
41601 };
41602
41603 - return call_usermodehelper(v86d_path, argv, envp, 1);
41604 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
41605 }
41606
41607 /*
41608 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41609 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41610 par->pmi_setpal = par->ypan = 0;
41611 } else {
41612 +
41613 +#ifdef CONFIG_PAX_KERNEXEC
41614 +#ifdef CONFIG_MODULES
41615 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41616 +#endif
41617 + if (!par->pmi_code) {
41618 + par->pmi_setpal = par->ypan = 0;
41619 + return 0;
41620 + }
41621 +#endif
41622 +
41623 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41624 + task->t.regs.edi);
41625 +
41626 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41627 + pax_open_kernel();
41628 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41629 + pax_close_kernel();
41630 +
41631 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41632 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41633 +#else
41634 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41635 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41636 +#endif
41637 +
41638 printk(KERN_INFO "uvesafb: protected mode interface info at "
41639 "%04x:%04x\n",
41640 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41641 @@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
41642 par->ypan = ypan;
41643
41644 if (par->pmi_setpal || par->ypan) {
41645 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
41646 if (__supported_pte_mask & _PAGE_NX) {
41647 par->pmi_setpal = par->ypan = 0;
41648 printk(KERN_WARNING "uvesafb: NX protection is actively."
41649 "We have better not to use the PMI.\n");
41650 - } else {
41651 + } else
41652 +#endif
41653 uvesafb_vbe_getpmi(task, par);
41654 - }
41655 }
41656 #else
41657 /* The protected mode interface is not available on non-x86. */
41658 @@ -1828,6 +1852,11 @@ out:
41659 if (par->vbe_modes)
41660 kfree(par->vbe_modes);
41661
41662 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41663 + if (par->pmi_code)
41664 + module_free_exec(NULL, par->pmi_code);
41665 +#endif
41666 +
41667 framebuffer_release(info);
41668 return err;
41669 }
41670 @@ -1854,6 +1883,12 @@ static int uvesafb_remove(struct platform_device *dev)
41671 kfree(par->vbe_state_orig);
41672 if (par->vbe_state_saved)
41673 kfree(par->vbe_state_saved);
41674 +
41675 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41676 + if (par->pmi_code)
41677 + module_free_exec(NULL, par->pmi_code);
41678 +#endif
41679 +
41680 }
41681
41682 framebuffer_release(info);
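
Besides the KERNEXEC changes, the uvesafb hunks above make two smaller tweaks: the connector callback now uses capable(CAP_SYS_ADMIN), which runs the full security-module path instead of only testing the raw capability bit with cap_raised(), and call_usermodehelper() is given the named constant UMH_WAIT_PROC rather than a bare 1, spelling out that the kernel waits for the v86d helper process to finish. A rough userspace analogue of that wait-for-the-process behaviour, with run_helper_and_wait() and /bin/true as made-up stand-ins for call_usermodehelper() and v86d:

	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	/* Run a helper and, like UMH_WAIT_PROC, block until it has terminated. */
	static int run_helper_and_wait(const char *path, char *const argv[])
	{
		int status;
		pid_t pid = fork();

		if (pid < 0)
			return -1;
		if (pid == 0) {
			execv(path, argv);
			_exit(127);	/* exec failed */
		}
		if (waitpid(pid, &status, 0) < 0)	/* the "wait for the process" part */
			return -1;
		return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
	}

	int main(void)
	{
		char *argv[] = { "/bin/true", NULL };	/* stand-in for the v86d helper */

		printf("helper exited with %d\n", run_helper_and_wait(argv[0], argv));
		return 0;
	}
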
41683 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41684 index 501b340..86bd4cf 100644
41685 --- a/drivers/video/vesafb.c
41686 +++ b/drivers/video/vesafb.c
41687 @@ -9,6 +9,7 @@
41688 */
41689
41690 #include <linux/module.h>
41691 +#include <linux/moduleloader.h>
41692 #include <linux/kernel.h>
41693 #include <linux/errno.h>
41694 #include <linux/string.h>
41695 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41696 static int vram_total __initdata; /* Set total amount of memory */
41697 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41698 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41699 -static void (*pmi_start)(void) __read_mostly;
41700 -static void (*pmi_pal) (void) __read_mostly;
41701 +static void (*pmi_start)(void) __read_only;
41702 +static void (*pmi_pal) (void) __read_only;
41703 static int depth __read_mostly;
41704 static int vga_compat __read_mostly;
41705 /* --------------------------------------------------------------------- */
41706 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41707 unsigned int size_vmode;
41708 unsigned int size_remap;
41709 unsigned int size_total;
41710 + void *pmi_code = NULL;
41711
41712 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41713 return -ENODEV;
41714 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41715 size_remap = size_total;
41716 vesafb_fix.smem_len = size_remap;
41717
41718 -#ifndef __i386__
41719 - screen_info.vesapm_seg = 0;
41720 -#endif
41721 -
41722 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41723 printk(KERN_WARNING
41724 "vesafb: cannot reserve video memory at 0x%lx\n",
41725 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41726 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41727 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41728
41729 +#ifdef __i386__
41730 +
41731 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41732 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
41733 + if (!pmi_code)
41734 +#elif !defined(CONFIG_PAX_KERNEXEC)
41735 + if (0)
41736 +#endif
41737 +
41738 +#endif
41739 + screen_info.vesapm_seg = 0;
41740 +
41741 if (screen_info.vesapm_seg) {
41742 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41743 - screen_info.vesapm_seg,screen_info.vesapm_off);
41744 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41745 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41746 }
41747
41748 if (screen_info.vesapm_seg < 0xc000)
41749 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41750
41751 if (ypan || pmi_setpal) {
41752 unsigned short *pmi_base;
41753 +
41754 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41755 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41756 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41757 +
41758 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41759 + pax_open_kernel();
41760 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41761 +#else
41762 + pmi_code = pmi_base;
41763 +#endif
41764 +
41765 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41766 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41767 +
41768 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41769 + pmi_start = ktva_ktla(pmi_start);
41770 + pmi_pal = ktva_ktla(pmi_pal);
41771 + pax_close_kernel();
41772 +#endif
41773 +
41774 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41775 if (pmi_base[3]) {
41776 printk(KERN_INFO "vesafb: pmi: ports = ");
41777 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41778 info->node, info->fix.id);
41779 return 0;
41780 err:
41781 +
41782 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41783 + module_free_exec(NULL, pmi_code);
41784 +#endif
41785 +
41786 if (info->screen_base)
41787 iounmap(info->screen_base);
41788 framebuffer_release(info);
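
Both the uvesafb and vesafb hunks above stage the BIOS protected-mode interface (PMI) code in a buffer obtained from module_alloc_exec() and only write to it between pax_open_kernel() and pax_close_kernel(), rather than calling into an ordinary RAM mapping. The underlying discipline is W^X: under KERNEXEC no kernel mapping stays writable and executable at the same time. A rough userspace analogue using POSIX mmap()/mprotect(), where payload is just a placeholder byte rather than real PMI code:

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	/* Placeholder for the PMI code the drivers copy out of the video BIOS. */
	static const unsigned char payload[] = { 0xc3 };	/* dummy byte only */

	int main(void)
	{
		size_t len = sizeof(payload);

		/* 1. map writable but not executable while the code is copied in */
		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		memcpy(buf, payload, len);

		/* 2. flip to read+execute before anything may jump into it; the
		 *    region is never writable and executable at the same time */
		if (mprotect(buf, len, PROT_READ | PROT_EXEC) != 0) {
			perror("mprotect");
			return 1;
		}

		printf("code staged read-only/executable at %p\n", buf);
		munmap(buf, len);
		return 0;
	}
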
41789 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41790 index 88714ae..16c2e11 100644
41791 --- a/drivers/video/via/via_clock.h
41792 +++ b/drivers/video/via/via_clock.h
41793 @@ -56,7 +56,7 @@ struct via_clock {
41794
41795 void (*set_engine_pll_state)(u8 state);
41796 void (*set_engine_pll)(struct via_pll_config config);
41797 -};
41798 +} __no_const;
41799
41800
41801 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41802 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41803 index e56c934..fc22f4b 100644
41804 --- a/drivers/xen/xen-pciback/conf_space.h
41805 +++ b/drivers/xen/xen-pciback/conf_space.h
41806 @@ -44,15 +44,15 @@ struct config_field {
41807 struct {
41808 conf_dword_write write;
41809 conf_dword_read read;
41810 - } dw;
41811 + } __no_const dw;
41812 struct {
41813 conf_word_write write;
41814 conf_word_read read;
41815 - } w;
41816 + } __no_const w;
41817 struct {
41818 conf_byte_write write;
41819 conf_byte_read read;
41820 - } b;
41821 + } __no_const b;
41822 } u;
41823 struct list_head list;
41824 };
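
The via_clock.h and xen-pciback hunks above tag structures made up entirely of function pointers with __no_const. That annotation belongs to the PaX constify GCC plugin, which otherwise forces such structures to be const so their pointer tables end up read-only and cannot be redirected at run time; __no_const opts out the few instances that really are written after initialisation. The plain-C version of the same idea, with clock_ops and the dummy_* functions invented purely for illustration:

	#include <stdio.h>

	struct clock_ops {
		void (*set_pll_state)(unsigned char state);
		void (*set_pll)(unsigned int config);
	};

	static void dummy_set_pll_state(unsigned char state)
	{
		printf("pll state -> %u\n", state);
	}

	static void dummy_set_pll(unsigned int config)
	{
		printf("pll config -> %#x\n", config);
	}

	/*
	 * const + static: the pointer table is emitted into .rodata and cannot
	 * be redirected at run time.  The structures the patch tags with
	 * __no_const are exactly the ones that must remain writable.
	 */
	static const struct clock_ops ops = {
		.set_pll_state	= dummy_set_pll_state,
		.set_pll	= dummy_set_pll,
	};

	int main(void)
	{
		ops.set_pll_state(1);
		ops.set_pll(0x1234);
		return 0;
	}
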
41825 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41826 index 014c8dd..6f3dfe6 100644
41827 --- a/fs/9p/vfs_inode.c
41828 +++ b/fs/9p/vfs_inode.c
41829 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41830 void
41831 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41832 {
41833 - char *s = nd_get_link(nd);
41834 + const char *s = nd_get_link(nd);
41835
41836 p9_debug(P9_DEBUG_VFS, " %s %s\n",
41837 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
41838 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41839 index e95d1b6..3454244 100644
41840 --- a/fs/Kconfig.binfmt
41841 +++ b/fs/Kconfig.binfmt
41842 @@ -89,7 +89,7 @@ config HAVE_AOUT
41843
41844 config BINFMT_AOUT
41845 tristate "Kernel support for a.out and ECOFF binaries"
41846 - depends on HAVE_AOUT
41847 + depends on HAVE_AOUT && BROKEN
41848 ---help---
41849 A.out (Assembler.OUTput) is a set of formats for libraries and
41850 executables used in the earliest versions of UNIX. Linux used
41851 diff --git a/fs/aio.c b/fs/aio.c
41852 index b9d64d8..86cb1d5 100644
41853 --- a/fs/aio.c
41854 +++ b/fs/aio.c
41855 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41856 size += sizeof(struct io_event) * nr_events;
41857 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41858
41859 - if (nr_pages < 0)
41860 + if (nr_pages <= 0)
41861 return -EINVAL;
41862
41863 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41864 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41865 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41866 {
41867 ssize_t ret;
41868 + struct iovec iovstack;
41869
41870 #ifdef CONFIG_COMPAT
41871 if (compat)
41872 ret = compat_rw_copy_check_uvector(type,
41873 (struct compat_iovec __user *)kiocb->ki_buf,
41874 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41875 + kiocb->ki_nbytes, 1, &iovstack,
41876 &kiocb->ki_iovec, 1);
41877 else
41878 #endif
41879 ret = rw_copy_check_uvector(type,
41880 (struct iovec __user *)kiocb->ki_buf,
41881 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41882 + kiocb->ki_nbytes, 1, &iovstack,
41883 &kiocb->ki_iovec, 1);
41884 if (ret < 0)
41885 goto out;
41886
41887 + if (kiocb->ki_iovec == &iovstack) {
41888 + kiocb->ki_inline_vec = iovstack;
41889 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41890 + }
41891 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41892 kiocb->ki_cur_seg = 0;
41893 /* ki_nbytes/left now reflect bytes instead of segs */
41894 diff --git a/fs/attr.c b/fs/attr.c
41895 index 95053ad..2cc93ca 100644
41896 --- a/fs/attr.c
41897 +++ b/fs/attr.c
41898 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41899 unsigned long limit;
41900
41901 limit = rlimit(RLIMIT_FSIZE);
41902 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41903 if (limit != RLIM_INFINITY && offset > limit)
41904 goto out_sig;
41905 if (offset > inode->i_sb->s_maxbytes)
41906 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41907 index f624cd0..3d9a559 100644
41908 --- a/fs/autofs4/waitq.c
41909 +++ b/fs/autofs4/waitq.c
41910 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
41911 {
41912 unsigned long sigpipe, flags;
41913 mm_segment_t fs;
41914 - const char *data = (const char *)addr;
41915 + const char __user *data = (const char __force_user *)addr;
41916 ssize_t wr = 0;
41917
41918 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
41919 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41920 index 6e6d536..457113a 100644
41921 --- a/fs/befs/linuxvfs.c
41922 +++ b/fs/befs/linuxvfs.c
41923 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41924 {
41925 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41926 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41927 - char *link = nd_get_link(nd);
41928 + const char *link = nd_get_link(nd);
41929 if (!IS_ERR(link))
41930 kfree(link);
41931 }
41932 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41933 index 1ff9405..f1e376a 100644
41934 --- a/fs/binfmt_aout.c
41935 +++ b/fs/binfmt_aout.c
41936 @@ -16,6 +16,7 @@
41937 #include <linux/string.h>
41938 #include <linux/fs.h>
41939 #include <linux/file.h>
41940 +#include <linux/security.h>
41941 #include <linux/stat.h>
41942 #include <linux/fcntl.h>
41943 #include <linux/ptrace.h>
41944 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41945 #endif
41946 # define START_STACK(u) ((void __user *)u.start_stack)
41947
41948 + memset(&dump, 0, sizeof(dump));
41949 +
41950 fs = get_fs();
41951 set_fs(KERNEL_DS);
41952 has_dumped = 1;
41953 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41954
41955 /* If the size of the dump file exceeds the rlimit, then see what would happen
41956 if we wrote the stack, but not the data area. */
41957 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41958 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41959 dump.u_dsize = 0;
41960
41961 /* Make sure we have enough room to write the stack and data areas. */
41962 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41963 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41964 dump.u_ssize = 0;
41965
41966 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41967 rlim = rlimit(RLIMIT_DATA);
41968 if (rlim >= RLIM_INFINITY)
41969 rlim = ~0;
41970 +
41971 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41972 if (ex.a_data + ex.a_bss > rlim)
41973 return -ENOMEM;
41974
41975 @@ -269,6 +276,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41976 install_exec_creds(bprm);
41977 current->flags &= ~PF_FORKNOEXEC;
41978
41979 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41980 + current->mm->pax_flags = 0UL;
41981 +#endif
41982 +
41983 +#ifdef CONFIG_PAX_PAGEEXEC
41984 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41985 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41986 +
41987 +#ifdef CONFIG_PAX_EMUTRAMP
41988 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41989 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41990 +#endif
41991 +
41992 +#ifdef CONFIG_PAX_MPROTECT
41993 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41994 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41995 +#endif
41996 +
41997 + }
41998 +#endif
41999 +
42000 if (N_MAGIC(ex) == OMAGIC) {
42001 unsigned long text_addr, map_size;
42002 loff_t pos;
42003 @@ -341,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42004
42005 down_write(&current->mm->mmap_sem);
42006 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
42007 - PROT_READ | PROT_WRITE | PROT_EXEC,
42008 + PROT_READ | PROT_WRITE,
42009 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
42010 fd_offset + ex.a_text);
42011 up_write(&current->mm->mmap_sem);
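
In the a.out loader hunks above, the added memset(&dump, 0, sizeof(dump)) ensures that padding bytes and any fields the dump path never assigns are zeroed before the structure is written into the core file, so stale kernel stack contents cannot leak out; the gr_learn_resource() calls feed the observed RLIMIT_CORE/RLIMIT_DATA demands into grsecurity's resource learning, and dropping PROT_EXEC keeps the a.out data mapping non-executable. A small userspace illustration of the first point, with dump_header and its values invented for the example:

	#include <stdio.h>
	#include <string.h>

	struct dump_header {
		unsigned int magic;
		unsigned short flags;		/* the compiler pads around this */
		unsigned long start_stack;
	};

	int main(void)
	{
		struct dump_header dump;

		/*
		 * Without this memset the padding bytes (and any field left
		 * unassigned) would hold whatever was on the stack and would
		 * be written out verbatim below.
		 */
		memset(&dump, 0, sizeof(dump));
		dump.magic = 0xdeadbeef;	/* illustrative values only */
		dump.start_stack = 0xbfff0000UL;

		FILE *f = fopen("core.sample", "wb");
		if (!f)
			return 1;
		fwrite(&dump, sizeof(dump), 1, f);	/* every byte is now defined */
		fclose(f);
		return 0;
	}
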
42012 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
42013 index 07d096c..25762af 100644
42014 --- a/fs/binfmt_elf.c
42015 +++ b/fs/binfmt_elf.c
42016 @@ -32,6 +32,7 @@
42017 #include <linux/elf.h>
42018 #include <linux/utsname.h>
42019 #include <linux/coredump.h>
42020 +#include <linux/xattr.h>
42021 #include <asm/uaccess.h>
42022 #include <asm/param.h>
42023 #include <asm/page.h>
42024 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
42025 #define elf_core_dump NULL
42026 #endif
42027
42028 +#ifdef CONFIG_PAX_MPROTECT
42029 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
42030 +#endif
42031 +
42032 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
42033 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
42034 #else
42035 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
42036 .load_binary = load_elf_binary,
42037 .load_shlib = load_elf_library,
42038 .core_dump = elf_core_dump,
42039 +
42040 +#ifdef CONFIG_PAX_MPROTECT
42041 + .handle_mprotect= elf_handle_mprotect,
42042 +#endif
42043 +
42044 .min_coredump = ELF_EXEC_PAGESIZE,
42045 };
42046
42047 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
42048
42049 static int set_brk(unsigned long start, unsigned long end)
42050 {
42051 + unsigned long e = end;
42052 +
42053 start = ELF_PAGEALIGN(start);
42054 end = ELF_PAGEALIGN(end);
42055 if (end > start) {
42056 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
42057 if (BAD_ADDR(addr))
42058 return addr;
42059 }
42060 - current->mm->start_brk = current->mm->brk = end;
42061 + current->mm->start_brk = current->mm->brk = e;
42062 return 0;
42063 }
42064
42065 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42066 elf_addr_t __user *u_rand_bytes;
42067 const char *k_platform = ELF_PLATFORM;
42068 const char *k_base_platform = ELF_BASE_PLATFORM;
42069 - unsigned char k_rand_bytes[16];
42070 + u32 k_rand_bytes[4];
42071 int items;
42072 elf_addr_t *elf_info;
42073 int ei_index = 0;
42074 const struct cred *cred = current_cred();
42075 struct vm_area_struct *vma;
42076 + unsigned long saved_auxv[AT_VECTOR_SIZE];
42077
42078 /*
42079 * In some cases (e.g. Hyper-Threading), we want to avoid L1
42080 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42081 * Generate 16 random bytes for userspace PRNG seeding.
42082 */
42083 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
42084 - u_rand_bytes = (elf_addr_t __user *)
42085 - STACK_ALLOC(p, sizeof(k_rand_bytes));
42086 + srandom32(k_rand_bytes[0] ^ random32());
42087 + srandom32(k_rand_bytes[1] ^ random32());
42088 + srandom32(k_rand_bytes[2] ^ random32());
42089 + srandom32(k_rand_bytes[3] ^ random32());
42090 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
42091 + u_rand_bytes = (elf_addr_t __user *) p;
42092 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
42093 return -EFAULT;
42094
42095 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42096 return -EFAULT;
42097 current->mm->env_end = p;
42098
42099 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
42100 +
42101 /* Put the elf_info on the stack in the right place. */
42102 sp = (elf_addr_t __user *)envp + 1;
42103 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
42104 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
42105 return -EFAULT;
42106 return 0;
42107 }
42108 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42109 {
42110 struct elf_phdr *elf_phdata;
42111 struct elf_phdr *eppnt;
42112 - unsigned long load_addr = 0;
42113 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
42114 int load_addr_set = 0;
42115 unsigned long last_bss = 0, elf_bss = 0;
42116 - unsigned long error = ~0UL;
42117 + unsigned long error = -EINVAL;
42118 unsigned long total_size;
42119 int retval, i, size;
42120
42121 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42122 goto out_close;
42123 }
42124
42125 +#ifdef CONFIG_PAX_SEGMEXEC
42126 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42127 + pax_task_size = SEGMEXEC_TASK_SIZE;
42128 +#endif
42129 +
42130 eppnt = elf_phdata;
42131 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42132 if (eppnt->p_type == PT_LOAD) {
42133 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42134 k = load_addr + eppnt->p_vaddr;
42135 if (BAD_ADDR(k) ||
42136 eppnt->p_filesz > eppnt->p_memsz ||
42137 - eppnt->p_memsz > TASK_SIZE ||
42138 - TASK_SIZE - eppnt->p_memsz < k) {
42139 + eppnt->p_memsz > pax_task_size ||
42140 + pax_task_size - eppnt->p_memsz < k) {
42141 error = -ENOMEM;
42142 goto out_close;
42143 }
42144 @@ -528,6 +552,351 @@ out:
42145 return error;
42146 }
42147
42148 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42149 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42150 +{
42151 + unsigned long pax_flags = 0UL;
42152 +
42153 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42154 +
42155 +#ifdef CONFIG_PAX_PAGEEXEC
42156 + if (elf_phdata->p_flags & PF_PAGEEXEC)
42157 + pax_flags |= MF_PAX_PAGEEXEC;
42158 +#endif
42159 +
42160 +#ifdef CONFIG_PAX_SEGMEXEC
42161 + if (elf_phdata->p_flags & PF_SEGMEXEC)
42162 + pax_flags |= MF_PAX_SEGMEXEC;
42163 +#endif
42164 +
42165 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42166 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42167 + if ((__supported_pte_mask & _PAGE_NX))
42168 + pax_flags &= ~MF_PAX_SEGMEXEC;
42169 + else
42170 + pax_flags &= ~MF_PAX_PAGEEXEC;
42171 + }
42172 +#endif
42173 +
42174 +#ifdef CONFIG_PAX_EMUTRAMP
42175 + if (elf_phdata->p_flags & PF_EMUTRAMP)
42176 + pax_flags |= MF_PAX_EMUTRAMP;
42177 +#endif
42178 +
42179 +#ifdef CONFIG_PAX_MPROTECT
42180 + if (elf_phdata->p_flags & PF_MPROTECT)
42181 + pax_flags |= MF_PAX_MPROTECT;
42182 +#endif
42183 +
42184 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42185 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42186 + pax_flags |= MF_PAX_RANDMMAP;
42187 +#endif
42188 +
42189 +#endif
42190 +
42191 + return pax_flags;
42192 +}
42193 +
42194 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42195 +{
42196 + unsigned long pax_flags = 0UL;
42197 +
42198 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42199 +
42200 +#ifdef CONFIG_PAX_PAGEEXEC
42201 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42202 + pax_flags |= MF_PAX_PAGEEXEC;
42203 +#endif
42204 +
42205 +#ifdef CONFIG_PAX_SEGMEXEC
42206 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42207 + pax_flags |= MF_PAX_SEGMEXEC;
42208 +#endif
42209 +
42210 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42211 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42212 + if ((__supported_pte_mask & _PAGE_NX))
42213 + pax_flags &= ~MF_PAX_SEGMEXEC;
42214 + else
42215 + pax_flags &= ~MF_PAX_PAGEEXEC;
42216 + }
42217 +#endif
42218 +
42219 +#ifdef CONFIG_PAX_EMUTRAMP
42220 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42221 + pax_flags |= MF_PAX_EMUTRAMP;
42222 +#endif
42223 +
42224 +#ifdef CONFIG_PAX_MPROTECT
42225 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42226 + pax_flags |= MF_PAX_MPROTECT;
42227 +#endif
42228 +
42229 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42230 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42231 + pax_flags |= MF_PAX_RANDMMAP;
42232 +#endif
42233 +
42234 +#endif
42235 +
42236 + return pax_flags;
42237 +}
42238 +
42239 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42240 +{
42241 + unsigned long pax_flags = 0UL;
42242 +
42243 +#ifdef CONFIG_PAX_EI_PAX
42244 +
42245 +#ifdef CONFIG_PAX_PAGEEXEC
42246 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42247 + pax_flags |= MF_PAX_PAGEEXEC;
42248 +#endif
42249 +
42250 +#ifdef CONFIG_PAX_SEGMEXEC
42251 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42252 + pax_flags |= MF_PAX_SEGMEXEC;
42253 +#endif
42254 +
42255 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42256 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42257 + if ((__supported_pte_mask & _PAGE_NX))
42258 + pax_flags &= ~MF_PAX_SEGMEXEC;
42259 + else
42260 + pax_flags &= ~MF_PAX_PAGEEXEC;
42261 + }
42262 +#endif
42263 +
42264 +#ifdef CONFIG_PAX_EMUTRAMP
42265 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42266 + pax_flags |= MF_PAX_EMUTRAMP;
42267 +#endif
42268 +
42269 +#ifdef CONFIG_PAX_MPROTECT
42270 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42271 + pax_flags |= MF_PAX_MPROTECT;
42272 +#endif
42273 +
42274 +#ifdef CONFIG_PAX_ASLR
42275 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42276 + pax_flags |= MF_PAX_RANDMMAP;
42277 +#endif
42278 +
42279 +#else
42280 +
42281 +#ifdef CONFIG_PAX_PAGEEXEC
42282 + pax_flags |= MF_PAX_PAGEEXEC;
42283 +#endif
42284 +
42285 +#ifdef CONFIG_PAX_MPROTECT
42286 + pax_flags |= MF_PAX_MPROTECT;
42287 +#endif
42288 +
42289 +#ifdef CONFIG_PAX_RANDMMAP
42290 + pax_flags |= MF_PAX_RANDMMAP;
42291 +#endif
42292 +
42293 +#ifdef CONFIG_PAX_SEGMEXEC
42294 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
42295 + pax_flags &= ~MF_PAX_PAGEEXEC;
42296 + pax_flags |= MF_PAX_SEGMEXEC;
42297 + }
42298 +#endif
42299 +
42300 +#endif
42301 +
42302 + return pax_flags;
42303 +}
42304 +
42305 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42306 +{
42307 +
42308 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42309 + unsigned long i;
42310 +
42311 + for (i = 0UL; i < elf_ex->e_phnum; i++)
42312 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42313 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42314 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42315 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42316 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42317 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42318 + return ~0UL;
42319 +
42320 +#ifdef CONFIG_PAX_SOFTMODE
42321 + if (pax_softmode)
42322 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
42323 + else
42324 +#endif
42325 +
42326 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
42327 + break;
42328 + }
42329 +#endif
42330 +
42331 + return ~0UL;
42332 +}
42333 +
42334 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42335 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42336 +{
42337 + unsigned long pax_flags = 0UL;
42338 +
42339 +#ifdef CONFIG_PAX_PAGEEXEC
42340 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42341 + pax_flags |= MF_PAX_PAGEEXEC;
42342 +#endif
42343 +
42344 +#ifdef CONFIG_PAX_SEGMEXEC
42345 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42346 + pax_flags |= MF_PAX_SEGMEXEC;
42347 +#endif
42348 +
42349 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42350 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42351 + if ((__supported_pte_mask & _PAGE_NX))
42352 + pax_flags &= ~MF_PAX_SEGMEXEC;
42353 + else
42354 + pax_flags &= ~MF_PAX_PAGEEXEC;
42355 + }
42356 +#endif
42357 +
42358 +#ifdef CONFIG_PAX_EMUTRAMP
42359 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42360 + pax_flags |= MF_PAX_EMUTRAMP;
42361 +#endif
42362 +
42363 +#ifdef CONFIG_PAX_MPROTECT
42364 + if (pax_flags_softmode & MF_PAX_MPROTECT)
42365 + pax_flags |= MF_PAX_MPROTECT;
42366 +#endif
42367 +
42368 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42369 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42370 + pax_flags |= MF_PAX_RANDMMAP;
42371 +#endif
42372 +
42373 + return pax_flags;
42374 +}
42375 +
42376 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
42377 +{
42378 + unsigned long pax_flags = 0UL;
42379 +
42380 +#ifdef CONFIG_PAX_PAGEEXEC
42381 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
42382 + pax_flags |= MF_PAX_PAGEEXEC;
42383 +#endif
42384 +
42385 +#ifdef CONFIG_PAX_SEGMEXEC
42386 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
42387 + pax_flags |= MF_PAX_SEGMEXEC;
42388 +#endif
42389 +
42390 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42391 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42392 + if ((__supported_pte_mask & _PAGE_NX))
42393 + pax_flags &= ~MF_PAX_SEGMEXEC;
42394 + else
42395 + pax_flags &= ~MF_PAX_PAGEEXEC;
42396 + }
42397 +#endif
42398 +
42399 +#ifdef CONFIG_PAX_EMUTRAMP
42400 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
42401 + pax_flags |= MF_PAX_EMUTRAMP;
42402 +#endif
42403 +
42404 +#ifdef CONFIG_PAX_MPROTECT
42405 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
42406 + pax_flags |= MF_PAX_MPROTECT;
42407 +#endif
42408 +
42409 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42410 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
42411 + pax_flags |= MF_PAX_RANDMMAP;
42412 +#endif
42413 +
42414 + return pax_flags;
42415 +}
42416 +#endif
42417 +
42418 +static unsigned long pax_parse_xattr_pax(struct file * const file)
42419 +{
42420 +
42421 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42422 + ssize_t xattr_size, i;
42423 + unsigned char xattr_value[5];
42424 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42425 +
42426 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42427 + if (xattr_size <= 0)
42428 + return ~0UL;
42429 +
42430 + for (i = 0; i < xattr_size; i++)
42431 + switch (xattr_value[i]) {
42432 + default:
42433 + return ~0UL;
42434 +
42435 +#define parse_flag(option1, option2, flag) \
42436 + case option1: \
42437 + pax_flags_hardmode |= MF_PAX_##flag; \
42438 + break; \
42439 + case option2: \
42440 + pax_flags_softmode |= MF_PAX_##flag; \
42441 + break;
42442 +
42443 + parse_flag('p', 'P', PAGEEXEC);
42444 + parse_flag('e', 'E', EMUTRAMP);
42445 + parse_flag('m', 'M', MPROTECT);
42446 + parse_flag('r', 'R', RANDMMAP);
42447 + parse_flag('s', 'S', SEGMEXEC);
42448 +
42449 +#undef parse_flag
42450 + }
42451 +
42452 + if (pax_flags_hardmode & pax_flags_softmode)
42453 + return ~0UL;
42454 +
42455 +#ifdef CONFIG_PAX_SOFTMODE
42456 + if (pax_softmode)
42457 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42458 + else
42459 +#endif
42460 +
42461 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42462 +#else
42463 + return ~0UL;
42464 +#endif
42465 +
42466 +}
42467 +
42468 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42469 +{
42470 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42471 +
42472 + pax_flags = pax_parse_ei_pax(elf_ex);
42473 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42474 + xattr_pax_flags = pax_parse_xattr_pax(file);
42475 +
42476 + if (pt_pax_flags == ~0UL)
42477 + pt_pax_flags = xattr_pax_flags;
42478 + else if (xattr_pax_flags == ~0UL)
42479 + xattr_pax_flags = pt_pax_flags;
42480 + if (pt_pax_flags != xattr_pax_flags)
42481 + return -EINVAL;
42482 + if (pt_pax_flags != ~0UL)
42483 + pax_flags = pt_pax_flags;
42484 +
42485 + if (0 > pax_check_flags(&pax_flags))
42486 + return -EINVAL;
42487 +
42488 + current->mm->pax_flags = pax_flags;
42489 + return 0;
42490 +}
42491 +#endif
42492 +
42493 /*
42494 * These are the functions used to load ELF style executables and shared
42495 * libraries. There is no binary dependent code anywhere else.
42496 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42497 {
42498 unsigned int random_variable = 0;
42499
42500 +#ifdef CONFIG_PAX_RANDUSTACK
42501 + if (randomize_va_space)
42502 + return stack_top - current->mm->delta_stack;
42503 +#endif
42504 +
42505 if ((current->flags & PF_RANDOMIZE) &&
42506 !(current->personality & ADDR_NO_RANDOMIZE)) {
42507 random_variable = get_random_int() & STACK_RND_MASK;
42508 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42509 unsigned long load_addr = 0, load_bias = 0;
42510 int load_addr_set = 0;
42511 char * elf_interpreter = NULL;
42512 - unsigned long error;
42513 + unsigned long error = 0;
42514 struct elf_phdr *elf_ppnt, *elf_phdata;
42515 unsigned long elf_bss, elf_brk;
42516 int retval, i;
42517 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42518 unsigned long start_code, end_code, start_data, end_data;
42519 unsigned long reloc_func_desc __maybe_unused = 0;
42520 int executable_stack = EXSTACK_DEFAULT;
42521 - unsigned long def_flags = 0;
42522 struct {
42523 struct elfhdr elf_ex;
42524 struct elfhdr interp_elf_ex;
42525 } *loc;
42526 + unsigned long pax_task_size = TASK_SIZE;
42527
42528 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42529 if (!loc) {
42530 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42531
42532 /* OK, This is the point of no return */
42533 current->flags &= ~PF_FORKNOEXEC;
42534 - current->mm->def_flags = def_flags;
42535 +
42536 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42537 + current->mm->pax_flags = 0UL;
42538 +#endif
42539 +
42540 +#ifdef CONFIG_PAX_DLRESOLVE
42541 + current->mm->call_dl_resolve = 0UL;
42542 +#endif
42543 +
42544 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42545 + current->mm->call_syscall = 0UL;
42546 +#endif
42547 +
42548 +#ifdef CONFIG_PAX_ASLR
42549 + current->mm->delta_mmap = 0UL;
42550 + current->mm->delta_stack = 0UL;
42551 +#endif
42552 +
42553 + current->mm->def_flags = 0;
42554 +
42555 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42556 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42557 + send_sig(SIGKILL, current, 0);
42558 + goto out_free_dentry;
42559 + }
42560 +#endif
42561 +
42562 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42563 + pax_set_initial_flags(bprm);
42564 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42565 + if (pax_set_initial_flags_func)
42566 + (pax_set_initial_flags_func)(bprm);
42567 +#endif
42568 +
42569 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42570 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42571 + current->mm->context.user_cs_limit = PAGE_SIZE;
42572 + current->mm->def_flags |= VM_PAGEEXEC;
42573 + }
42574 +#endif
42575 +
42576 +#ifdef CONFIG_PAX_SEGMEXEC
42577 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42578 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42579 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42580 + pax_task_size = SEGMEXEC_TASK_SIZE;
42581 + current->mm->def_flags |= VM_NOHUGEPAGE;
42582 + }
42583 +#endif
42584 +
42585 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42586 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42587 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42588 + put_cpu();
42589 + }
42590 +#endif
42591
42592 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42593 may depend on the personality. */
42594 SET_PERSONALITY(loc->elf_ex);
42595 +
42596 +#ifdef CONFIG_PAX_ASLR
42597 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42598 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42599 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42600 + }
42601 +#endif
42602 +
42603 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42604 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42605 + executable_stack = EXSTACK_DISABLE_X;
42606 + current->personality &= ~READ_IMPLIES_EXEC;
42607 + } else
42608 +#endif
42609 +
42610 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42611 current->personality |= READ_IMPLIES_EXEC;
42612
42613 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42614 #else
42615 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42616 #endif
42617 +
42618 +#ifdef CONFIG_PAX_RANDMMAP
42619 + /* PaX: randomize base address at the default exe base if requested */
42620 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42621 +#ifdef CONFIG_SPARC64
42622 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42623 +#else
42624 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42625 +#endif
42626 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42627 + elf_flags |= MAP_FIXED;
42628 + }
42629 +#endif
42630 +
42631 }
42632
42633 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42634 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42635 * allowed task size. Note that p_filesz must always be
42636 * <= p_memsz so it is only necessary to check p_memsz.
42637 */
42638 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42639 - elf_ppnt->p_memsz > TASK_SIZE ||
42640 - TASK_SIZE - elf_ppnt->p_memsz < k) {
42641 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42642 + elf_ppnt->p_memsz > pax_task_size ||
42643 + pax_task_size - elf_ppnt->p_memsz < k) {
42644 /* set_brk can never work. Avoid overflows. */
42645 send_sig(SIGKILL, current, 0);
42646 retval = -EINVAL;
42647 @@ -881,11 +1339,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42648 goto out_free_dentry;
42649 }
42650 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42651 - send_sig(SIGSEGV, current, 0);
42652 - retval = -EFAULT; /* Nobody gets to see this, but.. */
42653 - goto out_free_dentry;
42654 + /*
42655 + * This bss-zeroing can fail if the ELF
42656 + * file specifies odd protections. So
42657 + * we don't check the return value
42658 + */
42659 }
42660
42661 +#ifdef CONFIG_PAX_RANDMMAP
42662 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42663 + unsigned long start, size;
42664 +
42665 + start = ELF_PAGEALIGN(elf_brk);
42666 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42667 + down_write(&current->mm->mmap_sem);
42668 + retval = -ENOMEM;
42669 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42670 + unsigned long prot = PROT_NONE;
42671 +
42672 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42673 +// if (current->personality & ADDR_NO_RANDOMIZE)
42674 +// prot = PROT_READ;
42675 + start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42676 + retval = IS_ERR_VALUE(start) ? start : 0;
42677 + }
42678 + up_write(&current->mm->mmap_sem);
42679 + if (retval == 0)
42680 + retval = set_brk(start + size, start + size + PAGE_SIZE);
42681 + if (retval < 0) {
42682 + send_sig(SIGKILL, current, 0);
42683 + goto out_free_dentry;
42684 + }
42685 + }
42686 +#endif
42687 +
42688 if (elf_interpreter) {
42689 unsigned long uninitialized_var(interp_map_addr);
42690
42691 @@ -1098,7 +1585,7 @@ out:
42692 * Decide what to dump of a segment, part, all or none.
42693 */
42694 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42695 - unsigned long mm_flags)
42696 + unsigned long mm_flags, long signr)
42697 {
42698 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42699
42700 @@ -1132,7 +1619,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42701 if (vma->vm_file == NULL)
42702 return 0;
42703
42704 - if (FILTER(MAPPED_PRIVATE))
42705 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42706 goto whole;
42707
42708 /*
42709 @@ -1354,9 +1841,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42710 {
42711 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42712 int i = 0;
42713 - do
42714 + do {
42715 i += 2;
42716 - while (auxv[i - 2] != AT_NULL);
42717 + } while (auxv[i - 2] != AT_NULL);
42718 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42719 }
42720
42721 @@ -1862,14 +2349,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42722 }
42723
42724 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42725 - unsigned long mm_flags)
42726 + struct coredump_params *cprm)
42727 {
42728 struct vm_area_struct *vma;
42729 size_t size = 0;
42730
42731 for (vma = first_vma(current, gate_vma); vma != NULL;
42732 vma = next_vma(vma, gate_vma))
42733 - size += vma_dump_size(vma, mm_flags);
42734 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42735 return size;
42736 }
42737
42738 @@ -1963,7 +2450,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42739
42740 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42741
42742 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42743 + offset += elf_core_vma_data_size(gate_vma, cprm);
42744 offset += elf_core_extra_data_size();
42745 e_shoff = offset;
42746
42747 @@ -1977,10 +2464,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42748 offset = dataoff;
42749
42750 size += sizeof(*elf);
42751 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42752 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42753 goto end_coredump;
42754
42755 size += sizeof(*phdr4note);
42756 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42757 if (size > cprm->limit
42758 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42759 goto end_coredump;
42760 @@ -1994,7 +2483,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42761 phdr.p_offset = offset;
42762 phdr.p_vaddr = vma->vm_start;
42763 phdr.p_paddr = 0;
42764 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42765 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42766 phdr.p_memsz = vma->vm_end - vma->vm_start;
42767 offset += phdr.p_filesz;
42768 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42769 @@ -2005,6 +2494,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42770 phdr.p_align = ELF_EXEC_PAGESIZE;
42771
42772 size += sizeof(phdr);
42773 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42774 if (size > cprm->limit
42775 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42776 goto end_coredump;
42777 @@ -2029,7 +2519,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42778 unsigned long addr;
42779 unsigned long end;
42780
42781 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42782 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42783
42784 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42785 struct page *page;
42786 @@ -2038,6 +2528,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42787 page = get_dump_page(addr);
42788 if (page) {
42789 void *kaddr = kmap(page);
42790 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42791 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42792 !dump_write(cprm->file, kaddr,
42793 PAGE_SIZE);
42794 @@ -2055,6 +2546,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42795
42796 if (e_phnum == PN_XNUM) {
42797 size += sizeof(*shdr4extnum);
42798 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42799 if (size > cprm->limit
42800 || !dump_write(cprm->file, shdr4extnum,
42801 sizeof(*shdr4extnum)))
42802 @@ -2075,6 +2567,97 @@ out:
42803
42804 #endif /* CONFIG_ELF_CORE */
42805
42806 +#ifdef CONFIG_PAX_MPROTECT
42807 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
42808 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42809 + * we'll remove VM_MAYWRITE for good on RELRO segments.
42810 + *
42811 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42812 + * basis because we want to allow the common case and not the special ones.
42813 + */
42814 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42815 +{
42816 + struct elfhdr elf_h;
42817 + struct elf_phdr elf_p;
42818 + unsigned long i;
42819 + unsigned long oldflags;
42820 + bool is_textrel_rw, is_textrel_rx, is_relro;
42821 +
42822 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42823 + return;
42824 +
42825 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42826 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42827 +
42828 +#ifdef CONFIG_PAX_ELFRELOCS
42829 + /* possible TEXTREL */
42830 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42831 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42832 +#else
42833 + is_textrel_rw = false;
42834 + is_textrel_rx = false;
42835 +#endif
42836 +
42837 + /* possible RELRO */
42838 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42839 +
42840 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42841 + return;
42842 +
42843 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42844 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42845 +
42846 +#ifdef CONFIG_PAX_ETEXECRELOCS
42847 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42848 +#else
42849 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42850 +#endif
42851 +
42852 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42853 + !elf_check_arch(&elf_h) ||
42854 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42855 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42856 + return;
42857 +
42858 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42859 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42860 + return;
42861 + switch (elf_p.p_type) {
42862 + case PT_DYNAMIC:
42863 + if (!is_textrel_rw && !is_textrel_rx)
42864 + continue;
42865 + i = 0UL;
42866 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42867 + elf_dyn dyn;
42868 +
42869 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42870 + return;
42871 + if (dyn.d_tag == DT_NULL)
42872 + return;
42873 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42874 + gr_log_textrel(vma);
42875 + if (is_textrel_rw)
42876 + vma->vm_flags |= VM_MAYWRITE;
42877 + else
42878 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42879 + vma->vm_flags &= ~VM_MAYWRITE;
42880 + return;
42881 + }
42882 + i++;
42883 + }
42884 + return;
42885 +
42886 + case PT_GNU_RELRO:
42887 + if (!is_relro)
42888 + continue;
42889 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42890 + vma->vm_flags &= ~VM_MAYWRITE;
42891 + return;
42892 + }
42893 + }
42894 +}
42895 +#endif
42896 +
42897 static int __init init_elf_binfmt(void)
42898 {
42899 return register_binfmt(&elf_format);
42900 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42901 index 1bffbe0..c8c283e 100644
42902 --- a/fs/binfmt_flat.c
42903 +++ b/fs/binfmt_flat.c
42904 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42905 realdatastart = (unsigned long) -ENOMEM;
42906 printk("Unable to allocate RAM for process data, errno %d\n",
42907 (int)-realdatastart);
42908 + down_write(&current->mm->mmap_sem);
42909 do_munmap(current->mm, textpos, text_len);
42910 + up_write(&current->mm->mmap_sem);
42911 ret = realdatastart;
42912 goto err;
42913 }
42914 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42915 }
42916 if (IS_ERR_VALUE(result)) {
42917 printk("Unable to read data+bss, errno %d\n", (int)-result);
42918 + down_write(&current->mm->mmap_sem);
42919 do_munmap(current->mm, textpos, text_len);
42920 do_munmap(current->mm, realdatastart, len);
42921 + up_write(&current->mm->mmap_sem);
42922 ret = result;
42923 goto err;
42924 }
42925 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42926 }
42927 if (IS_ERR_VALUE(result)) {
42928 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42929 + down_write(&current->mm->mmap_sem);
42930 do_munmap(current->mm, textpos, text_len + data_len + extra +
42931 MAX_SHARED_LIBS * sizeof(unsigned long));
42932 + up_write(&current->mm->mmap_sem);
42933 ret = result;
42934 goto err;
42935 }
42936 diff --git a/fs/bio.c b/fs/bio.c
42937 index b980ecd..74800bf 100644
42938 --- a/fs/bio.c
42939 +++ b/fs/bio.c
42940 @@ -833,7 +833,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42941 /*
42942 * Overflow, abort
42943 */
42944 - if (end < start)
42945 + if (end < start || end - start > INT_MAX - nr_pages)
42946 return ERR_PTR(-EINVAL);
42947
42948 nr_pages += end - start;
42949 @@ -1229,7 +1229,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42950 const int read = bio_data_dir(bio) == READ;
42951 struct bio_map_data *bmd = bio->bi_private;
42952 int i;
42953 - char *p = bmd->sgvecs[0].iov_base;
42954 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42955
42956 __bio_for_each_segment(bvec, bio, i, 0) {
42957 char *addr = page_address(bvec->bv_page);
42958 diff --git a/fs/block_dev.c b/fs/block_dev.c
42959 index 5e9f198..6bf9b1c 100644
42960 --- a/fs/block_dev.c
42961 +++ b/fs/block_dev.c
42962 @@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42963 else if (bdev->bd_contains == bdev)
42964 return true; /* is a whole device which isn't held */
42965
42966 - else if (whole->bd_holder == bd_may_claim)
42967 + else if (whole->bd_holder == (void *)bd_may_claim)
42968 return true; /* is a partition of a device that is being partitioned */
42969 else if (whole->bd_holder != NULL)
42970 return false; /* is a partition of a held device */
42971 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
42972 index d986824..af1befd 100644
42973 --- a/fs/btrfs/check-integrity.c
42974 +++ b/fs/btrfs/check-integrity.c
42975 @@ -157,7 +157,7 @@ struct btrfsic_block {
42976 union {
42977 bio_end_io_t *bio;
42978 bh_end_io_t *bh;
42979 - } orig_bio_bh_end_io;
42980 + } __no_const orig_bio_bh_end_io;
42981 int submit_bio_bh_rw;
42982 u64 flush_gen; /* only valid if !never_written */
42983 };
42984 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42985 index 0639a55..7d9e07f 100644
42986 --- a/fs/btrfs/ctree.c
42987 +++ b/fs/btrfs/ctree.c
42988 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42989 free_extent_buffer(buf);
42990 add_root_to_dirty_list(root);
42991 } else {
42992 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42993 - parent_start = parent->start;
42994 - else
42995 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42996 + if (parent)
42997 + parent_start = parent->start;
42998 + else
42999 + parent_start = 0;
43000 + } else
43001 parent_start = 0;
43002
43003 WARN_ON(trans->transid != btrfs_header_generation(parent));
43004 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
43005 index 892b347..b3db246 100644
43006 --- a/fs/btrfs/inode.c
43007 +++ b/fs/btrfs/inode.c
43008 @@ -6930,7 +6930,7 @@ fail:
43009 return -ENOMEM;
43010 }
43011
43012 -static int btrfs_getattr(struct vfsmount *mnt,
43013 +int btrfs_getattr(struct vfsmount *mnt,
43014 struct dentry *dentry, struct kstat *stat)
43015 {
43016 struct inode *inode = dentry->d_inode;
43017 @@ -6944,6 +6944,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
43018 return 0;
43019 }
43020
43021 +EXPORT_SYMBOL(btrfs_getattr);
43022 +
43023 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
43024 +{
43025 + return BTRFS_I(inode)->root->anon_dev;
43026 +}
43027 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
43028 +
43029 /*
43030 * If a file is moved, it will inherit the cow and compression flags of the new
43031 * directory.
43032 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
43033 index 1b36f19..5ac7360 100644
43034 --- a/fs/btrfs/ioctl.c
43035 +++ b/fs/btrfs/ioctl.c
43036 @@ -2783,9 +2783,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43037 for (i = 0; i < num_types; i++) {
43038 struct btrfs_space_info *tmp;
43039
43040 + /* Don't copy in more than we allocated */
43041 if (!slot_count)
43042 break;
43043
43044 + slot_count--;
43045 +
43046 info = NULL;
43047 rcu_read_lock();
43048 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
43049 @@ -2807,15 +2810,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43050 memcpy(dest, &space, sizeof(space));
43051 dest++;
43052 space_args.total_spaces++;
43053 - slot_count--;
43054 }
43055 - if (!slot_count)
43056 - break;
43057 }
43058 up_read(&info->groups_sem);
43059 }
43060
43061 - user_dest = (struct btrfs_ioctl_space_info *)
43062 + user_dest = (struct btrfs_ioctl_space_info __user *)
43063 (arg + sizeof(struct btrfs_ioctl_space_args));
43064
43065 if (copy_to_user(user_dest, dest_orig, alloc_size))
43066 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
43067 index 8c1aae2..1e46446 100644
43068 --- a/fs/btrfs/relocation.c
43069 +++ b/fs/btrfs/relocation.c
43070 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
43071 }
43072 spin_unlock(&rc->reloc_root_tree.lock);
43073
43074 - BUG_ON((struct btrfs_root *)node->data != root);
43075 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
43076
43077 if (!del) {
43078 spin_lock(&rc->reloc_root_tree.lock);
43079 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
43080 index 622f469..e8d2d55 100644
43081 --- a/fs/cachefiles/bind.c
43082 +++ b/fs/cachefiles/bind.c
43083 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
43084 args);
43085
43086 /* start by checking things over */
43087 - ASSERT(cache->fstop_percent >= 0 &&
43088 - cache->fstop_percent < cache->fcull_percent &&
43089 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
43090 cache->fcull_percent < cache->frun_percent &&
43091 cache->frun_percent < 100);
43092
43093 - ASSERT(cache->bstop_percent >= 0 &&
43094 - cache->bstop_percent < cache->bcull_percent &&
43095 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
43096 cache->bcull_percent < cache->brun_percent &&
43097 cache->brun_percent < 100);
43098
43099 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
43100 index 0a1467b..6a53245 100644
43101 --- a/fs/cachefiles/daemon.c
43102 +++ b/fs/cachefiles/daemon.c
43103 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
43104 if (n > buflen)
43105 return -EMSGSIZE;
43106
43107 - if (copy_to_user(_buffer, buffer, n) != 0)
43108 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
43109 return -EFAULT;
43110
43111 return n;
43112 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
43113 if (test_bit(CACHEFILES_DEAD, &cache->flags))
43114 return -EIO;
43115
43116 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
43117 + if (datalen > PAGE_SIZE - 1)
43118 return -EOPNOTSUPP;
43119
43120 /* drag the command string into the kernel so we can parse it */
43121 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
43122 if (args[0] != '%' || args[1] != '\0')
43123 return -EINVAL;
43124
43125 - if (fstop < 0 || fstop >= cache->fcull_percent)
43126 + if (fstop >= cache->fcull_percent)
43127 return cachefiles_daemon_range_error(cache, args);
43128
43129 cache->fstop_percent = fstop;
43130 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
43131 if (args[0] != '%' || args[1] != '\0')
43132 return -EINVAL;
43133
43134 - if (bstop < 0 || bstop >= cache->bcull_percent)
43135 + if (bstop >= cache->bcull_percent)
43136 return cachefiles_daemon_range_error(cache, args);
43137
43138 cache->bstop_percent = bstop;
43139 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
43140 index bd6bc1b..b627b53 100644
43141 --- a/fs/cachefiles/internal.h
43142 +++ b/fs/cachefiles/internal.h
43143 @@ -57,7 +57,7 @@ struct cachefiles_cache {
43144 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
43145 struct rb_root active_nodes; /* active nodes (can't be culled) */
43146 rwlock_t active_lock; /* lock for active_nodes */
43147 - atomic_t gravecounter; /* graveyard uniquifier */
43148 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
43149 unsigned frun_percent; /* when to stop culling (% files) */
43150 unsigned fcull_percent; /* when to start culling (% files) */
43151 unsigned fstop_percent; /* when to stop allocating (% files) */
43152 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
43153 * proc.c
43154 */
43155 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43156 -extern atomic_t cachefiles_lookup_histogram[HZ];
43157 -extern atomic_t cachefiles_mkdir_histogram[HZ];
43158 -extern atomic_t cachefiles_create_histogram[HZ];
43159 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43160 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43161 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43162
43163 extern int __init cachefiles_proc_init(void);
43164 extern void cachefiles_proc_cleanup(void);
43165 static inline
43166 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
43167 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
43168 {
43169 unsigned long jif = jiffies - start_jif;
43170 if (jif >= HZ)
43171 jif = HZ - 1;
43172 - atomic_inc(&histogram[jif]);
43173 + atomic_inc_unchecked(&histogram[jif]);
43174 }
43175
43176 #else
43177 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43178 index a0358c2..d6137f2 100644
43179 --- a/fs/cachefiles/namei.c
43180 +++ b/fs/cachefiles/namei.c
43181 @@ -318,7 +318,7 @@ try_again:
43182 /* first step is to make up a grave dentry in the graveyard */
43183 sprintf(nbuffer, "%08x%08x",
43184 (uint32_t) get_seconds(),
43185 - (uint32_t) atomic_inc_return(&cache->gravecounter));
43186 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43187
43188 /* do the multiway lock magic */
43189 trap = lock_rename(cache->graveyard, dir);
43190 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43191 index eccd339..4c1d995 100644
43192 --- a/fs/cachefiles/proc.c
43193 +++ b/fs/cachefiles/proc.c
43194 @@ -14,9 +14,9 @@
43195 #include <linux/seq_file.h>
43196 #include "internal.h"
43197
43198 -atomic_t cachefiles_lookup_histogram[HZ];
43199 -atomic_t cachefiles_mkdir_histogram[HZ];
43200 -atomic_t cachefiles_create_histogram[HZ];
43201 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43202 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43203 +atomic_unchecked_t cachefiles_create_histogram[HZ];
43204
43205 /*
43206 * display the latency histogram
43207 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43208 return 0;
43209 default:
43210 index = (unsigned long) v - 3;
43211 - x = atomic_read(&cachefiles_lookup_histogram[index]);
43212 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
43213 - z = atomic_read(&cachefiles_create_histogram[index]);
43214 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43215 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43216 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43217 if (x == 0 && y == 0 && z == 0)
43218 return 0;
43219
43220 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43221 index 0e3c092..818480e 100644
43222 --- a/fs/cachefiles/rdwr.c
43223 +++ b/fs/cachefiles/rdwr.c
43224 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43225 old_fs = get_fs();
43226 set_fs(KERNEL_DS);
43227 ret = file->f_op->write(
43228 - file, (const void __user *) data, len, &pos);
43229 + file, (const void __force_user *) data, len, &pos);
43230 set_fs(old_fs);
43231 kunmap(page);
43232 if (ret != len)
43233 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
43234 index 3e8094b..cb3ff3d 100644
43235 --- a/fs/ceph/dir.c
43236 +++ b/fs/ceph/dir.c
43237 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43238 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43239 struct ceph_mds_client *mdsc = fsc->mdsc;
43240 unsigned frag = fpos_frag(filp->f_pos);
43241 - int off = fpos_off(filp->f_pos);
43242 + unsigned int off = fpos_off(filp->f_pos);
43243 int err;
43244 u32 ftype;
43245 struct ceph_mds_reply_info_parsed *rinfo;
43246 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
43247 if (nd &&
43248 (nd->flags & LOOKUP_OPEN) &&
43249 !(nd->intent.open.flags & O_CREAT)) {
43250 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
43251 + int mode = nd->intent.open.create_mode & ~current_umask();
43252 return ceph_lookup_open(dir, dentry, nd, mode, 1);
43253 }
43254
43255 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
43256 index 24b3dfc..3cd5454 100644
43257 --- a/fs/cifs/cifs_debug.c
43258 +++ b/fs/cifs/cifs_debug.c
43259 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43260
43261 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43262 #ifdef CONFIG_CIFS_STATS2
43263 - atomic_set(&totBufAllocCount, 0);
43264 - atomic_set(&totSmBufAllocCount, 0);
43265 + atomic_set_unchecked(&totBufAllocCount, 0);
43266 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43267 #endif /* CONFIG_CIFS_STATS2 */
43268 spin_lock(&cifs_tcp_ses_lock);
43269 list_for_each(tmp1, &cifs_tcp_ses_list) {
43270 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43271 tcon = list_entry(tmp3,
43272 struct cifs_tcon,
43273 tcon_list);
43274 - atomic_set(&tcon->num_smbs_sent, 0);
43275 - atomic_set(&tcon->num_writes, 0);
43276 - atomic_set(&tcon->num_reads, 0);
43277 - atomic_set(&tcon->num_oplock_brks, 0);
43278 - atomic_set(&tcon->num_opens, 0);
43279 - atomic_set(&tcon->num_posixopens, 0);
43280 - atomic_set(&tcon->num_posixmkdirs, 0);
43281 - atomic_set(&tcon->num_closes, 0);
43282 - atomic_set(&tcon->num_deletes, 0);
43283 - atomic_set(&tcon->num_mkdirs, 0);
43284 - atomic_set(&tcon->num_rmdirs, 0);
43285 - atomic_set(&tcon->num_renames, 0);
43286 - atomic_set(&tcon->num_t2renames, 0);
43287 - atomic_set(&tcon->num_ffirst, 0);
43288 - atomic_set(&tcon->num_fnext, 0);
43289 - atomic_set(&tcon->num_fclose, 0);
43290 - atomic_set(&tcon->num_hardlinks, 0);
43291 - atomic_set(&tcon->num_symlinks, 0);
43292 - atomic_set(&tcon->num_locks, 0);
43293 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43294 + atomic_set_unchecked(&tcon->num_writes, 0);
43295 + atomic_set_unchecked(&tcon->num_reads, 0);
43296 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43297 + atomic_set_unchecked(&tcon->num_opens, 0);
43298 + atomic_set_unchecked(&tcon->num_posixopens, 0);
43299 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43300 + atomic_set_unchecked(&tcon->num_closes, 0);
43301 + atomic_set_unchecked(&tcon->num_deletes, 0);
43302 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
43303 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
43304 + atomic_set_unchecked(&tcon->num_renames, 0);
43305 + atomic_set_unchecked(&tcon->num_t2renames, 0);
43306 + atomic_set_unchecked(&tcon->num_ffirst, 0);
43307 + atomic_set_unchecked(&tcon->num_fnext, 0);
43308 + atomic_set_unchecked(&tcon->num_fclose, 0);
43309 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
43310 + atomic_set_unchecked(&tcon->num_symlinks, 0);
43311 + atomic_set_unchecked(&tcon->num_locks, 0);
43312 }
43313 }
43314 }
43315 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43316 smBufAllocCount.counter, cifs_min_small);
43317 #ifdef CONFIG_CIFS_STATS2
43318 seq_printf(m, "Total Large %d Small %d Allocations\n",
43319 - atomic_read(&totBufAllocCount),
43320 - atomic_read(&totSmBufAllocCount));
43321 + atomic_read_unchecked(&totBufAllocCount),
43322 + atomic_read_unchecked(&totSmBufAllocCount));
43323 #endif /* CONFIG_CIFS_STATS2 */
43324
43325 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
43326 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43327 if (tcon->need_reconnect)
43328 seq_puts(m, "\tDISCONNECTED ");
43329 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43330 - atomic_read(&tcon->num_smbs_sent),
43331 - atomic_read(&tcon->num_oplock_brks));
43332 + atomic_read_unchecked(&tcon->num_smbs_sent),
43333 + atomic_read_unchecked(&tcon->num_oplock_brks));
43334 seq_printf(m, "\nReads: %d Bytes: %lld",
43335 - atomic_read(&tcon->num_reads),
43336 + atomic_read_unchecked(&tcon->num_reads),
43337 (long long)(tcon->bytes_read));
43338 seq_printf(m, "\nWrites: %d Bytes: %lld",
43339 - atomic_read(&tcon->num_writes),
43340 + atomic_read_unchecked(&tcon->num_writes),
43341 (long long)(tcon->bytes_written));
43342 seq_printf(m, "\nFlushes: %d",
43343 - atomic_read(&tcon->num_flushes));
43344 + atomic_read_unchecked(&tcon->num_flushes));
43345 seq_printf(m, "\nLocks: %d HardLinks: %d "
43346 "Symlinks: %d",
43347 - atomic_read(&tcon->num_locks),
43348 - atomic_read(&tcon->num_hardlinks),
43349 - atomic_read(&tcon->num_symlinks));
43350 + atomic_read_unchecked(&tcon->num_locks),
43351 + atomic_read_unchecked(&tcon->num_hardlinks),
43352 + atomic_read_unchecked(&tcon->num_symlinks));
43353 seq_printf(m, "\nOpens: %d Closes: %d "
43354 "Deletes: %d",
43355 - atomic_read(&tcon->num_opens),
43356 - atomic_read(&tcon->num_closes),
43357 - atomic_read(&tcon->num_deletes));
43358 + atomic_read_unchecked(&tcon->num_opens),
43359 + atomic_read_unchecked(&tcon->num_closes),
43360 + atomic_read_unchecked(&tcon->num_deletes));
43361 seq_printf(m, "\nPosix Opens: %d "
43362 "Posix Mkdirs: %d",
43363 - atomic_read(&tcon->num_posixopens),
43364 - atomic_read(&tcon->num_posixmkdirs));
43365 + atomic_read_unchecked(&tcon->num_posixopens),
43366 + atomic_read_unchecked(&tcon->num_posixmkdirs));
43367 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43368 - atomic_read(&tcon->num_mkdirs),
43369 - atomic_read(&tcon->num_rmdirs));
43370 + atomic_read_unchecked(&tcon->num_mkdirs),
43371 + atomic_read_unchecked(&tcon->num_rmdirs));
43372 seq_printf(m, "\nRenames: %d T2 Renames %d",
43373 - atomic_read(&tcon->num_renames),
43374 - atomic_read(&tcon->num_t2renames));
43375 + atomic_read_unchecked(&tcon->num_renames),
43376 + atomic_read_unchecked(&tcon->num_t2renames));
43377 seq_printf(m, "\nFindFirst: %d FNext %d "
43378 "FClose %d",
43379 - atomic_read(&tcon->num_ffirst),
43380 - atomic_read(&tcon->num_fnext),
43381 - atomic_read(&tcon->num_fclose));
43382 + atomic_read_unchecked(&tcon->num_ffirst),
43383 + atomic_read_unchecked(&tcon->num_fnext),
43384 + atomic_read_unchecked(&tcon->num_fclose));
43385 }
43386 }
43387 }
43388 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43389 index 70dd381..b8ce03b 100644
43390 --- a/fs/cifs/cifsfs.c
43391 +++ b/fs/cifs/cifsfs.c
43392 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
43393 cifs_req_cachep = kmem_cache_create("cifs_request",
43394 CIFSMaxBufSize +
43395 MAX_CIFS_HDR_SIZE, 0,
43396 - SLAB_HWCACHE_ALIGN, NULL);
43397 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43398 if (cifs_req_cachep == NULL)
43399 return -ENOMEM;
43400
43401 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
43402 efficient to alloc 1 per page off the slab compared to 17K (5page)
43403 alloc of large cifs buffers even when page debugging is on */
43404 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43405 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43406 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43407 NULL);
43408 if (cifs_sm_req_cachep == NULL) {
43409 mempool_destroy(cifs_req_poolp);
43410 @@ -1101,8 +1101,8 @@ init_cifs(void)
43411 atomic_set(&bufAllocCount, 0);
43412 atomic_set(&smBufAllocCount, 0);
43413 #ifdef CONFIG_CIFS_STATS2
43414 - atomic_set(&totBufAllocCount, 0);
43415 - atomic_set(&totSmBufAllocCount, 0);
43416 + atomic_set_unchecked(&totBufAllocCount, 0);
43417 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43418 #endif /* CONFIG_CIFS_STATS2 */
43419
43420 atomic_set(&midCount, 0);
43421 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43422 index d47d20a..77e8b33 100644
43423 --- a/fs/cifs/cifsglob.h
43424 +++ b/fs/cifs/cifsglob.h
43425 @@ -388,28 +388,28 @@ struct cifs_tcon {
43426 __u16 Flags; /* optional support bits */
43427 enum statusEnum tidStatus;
43428 #ifdef CONFIG_CIFS_STATS
43429 - atomic_t num_smbs_sent;
43430 - atomic_t num_writes;
43431 - atomic_t num_reads;
43432 - atomic_t num_flushes;
43433 - atomic_t num_oplock_brks;
43434 - atomic_t num_opens;
43435 - atomic_t num_closes;
43436 - atomic_t num_deletes;
43437 - atomic_t num_mkdirs;
43438 - atomic_t num_posixopens;
43439 - atomic_t num_posixmkdirs;
43440 - atomic_t num_rmdirs;
43441 - atomic_t num_renames;
43442 - atomic_t num_t2renames;
43443 - atomic_t num_ffirst;
43444 - atomic_t num_fnext;
43445 - atomic_t num_fclose;
43446 - atomic_t num_hardlinks;
43447 - atomic_t num_symlinks;
43448 - atomic_t num_locks;
43449 - atomic_t num_acl_get;
43450 - atomic_t num_acl_set;
43451 + atomic_unchecked_t num_smbs_sent;
43452 + atomic_unchecked_t num_writes;
43453 + atomic_unchecked_t num_reads;
43454 + atomic_unchecked_t num_flushes;
43455 + atomic_unchecked_t num_oplock_brks;
43456 + atomic_unchecked_t num_opens;
43457 + atomic_unchecked_t num_closes;
43458 + atomic_unchecked_t num_deletes;
43459 + atomic_unchecked_t num_mkdirs;
43460 + atomic_unchecked_t num_posixopens;
43461 + atomic_unchecked_t num_posixmkdirs;
43462 + atomic_unchecked_t num_rmdirs;
43463 + atomic_unchecked_t num_renames;
43464 + atomic_unchecked_t num_t2renames;
43465 + atomic_unchecked_t num_ffirst;
43466 + atomic_unchecked_t num_fnext;
43467 + atomic_unchecked_t num_fclose;
43468 + atomic_unchecked_t num_hardlinks;
43469 + atomic_unchecked_t num_symlinks;
43470 + atomic_unchecked_t num_locks;
43471 + atomic_unchecked_t num_acl_get;
43472 + atomic_unchecked_t num_acl_set;
43473 #ifdef CONFIG_CIFS_STATS2
43474 unsigned long long time_writes;
43475 unsigned long long time_reads;
43476 @@ -624,7 +624,7 @@ convert_delimiter(char *path, char delim)
43477 }
43478
43479 #ifdef CONFIG_CIFS_STATS
43480 -#define cifs_stats_inc atomic_inc
43481 +#define cifs_stats_inc atomic_inc_unchecked
43482
43483 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43484 unsigned int bytes)
43485 @@ -983,8 +983,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43486 /* Various Debug counters */
43487 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43488 #ifdef CONFIG_CIFS_STATS2
43489 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43490 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43491 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43492 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43493 #endif
43494 GLOBAL_EXTERN atomic_t smBufAllocCount;
43495 GLOBAL_EXTERN atomic_t midCount;
43496 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43497 index 6b0e064..94e6c3c 100644
43498 --- a/fs/cifs/link.c
43499 +++ b/fs/cifs/link.c
43500 @@ -600,7 +600,7 @@ symlink_exit:
43501
43502 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43503 {
43504 - char *p = nd_get_link(nd);
43505 + const char *p = nd_get_link(nd);
43506 if (!IS_ERR(p))
43507 kfree(p);
43508 }
43509 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43510 index 703ef5c..2a44ed5 100644
43511 --- a/fs/cifs/misc.c
43512 +++ b/fs/cifs/misc.c
43513 @@ -156,7 +156,7 @@ cifs_buf_get(void)
43514 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43515 atomic_inc(&bufAllocCount);
43516 #ifdef CONFIG_CIFS_STATS2
43517 - atomic_inc(&totBufAllocCount);
43518 + atomic_inc_unchecked(&totBufAllocCount);
43519 #endif /* CONFIG_CIFS_STATS2 */
43520 }
43521
43522 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43523 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43524 atomic_inc(&smBufAllocCount);
43525 #ifdef CONFIG_CIFS_STATS2
43526 - atomic_inc(&totSmBufAllocCount);
43527 + atomic_inc_unchecked(&totSmBufAllocCount);
43528 #endif /* CONFIG_CIFS_STATS2 */
43529
43530 }
43531 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43532 index 6901578..d402eb5 100644
43533 --- a/fs/coda/cache.c
43534 +++ b/fs/coda/cache.c
43535 @@ -24,7 +24,7 @@
43536 #include "coda_linux.h"
43537 #include "coda_cache.h"
43538
43539 -static atomic_t permission_epoch = ATOMIC_INIT(0);
43540 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43541
43542 /* replace or extend an acl cache hit */
43543 void coda_cache_enter(struct inode *inode, int mask)
43544 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43545 struct coda_inode_info *cii = ITOC(inode);
43546
43547 spin_lock(&cii->c_lock);
43548 - cii->c_cached_epoch = atomic_read(&permission_epoch);
43549 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43550 if (cii->c_uid != current_fsuid()) {
43551 cii->c_uid = current_fsuid();
43552 cii->c_cached_perm = mask;
43553 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43554 {
43555 struct coda_inode_info *cii = ITOC(inode);
43556 spin_lock(&cii->c_lock);
43557 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43558 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43559 spin_unlock(&cii->c_lock);
43560 }
43561
43562 /* remove all acl caches */
43563 void coda_cache_clear_all(struct super_block *sb)
43564 {
43565 - atomic_inc(&permission_epoch);
43566 + atomic_inc_unchecked(&permission_epoch);
43567 }
43568
43569
43570 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43571 spin_lock(&cii->c_lock);
43572 hit = (mask & cii->c_cached_perm) == mask &&
43573 cii->c_uid == current_fsuid() &&
43574 - cii->c_cached_epoch == atomic_read(&permission_epoch);
43575 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43576 spin_unlock(&cii->c_lock);
43577
43578 return hit;
43579 diff --git a/fs/compat.c b/fs/compat.c
43580 index 07880ba..3fb2862 100644
43581 --- a/fs/compat.c
43582 +++ b/fs/compat.c
43583 @@ -491,7 +491,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43584
43585 set_fs(KERNEL_DS);
43586 /* The __user pointer cast is valid because of the set_fs() */
43587 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43588 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43589 set_fs(oldfs);
43590 /* truncating is ok because it's a user address */
43591 if (!ret)
43592 @@ -549,7 +549,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43593 goto out;
43594
43595 ret = -EINVAL;
43596 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43597 + if (nr_segs > UIO_MAXIOV)
43598 goto out;
43599 if (nr_segs > fast_segs) {
43600 ret = -ENOMEM;
43601 @@ -832,6 +832,7 @@ struct compat_old_linux_dirent {
43602
43603 struct compat_readdir_callback {
43604 struct compat_old_linux_dirent __user *dirent;
43605 + struct file * file;
43606 int result;
43607 };
43608
43609 @@ -849,6 +850,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43610 buf->result = -EOVERFLOW;
43611 return -EOVERFLOW;
43612 }
43613 +
43614 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43615 + return 0;
43616 +
43617 buf->result++;
43618 dirent = buf->dirent;
43619 if (!access_ok(VERIFY_WRITE, dirent,
43620 @@ -881,6 +886,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43621
43622 buf.result = 0;
43623 buf.dirent = dirent;
43624 + buf.file = file;
43625
43626 error = vfs_readdir(file, compat_fillonedir, &buf);
43627 if (buf.result)
43628 @@ -901,6 +907,7 @@ struct compat_linux_dirent {
43629 struct compat_getdents_callback {
43630 struct compat_linux_dirent __user *current_dir;
43631 struct compat_linux_dirent __user *previous;
43632 + struct file * file;
43633 int count;
43634 int error;
43635 };
43636 @@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43637 buf->error = -EOVERFLOW;
43638 return -EOVERFLOW;
43639 }
43640 +
43641 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43642 + return 0;
43643 +
43644 dirent = buf->previous;
43645 if (dirent) {
43646 if (__put_user(offset, &dirent->d_off))
43647 @@ -969,6 +980,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43648 buf.previous = NULL;
43649 buf.count = count;
43650 buf.error = 0;
43651 + buf.file = file;
43652
43653 error = vfs_readdir(file, compat_filldir, &buf);
43654 if (error >= 0)
43655 @@ -990,6 +1002,7 @@ out:
43656 struct compat_getdents_callback64 {
43657 struct linux_dirent64 __user *current_dir;
43658 struct linux_dirent64 __user *previous;
43659 + struct file * file;
43660 int count;
43661 int error;
43662 };
43663 @@ -1006,6 +1019,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43664 buf->error = -EINVAL; /* only used if we fail.. */
43665 if (reclen > buf->count)
43666 return -EINVAL;
43667 +
43668 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43669 + return 0;
43670 +
43671 dirent = buf->previous;
43672
43673 if (dirent) {
43674 @@ -1057,13 +1074,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43675 buf.previous = NULL;
43676 buf.count = count;
43677 buf.error = 0;
43678 + buf.file = file;
43679
43680 error = vfs_readdir(file, compat_filldir64, &buf);
43681 if (error >= 0)
43682 error = buf.error;
43683 lastdirent = buf.previous;
43684 if (lastdirent) {
43685 - typeof(lastdirent->d_off) d_off = file->f_pos;
43686 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43687 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43688 error = -EFAULT;
43689 else
43690 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43691 index 112e45a..b59845b 100644
43692 --- a/fs/compat_binfmt_elf.c
43693 +++ b/fs/compat_binfmt_elf.c
43694 @@ -30,11 +30,13 @@
43695 #undef elf_phdr
43696 #undef elf_shdr
43697 #undef elf_note
43698 +#undef elf_dyn
43699 #undef elf_addr_t
43700 #define elfhdr elf32_hdr
43701 #define elf_phdr elf32_phdr
43702 #define elf_shdr elf32_shdr
43703 #define elf_note elf32_note
43704 +#define elf_dyn Elf32_Dyn
43705 #define elf_addr_t Elf32_Addr
43706
43707 /*
43708 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43709 index a26bea1..ae23e72 100644
43710 --- a/fs/compat_ioctl.c
43711 +++ b/fs/compat_ioctl.c
43712 @@ -211,6 +211,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43713
43714 err = get_user(palp, &up->palette);
43715 err |= get_user(length, &up->length);
43716 + if (err)
43717 + return -EFAULT;
43718
43719 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43720 err = put_user(compat_ptr(palp), &up_native->palette);
43721 @@ -622,7 +624,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43722 return -EFAULT;
43723 if (__get_user(udata, &ss32->iomem_base))
43724 return -EFAULT;
43725 - ss.iomem_base = compat_ptr(udata);
43726 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43727 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43728 __get_user(ss.port_high, &ss32->port_high))
43729 return -EFAULT;
43730 @@ -797,7 +799,7 @@ static int compat_ioctl_preallocate(struct file *file,
43731 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43732 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43733 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43734 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43735 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43736 return -EFAULT;
43737
43738 return ioctl_preallocate(file, p);
43739 @@ -1611,8 +1613,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43740 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43741 {
43742 unsigned int a, b;
43743 - a = *(unsigned int *)p;
43744 - b = *(unsigned int *)q;
43745 + a = *(const unsigned int *)p;
43746 + b = *(const unsigned int *)q;
43747 if (a > b)
43748 return 1;
43749 if (a < b)
43750 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43751 index 5ddd7eb..c18bf04 100644
43752 --- a/fs/configfs/dir.c
43753 +++ b/fs/configfs/dir.c
43754 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43755 }
43756 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43757 struct configfs_dirent *next;
43758 - const char * name;
43759 + const unsigned char * name;
43760 + char d_name[sizeof(next->s_dentry->d_iname)];
43761 int len;
43762 struct inode *inode = NULL;
43763
43764 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43765 continue;
43766
43767 name = configfs_get_name(next);
43768 - len = strlen(name);
43769 + if (next->s_dentry && name == next->s_dentry->d_iname) {
43770 + len = next->s_dentry->d_name.len;
43771 + memcpy(d_name, name, len);
43772 + name = d_name;
43773 + } else
43774 + len = strlen(name);
43775
43776 /*
43777 * We'll have a dentry and an inode for
43778 diff --git a/fs/dcache.c b/fs/dcache.c
43779 index 2576d14..0cec38d 100644
43780 --- a/fs/dcache.c
43781 +++ b/fs/dcache.c
43782 @@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly;
43783 static struct hlist_bl_head *dentry_hashtable __read_mostly;
43784
43785 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
43786 - unsigned long hash)
43787 + unsigned int hash)
43788 {
43789 - hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
43790 - hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
43791 + hash += (unsigned long) parent / L1_CACHE_BYTES;
43792 + hash = hash + (hash >> D_HASHBITS);
43793 return dentry_hashtable + (hash & D_HASHMASK);
43794 }
43795
43796 @@ -3067,7 +3067,7 @@ void __init vfs_caches_init(unsigned long mempages)
43797 mempages -= reserve;
43798
43799 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43800 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43801 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43802
43803 dcache_init();
43804 inode_init();
43805 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43806 index 956d5dd..e755e04 100644
43807 --- a/fs/debugfs/inode.c
43808 +++ b/fs/debugfs/inode.c
43809 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43810 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43811 {
43812 return debugfs_create_file(name,
43813 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43814 + S_IFDIR | S_IRWXU,
43815 +#else
43816 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43817 +#endif
43818 parent, NULL, NULL);
43819 }
43820 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43821 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43822 index ab35b11..b30af66 100644
43823 --- a/fs/ecryptfs/inode.c
43824 +++ b/fs/ecryptfs/inode.c
43825 @@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43826 old_fs = get_fs();
43827 set_fs(get_ds());
43828 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43829 - (char __user *)lower_buf,
43830 + (char __force_user *)lower_buf,
43831 lower_bufsiz);
43832 set_fs(old_fs);
43833 if (rc < 0)
43834 @@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43835 }
43836 old_fs = get_fs();
43837 set_fs(get_ds());
43838 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43839 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43840 set_fs(old_fs);
43841 if (rc < 0) {
43842 kfree(buf);
43843 @@ -733,7 +733,7 @@ out:
43844 static void
43845 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43846 {
43847 - char *buf = nd_get_link(nd);
43848 + const char *buf = nd_get_link(nd);
43849 if (!IS_ERR(buf)) {
43850 /* Free the char* */
43851 kfree(buf);
43852 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43853 index 3a06f40..f7af544 100644
43854 --- a/fs/ecryptfs/miscdev.c
43855 +++ b/fs/ecryptfs/miscdev.c
43856 @@ -345,7 +345,7 @@ check_list:
43857 goto out_unlock_msg_ctx;
43858 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43859 if (msg_ctx->msg) {
43860 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
43861 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43862 goto out_unlock_msg_ctx;
43863 i += packet_length_size;
43864 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43865 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43866 index b2a34a1..162fa69 100644
43867 --- a/fs/ecryptfs/read_write.c
43868 +++ b/fs/ecryptfs/read_write.c
43869 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43870 return -EIO;
43871 fs_save = get_fs();
43872 set_fs(get_ds());
43873 - rc = vfs_write(lower_file, data, size, &offset);
43874 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43875 set_fs(fs_save);
43876 mark_inode_dirty_sync(ecryptfs_inode);
43877 return rc;
43878 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43879 return -EIO;
43880 fs_save = get_fs();
43881 set_fs(get_ds());
43882 - rc = vfs_read(lower_file, data, size, &offset);
43883 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43884 set_fs(fs_save);
43885 return rc;
43886 }
43887 diff --git a/fs/exec.c b/fs/exec.c
43888 index ae42277..32c9035 100644
43889 --- a/fs/exec.c
43890 +++ b/fs/exec.c
43891 @@ -55,6 +55,13 @@
43892 #include <linux/pipe_fs_i.h>
43893 #include <linux/oom.h>
43894 #include <linux/compat.h>
43895 +#include <linux/random.h>
43896 +#include <linux/seq_file.h>
43897 +
43898 +#ifdef CONFIG_PAX_REFCOUNT
43899 +#include <linux/kallsyms.h>
43900 +#include <linux/kdebug.h>
43901 +#endif
43902
43903 #include <asm/uaccess.h>
43904 #include <asm/mmu_context.h>
43905 @@ -63,6 +70,15 @@
43906 #include <trace/events/task.h>
43907 #include "internal.h"
43908
43909 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
43910 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
43911 +#endif
43912 +
43913 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43914 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43915 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43916 +#endif
43917 +
43918 int core_uses_pid;
43919 char core_pattern[CORENAME_MAX_SIZE] = "core";
43920 unsigned int core_pipe_limit;
43921 @@ -72,7 +88,7 @@ struct core_name {
43922 char *corename;
43923 int used, size;
43924 };
43925 -static atomic_t call_count = ATOMIC_INIT(1);
43926 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43927
43928 /* The maximal length of core_pattern is also specified in sysctl.c */
43929
43930 @@ -190,18 +206,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43931 int write)
43932 {
43933 struct page *page;
43934 - int ret;
43935
43936 -#ifdef CONFIG_STACK_GROWSUP
43937 - if (write) {
43938 - ret = expand_downwards(bprm->vma, pos);
43939 - if (ret < 0)
43940 - return NULL;
43941 - }
43942 -#endif
43943 - ret = get_user_pages(current, bprm->mm, pos,
43944 - 1, write, 1, &page, NULL);
43945 - if (ret <= 0)
43946 + if (0 > expand_downwards(bprm->vma, pos))
43947 + return NULL;
43948 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43949 return NULL;
43950
43951 if (write) {
43952 @@ -217,6 +225,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43953 if (size <= ARG_MAX)
43954 return page;
43955
43956 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43957 + // only allow 512KB for argv+env on suid/sgid binaries
43958 + // to prevent easy ASLR exhaustion
43959 + if (((bprm->cred->euid != current_euid()) ||
43960 + (bprm->cred->egid != current_egid())) &&
43961 + (size > (512 * 1024))) {
43962 + put_page(page);
43963 + return NULL;
43964 + }
43965 +#endif
43966 +
43967 /*
43968 * Limit to 1/4-th the stack size for the argv+env strings.
43969 * This ensures that:
43970 @@ -276,6 +295,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43971 vma->vm_end = STACK_TOP_MAX;
43972 vma->vm_start = vma->vm_end - PAGE_SIZE;
43973 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43974 +
43975 +#ifdef CONFIG_PAX_SEGMEXEC
43976 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43977 +#endif
43978 +
43979 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43980 INIT_LIST_HEAD(&vma->anon_vma_chain);
43981
43982 @@ -290,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43983 mm->stack_vm = mm->total_vm = 1;
43984 up_write(&mm->mmap_sem);
43985 bprm->p = vma->vm_end - sizeof(void *);
43986 +
43987 +#ifdef CONFIG_PAX_RANDUSTACK
43988 + if (randomize_va_space)
43989 + bprm->p ^= random32() & ~PAGE_MASK;
43990 +#endif
43991 +
43992 return 0;
43993 err:
43994 up_write(&mm->mmap_sem);
43995 @@ -398,19 +428,7 @@ err:
43996 return err;
43997 }
43998
43999 -struct user_arg_ptr {
44000 -#ifdef CONFIG_COMPAT
44001 - bool is_compat;
44002 -#endif
44003 - union {
44004 - const char __user *const __user *native;
44005 -#ifdef CONFIG_COMPAT
44006 - compat_uptr_t __user *compat;
44007 -#endif
44008 - } ptr;
44009 -};
44010 -
44011 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44012 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44013 {
44014 const char __user *native;
44015
44016 @@ -419,14 +437,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44017 compat_uptr_t compat;
44018
44019 if (get_user(compat, argv.ptr.compat + nr))
44020 - return ERR_PTR(-EFAULT);
44021 + return (const char __force_user *)ERR_PTR(-EFAULT);
44022
44023 return compat_ptr(compat);
44024 }
44025 #endif
44026
44027 if (get_user(native, argv.ptr.native + nr))
44028 - return ERR_PTR(-EFAULT);
44029 + return (const char __force_user *)ERR_PTR(-EFAULT);
44030
44031 return native;
44032 }
44033 @@ -445,7 +463,7 @@ static int count(struct user_arg_ptr argv, int max)
44034 if (!p)
44035 break;
44036
44037 - if (IS_ERR(p))
44038 + if (IS_ERR((const char __force_kernel *)p))
44039 return -EFAULT;
44040
44041 if (i++ >= max)
44042 @@ -479,7 +497,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
44043
44044 ret = -EFAULT;
44045 str = get_user_arg_ptr(argv, argc);
44046 - if (IS_ERR(str))
44047 + if (IS_ERR((const char __force_kernel *)str))
44048 goto out;
44049
44050 len = strnlen_user(str, MAX_ARG_STRLEN);
44051 @@ -561,7 +579,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
44052 int r;
44053 mm_segment_t oldfs = get_fs();
44054 struct user_arg_ptr argv = {
44055 - .ptr.native = (const char __user *const __user *)__argv,
44056 + .ptr.native = (const char __force_user *const __force_user *)__argv,
44057 };
44058
44059 set_fs(KERNEL_DS);
44060 @@ -596,7 +614,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44061 unsigned long new_end = old_end - shift;
44062 struct mmu_gather tlb;
44063
44064 - BUG_ON(new_start > new_end);
44065 + if (new_start >= new_end || new_start < mmap_min_addr)
44066 + return -ENOMEM;
44067
44068 /*
44069 * ensure there are no vmas between where we want to go
44070 @@ -605,6 +624,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44071 if (vma != find_vma(mm, new_start))
44072 return -EFAULT;
44073
44074 +#ifdef CONFIG_PAX_SEGMEXEC
44075 + BUG_ON(pax_find_mirror_vma(vma));
44076 +#endif
44077 +
44078 /*
44079 * cover the whole range: [new_start, old_end)
44080 */
44081 @@ -685,10 +708,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44082 stack_top = arch_align_stack(stack_top);
44083 stack_top = PAGE_ALIGN(stack_top);
44084
44085 - if (unlikely(stack_top < mmap_min_addr) ||
44086 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
44087 - return -ENOMEM;
44088 -
44089 stack_shift = vma->vm_end - stack_top;
44090
44091 bprm->p -= stack_shift;
44092 @@ -700,8 +719,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
44093 bprm->exec -= stack_shift;
44094
44095 down_write(&mm->mmap_sem);
44096 +
44097 + /* Move stack pages down in memory. */
44098 + if (stack_shift) {
44099 + ret = shift_arg_pages(vma, stack_shift);
44100 + if (ret)
44101 + goto out_unlock;
44102 + }
44103 +
44104 vm_flags = VM_STACK_FLAGS;
44105
44106 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44107 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44108 + vm_flags &= ~VM_EXEC;
44109 +
44110 +#ifdef CONFIG_PAX_MPROTECT
44111 + if (mm->pax_flags & MF_PAX_MPROTECT)
44112 + vm_flags &= ~VM_MAYEXEC;
44113 +#endif
44114 +
44115 + }
44116 +#endif
44117 +
44118 /*
44119 * Adjust stack execute permissions; explicitly enable for
44120 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
44121 @@ -720,13 +759,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44122 goto out_unlock;
44123 BUG_ON(prev != vma);
44124
44125 - /* Move stack pages down in memory. */
44126 - if (stack_shift) {
44127 - ret = shift_arg_pages(vma, stack_shift);
44128 - if (ret)
44129 - goto out_unlock;
44130 - }
44131 -
44132 /* mprotect_fixup is overkill to remove the temporary stack flags */
44133 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
44134
44135 @@ -807,7 +839,7 @@ int kernel_read(struct file *file, loff_t offset,
44136 old_fs = get_fs();
44137 set_fs(get_ds());
44138 /* The cast to a user pointer is valid due to the set_fs() */
44139 - result = vfs_read(file, (void __user *)addr, count, &pos);
44140 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
44141 set_fs(old_fs);
44142 return result;
44143 }
44144 @@ -1255,7 +1287,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
44145 }
44146 rcu_read_unlock();
44147
44148 - if (p->fs->users > n_fs) {
44149 + if (atomic_read(&p->fs->users) > n_fs) {
44150 bprm->unsafe |= LSM_UNSAFE_SHARE;
44151 } else {
44152 res = -EAGAIN;
44153 @@ -1450,6 +1482,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
44154
44155 EXPORT_SYMBOL(search_binary_handler);
44156
44157 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44158 +static DEFINE_PER_CPU(u64, exec_counter);
44159 +static int __init init_exec_counters(void)
44160 +{
44161 + unsigned int cpu;
44162 +
44163 + for_each_possible_cpu(cpu) {
44164 + per_cpu(exec_counter, cpu) = (u64)cpu;
44165 + }
44166 +
44167 + return 0;
44168 +}
44169 +early_initcall(init_exec_counters);
44170 +static inline void increment_exec_counter(void)
44171 +{
44172 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
44173 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
44174 +}
44175 +#else
44176 +static inline void increment_exec_counter(void) {}
44177 +#endif
44178 +
44179 /*
44180 * sys_execve() executes a new program.
44181 */
44182 @@ -1458,6 +1512,11 @@ static int do_execve_common(const char *filename,
44183 struct user_arg_ptr envp,
44184 struct pt_regs *regs)
44185 {
44186 +#ifdef CONFIG_GRKERNSEC
44187 + struct file *old_exec_file;
44188 + struct acl_subject_label *old_acl;
44189 + struct rlimit old_rlim[RLIM_NLIMITS];
44190 +#endif
44191 struct linux_binprm *bprm;
44192 struct file *file;
44193 struct files_struct *displaced;
44194 @@ -1465,6 +1524,8 @@ static int do_execve_common(const char *filename,
44195 int retval;
44196 const struct cred *cred = current_cred();
44197
44198 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44199 +
44200 /*
44201 * We move the actual failure in case of RLIMIT_NPROC excess from
44202 * set*uid() to execve() because too many poorly written programs
44203 @@ -1505,12 +1566,27 @@ static int do_execve_common(const char *filename,
44204 if (IS_ERR(file))
44205 goto out_unmark;
44206
44207 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
44208 + retval = -EPERM;
44209 + goto out_file;
44210 + }
44211 +
44212 sched_exec();
44213
44214 bprm->file = file;
44215 bprm->filename = filename;
44216 bprm->interp = filename;
44217
44218 + if (gr_process_user_ban()) {
44219 + retval = -EPERM;
44220 + goto out_file;
44221 + }
44222 +
44223 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44224 + retval = -EACCES;
44225 + goto out_file;
44226 + }
44227 +
44228 retval = bprm_mm_init(bprm);
44229 if (retval)
44230 goto out_file;
44231 @@ -1527,24 +1603,65 @@ static int do_execve_common(const char *filename,
44232 if (retval < 0)
44233 goto out;
44234
44235 +#ifdef CONFIG_GRKERNSEC
44236 + old_acl = current->acl;
44237 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44238 + old_exec_file = current->exec_file;
44239 + get_file(file);
44240 + current->exec_file = file;
44241 +#endif
44242 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44243 + /* limit suid stack to 8MB
44244 + we saved the old limits above and will restore them if this exec fails
44245 + */
44246 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
44247 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
44248 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
44249 +#endif
44250 +
44251 + if (!gr_tpe_allow(file)) {
44252 + retval = -EACCES;
44253 + goto out_fail;
44254 + }
44255 +
44256 + if (gr_check_crash_exec(file)) {
44257 + retval = -EACCES;
44258 + goto out_fail;
44259 + }
44260 +
44261 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44262 + bprm->unsafe);
44263 + if (retval < 0)
44264 + goto out_fail;
44265 +
44266 retval = copy_strings_kernel(1, &bprm->filename, bprm);
44267 if (retval < 0)
44268 - goto out;
44269 + goto out_fail;
44270
44271 bprm->exec = bprm->p;
44272 retval = copy_strings(bprm->envc, envp, bprm);
44273 if (retval < 0)
44274 - goto out;
44275 + goto out_fail;
44276
44277 retval = copy_strings(bprm->argc, argv, bprm);
44278 if (retval < 0)
44279 - goto out;
44280 + goto out_fail;
44281 +
44282 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44283 +
44284 + gr_handle_exec_args(bprm, argv);
44285
44286 retval = search_binary_handler(bprm,regs);
44287 if (retval < 0)
44288 - goto out;
44289 + goto out_fail;
44290 +#ifdef CONFIG_GRKERNSEC
44291 + if (old_exec_file)
44292 + fput(old_exec_file);
44293 +#endif
44294
44295 /* execve succeeded */
44296 +
44297 + increment_exec_counter();
44298 current->fs->in_exec = 0;
44299 current->in_execve = 0;
44300 acct_update_integrals(current);
44301 @@ -1553,6 +1670,14 @@ static int do_execve_common(const char *filename,
44302 put_files_struct(displaced);
44303 return retval;
44304
44305 +out_fail:
44306 +#ifdef CONFIG_GRKERNSEC
44307 + current->acl = old_acl;
44308 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44309 + fput(current->exec_file);
44310 + current->exec_file = old_exec_file;
44311 +#endif
44312 +
44313 out:
44314 if (bprm->mm) {
44315 acct_arg_size(bprm, 0);
44316 @@ -1626,7 +1751,7 @@ static int expand_corename(struct core_name *cn)
44317 {
44318 char *old_corename = cn->corename;
44319
44320 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
44321 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
44322 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
44323
44324 if (!cn->corename) {
44325 @@ -1723,7 +1848,7 @@ static int format_corename(struct core_name *cn, long signr)
44326 int pid_in_pattern = 0;
44327 int err = 0;
44328
44329 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
44330 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
44331 cn->corename = kmalloc(cn->size, GFP_KERNEL);
44332 cn->used = 0;
44333
44334 @@ -1820,6 +1945,228 @@ out:
44335 return ispipe;
44336 }
44337
44338 +int pax_check_flags(unsigned long *flags)
44339 +{
44340 + int retval = 0;
44341 +
44342 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44343 + if (*flags & MF_PAX_SEGMEXEC)
44344 + {
44345 + *flags &= ~MF_PAX_SEGMEXEC;
44346 + retval = -EINVAL;
44347 + }
44348 +#endif
44349 +
44350 + if ((*flags & MF_PAX_PAGEEXEC)
44351 +
44352 +#ifdef CONFIG_PAX_PAGEEXEC
44353 + && (*flags & MF_PAX_SEGMEXEC)
44354 +#endif
44355 +
44356 + )
44357 + {
44358 + *flags &= ~MF_PAX_PAGEEXEC;
44359 + retval = -EINVAL;
44360 + }
44361 +
44362 + if ((*flags & MF_PAX_MPROTECT)
44363 +
44364 +#ifdef CONFIG_PAX_MPROTECT
44365 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44366 +#endif
44367 +
44368 + )
44369 + {
44370 + *flags &= ~MF_PAX_MPROTECT;
44371 + retval = -EINVAL;
44372 + }
44373 +
44374 + if ((*flags & MF_PAX_EMUTRAMP)
44375 +
44376 +#ifdef CONFIG_PAX_EMUTRAMP
44377 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44378 +#endif
44379 +
44380 + )
44381 + {
44382 + *flags &= ~MF_PAX_EMUTRAMP;
44383 + retval = -EINVAL;
44384 + }
44385 +
44386 + return retval;
44387 +}
44388 +
44389 +EXPORT_SYMBOL(pax_check_flags);
44390 +
44391 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44392 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44393 +{
44394 + struct task_struct *tsk = current;
44395 + struct mm_struct *mm = current->mm;
44396 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44397 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44398 + char *path_exec = NULL;
44399 + char *path_fault = NULL;
44400 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
44401 +
44402 + if (buffer_exec && buffer_fault) {
44403 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44404 +
44405 + down_read(&mm->mmap_sem);
44406 + vma = mm->mmap;
44407 + while (vma && (!vma_exec || !vma_fault)) {
44408 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44409 + vma_exec = vma;
44410 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44411 + vma_fault = vma;
44412 + vma = vma->vm_next;
44413 + }
44414 + if (vma_exec) {
44415 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44416 + if (IS_ERR(path_exec))
44417 + path_exec = "<path too long>";
44418 + else {
44419 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44420 + if (path_exec) {
44421 + *path_exec = 0;
44422 + path_exec = buffer_exec;
44423 + } else
44424 + path_exec = "<path too long>";
44425 + }
44426 + }
44427 + if (vma_fault) {
44428 + start = vma_fault->vm_start;
44429 + end = vma_fault->vm_end;
44430 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44431 + if (vma_fault->vm_file) {
44432 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44433 + if (IS_ERR(path_fault))
44434 + path_fault = "<path too long>";
44435 + else {
44436 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44437 + if (path_fault) {
44438 + *path_fault = 0;
44439 + path_fault = buffer_fault;
44440 + } else
44441 + path_fault = "<path too long>";
44442 + }
44443 + } else
44444 + path_fault = "<anonymous mapping>";
44445 + }
44446 + up_read(&mm->mmap_sem);
44447 + }
44448 + if (tsk->signal->curr_ip)
44449 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44450 + else
44451 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44452 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44453 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44454 + task_uid(tsk), task_euid(tsk), pc, sp);
44455 + free_page((unsigned long)buffer_exec);
44456 + free_page((unsigned long)buffer_fault);
44457 + pax_report_insns(regs, pc, sp);
44458 + do_coredump(SIGKILL, SIGKILL, regs);
44459 +}
44460 +#endif
44461 +
44462 +#ifdef CONFIG_PAX_REFCOUNT
44463 +void pax_report_refcount_overflow(struct pt_regs *regs)
44464 +{
44465 + if (current->signal->curr_ip)
44466 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44467 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44468 + else
44469 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44470 + current->comm, task_pid_nr(current), current_uid(), current_euid());
44471 + print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
44472 + show_regs(regs);
44473 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44474 +}
44475 +#endif
44476 +
44477 +#ifdef CONFIG_PAX_USERCOPY
44478 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44479 +int object_is_on_stack(const void *obj, unsigned long len)
44480 +{
44481 + const void * const stack = task_stack_page(current);
44482 + const void * const stackend = stack + THREAD_SIZE;
44483 +
44484 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44485 + const void *frame = NULL;
44486 + const void *oldframe;
44487 +#endif
44488 +
44489 + if (obj + len < obj)
44490 + return -1;
44491 +
44492 + if (obj + len <= stack || stackend <= obj)
44493 + return 0;
44494 +
44495 + if (obj < stack || stackend < obj + len)
44496 + return -1;
44497 +
44498 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44499 + oldframe = __builtin_frame_address(1);
44500 + if (oldframe)
44501 + frame = __builtin_frame_address(2);
44502 + /*
44503 + low ----------------------------------------------> high
44504 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
44505 + ^----------------^
44506 + allow copies only within here
44507 + */
44508 + while (stack <= frame && frame < stackend) {
44509 + /* if obj + len extends past the last frame, this
44510 + check won't pass and the next frame will be 0,
44511 + causing us to bail out and correctly report
44512 + the copy as invalid
44513 + */
44514 + if (obj + len <= frame)
44515 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44516 + oldframe = frame;
44517 + frame = *(const void * const *)frame;
44518 + }
44519 + return -1;
44520 +#else
44521 + return 1;
44522 +#endif
44523 +}
44524 +
44525 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44526 +{
44527 + if (current->signal->curr_ip)
44528 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44529 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44530 + else
44531 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44532 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44533 + dump_stack();
44534 + gr_handle_kernel_exploit();
44535 + do_group_exit(SIGKILL);
44536 +}
44537 +#endif
44538 +
44539 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44540 +void pax_track_stack(void)
44541 +{
44542 + unsigned long sp = (unsigned long)&sp;
44543 + if (sp < current_thread_info()->lowest_stack &&
44544 + sp > (unsigned long)task_stack_page(current))
44545 + current_thread_info()->lowest_stack = sp;
44546 +}
44547 +EXPORT_SYMBOL(pax_track_stack);
44548 +#endif
44549 +
44550 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
44551 +void report_size_overflow(const char *file, unsigned int line, const char *func)
44552 +{
44553 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
44554 + dump_stack();
44555 + do_group_exit(SIGKILL);
44556 +}
44557 +EXPORT_SYMBOL(report_size_overflow);
44558 +#endif
44559 +
44560 static int zap_process(struct task_struct *start, int exit_code)
44561 {
44562 struct task_struct *t;
44563 @@ -2017,17 +2364,17 @@ static void wait_for_dump_helpers(struct file *file)
44564 pipe = file->f_path.dentry->d_inode->i_pipe;
44565
44566 pipe_lock(pipe);
44567 - pipe->readers++;
44568 - pipe->writers--;
44569 + atomic_inc(&pipe->readers);
44570 + atomic_dec(&pipe->writers);
44571
44572 - while ((pipe->readers > 1) && (!signal_pending(current))) {
44573 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44574 wake_up_interruptible_sync(&pipe->wait);
44575 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44576 pipe_wait(pipe);
44577 }
44578
44579 - pipe->readers--;
44580 - pipe->writers++;
44581 + atomic_dec(&pipe->readers);
44582 + atomic_inc(&pipe->writers);
44583 pipe_unlock(pipe);
44584
44585 }
44586 @@ -2088,7 +2435,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44587 int retval = 0;
44588 int flag = 0;
44589 int ispipe;
44590 - static atomic_t core_dump_count = ATOMIC_INIT(0);
44591 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44592 struct coredump_params cprm = {
44593 .signr = signr,
44594 .regs = regs,
44595 @@ -2103,6 +2450,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44596
44597 audit_core_dumps(signr);
44598
44599 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44600 + gr_handle_brute_attach(current, cprm.mm_flags);
44601 +
44602 binfmt = mm->binfmt;
44603 if (!binfmt || !binfmt->core_dump)
44604 goto fail;
44605 @@ -2170,7 +2520,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44606 }
44607 cprm.limit = RLIM_INFINITY;
44608
44609 - dump_count = atomic_inc_return(&core_dump_count);
44610 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
44611 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44612 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44613 task_tgid_vnr(current), current->comm);
44614 @@ -2197,6 +2547,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44615 } else {
44616 struct inode *inode;
44617
44618 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44619 +
44620 if (cprm.limit < binfmt->min_coredump)
44621 goto fail_unlock;
44622
44623 @@ -2240,7 +2592,7 @@ close_fail:
44624 filp_close(cprm.file, NULL);
44625 fail_dropcount:
44626 if (ispipe)
44627 - atomic_dec(&core_dump_count);
44628 + atomic_dec_unchecked(&core_dump_count);
44629 fail_unlock:
44630 kfree(cn.corename);
44631 fail_corename:
44632 @@ -2259,7 +2611,7 @@ fail:
44633 */
44634 int dump_write(struct file *file, const void *addr, int nr)
44635 {
44636 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44637 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44638 }
44639 EXPORT_SYMBOL(dump_write);
44640
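For orientation, a minimal userspace sketch of the range classification that the object_is_on_stack() helper added to fs/exec.c above performs before it walks saved frame pointers; the name classify_range and the buffer layout in main() are illustrative only and are not part of the patch.

/* 0 = object entirely outside [stack, stackend), 1 = entirely inside,
 * -1 = wraps or straddles a boundary (reported as an error by the caller)
 */
#include <stdio.h>

static int classify_range(const char *obj, unsigned long len,
                          const char *stack, const char *stackend)
{
        if (obj + len < obj)                       /* length wraps around */
                return -1;
        if (obj + len <= stack || stackend <= obj)
                return 0;                          /* fully outside */
        if (obj < stack || stackend < obj + len)
                return -1;                         /* partial overlap */
        return 1;                                  /* fully inside */
}

int main(void)
{
        char buf[512];
        const char *stack = buf + 128, *stackend = buf + 384;

        printf("%d\n", classify_range(buf + 200, 32, stack, stackend)); /* 1  */
        printf("%d\n", classify_range(buf + 16,  32, stack, stackend)); /* 0  */
        printf("%d\n", classify_range(buf + 120, 32, stack, stackend)); /* -1 */
        return 0;
}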
44641 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44642 index a8cbe1b..fed04cb 100644
44643 --- a/fs/ext2/balloc.c
44644 +++ b/fs/ext2/balloc.c
44645 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44646
44647 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44648 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44649 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44650 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44651 sbi->s_resuid != current_fsuid() &&
44652 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44653 return 0;
44654 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44655 index a203892..4e64db5 100644
44656 --- a/fs/ext3/balloc.c
44657 +++ b/fs/ext3/balloc.c
44658 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44659
44660 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44661 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44662 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44663 + if (free_blocks < root_blocks + 1 &&
44664 !use_reservation && sbi->s_resuid != current_fsuid() &&
44665 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44666 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44667 + !capable_nolog(CAP_SYS_RESOURCE)) {
44668 return 0;
44669 }
44670 return 1;
44671 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44672 index f9e2cd8..bfdc476 100644
44673 --- a/fs/ext4/balloc.c
44674 +++ b/fs/ext4/balloc.c
44675 @@ -438,8 +438,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
44676 /* Hm, nope. Are (enough) root reserved clusters available? */
44677 if (sbi->s_resuid == current_fsuid() ||
44678 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44679 - capable(CAP_SYS_RESOURCE) ||
44680 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44681 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44682 + capable_nolog(CAP_SYS_RESOURCE)) {
44683
44684 if (free_clusters >= (nclusters + dirty_clusters))
44685 return 1;
44686 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44687 index 9983ba8..2a5272c 100644
44688 --- a/fs/ext4/ext4.h
44689 +++ b/fs/ext4/ext4.h
44690 @@ -1217,19 +1217,19 @@ struct ext4_sb_info {
44691 unsigned long s_mb_last_start;
44692
44693 /* stats for buddy allocator */
44694 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44695 - atomic_t s_bal_success; /* we found long enough chunks */
44696 - atomic_t s_bal_allocated; /* in blocks */
44697 - atomic_t s_bal_ex_scanned; /* total extents scanned */
44698 - atomic_t s_bal_goals; /* goal hits */
44699 - atomic_t s_bal_breaks; /* too long searches */
44700 - atomic_t s_bal_2orders; /* 2^order hits */
44701 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44702 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44703 + atomic_unchecked_t s_bal_allocated; /* in blocks */
44704 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44705 + atomic_unchecked_t s_bal_goals; /* goal hits */
44706 + atomic_unchecked_t s_bal_breaks; /* too long searches */
44707 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44708 spinlock_t s_bal_lock;
44709 unsigned long s_mb_buddies_generated;
44710 unsigned long long s_mb_generation_time;
44711 - atomic_t s_mb_lost_chunks;
44712 - atomic_t s_mb_preallocated;
44713 - atomic_t s_mb_discarded;
44714 + atomic_unchecked_t s_mb_lost_chunks;
44715 + atomic_unchecked_t s_mb_preallocated;
44716 + atomic_unchecked_t s_mb_discarded;
44717 atomic_t s_lock_busy;
44718
44719 /* locality groups */
44720 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44721 index cb990b2..4820141 100644
44722 --- a/fs/ext4/mballoc.c
44723 +++ b/fs/ext4/mballoc.c
44724 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44725 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44726
44727 if (EXT4_SB(sb)->s_mb_stats)
44728 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44729 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44730
44731 break;
44732 }
44733 @@ -2088,7 +2088,7 @@ repeat:
44734 ac->ac_status = AC_STATUS_CONTINUE;
44735 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44736 cr = 3;
44737 - atomic_inc(&sbi->s_mb_lost_chunks);
44738 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44739 goto repeat;
44740 }
44741 }
44742 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
44743 if (sbi->s_mb_stats) {
44744 ext4_msg(sb, KERN_INFO,
44745 "mballoc: %u blocks %u reqs (%u success)",
44746 - atomic_read(&sbi->s_bal_allocated),
44747 - atomic_read(&sbi->s_bal_reqs),
44748 - atomic_read(&sbi->s_bal_success));
44749 + atomic_read_unchecked(&sbi->s_bal_allocated),
44750 + atomic_read_unchecked(&sbi->s_bal_reqs),
44751 + atomic_read_unchecked(&sbi->s_bal_success));
44752 ext4_msg(sb, KERN_INFO,
44753 "mballoc: %u extents scanned, %u goal hits, "
44754 "%u 2^N hits, %u breaks, %u lost",
44755 - atomic_read(&sbi->s_bal_ex_scanned),
44756 - atomic_read(&sbi->s_bal_goals),
44757 - atomic_read(&sbi->s_bal_2orders),
44758 - atomic_read(&sbi->s_bal_breaks),
44759 - atomic_read(&sbi->s_mb_lost_chunks));
44760 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44761 + atomic_read_unchecked(&sbi->s_bal_goals),
44762 + atomic_read_unchecked(&sbi->s_bal_2orders),
44763 + atomic_read_unchecked(&sbi->s_bal_breaks),
44764 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44765 ext4_msg(sb, KERN_INFO,
44766 "mballoc: %lu generated and it took %Lu",
44767 sbi->s_mb_buddies_generated,
44768 sbi->s_mb_generation_time);
44769 ext4_msg(sb, KERN_INFO,
44770 "mballoc: %u preallocated, %u discarded",
44771 - atomic_read(&sbi->s_mb_preallocated),
44772 - atomic_read(&sbi->s_mb_discarded));
44773 + atomic_read_unchecked(&sbi->s_mb_preallocated),
44774 + atomic_read_unchecked(&sbi->s_mb_discarded));
44775 }
44776
44777 free_percpu(sbi->s_locality_groups);
44778 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44779 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44780
44781 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44782 - atomic_inc(&sbi->s_bal_reqs);
44783 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44784 + atomic_inc_unchecked(&sbi->s_bal_reqs);
44785 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44786 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44787 - atomic_inc(&sbi->s_bal_success);
44788 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44789 + atomic_inc_unchecked(&sbi->s_bal_success);
44790 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44791 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44792 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44793 - atomic_inc(&sbi->s_bal_goals);
44794 + atomic_inc_unchecked(&sbi->s_bal_goals);
44795 if (ac->ac_found > sbi->s_mb_max_to_scan)
44796 - atomic_inc(&sbi->s_bal_breaks);
44797 + atomic_inc_unchecked(&sbi->s_bal_breaks);
44798 }
44799
44800 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44801 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44802 trace_ext4_mb_new_inode_pa(ac, pa);
44803
44804 ext4_mb_use_inode_pa(ac, pa);
44805 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44806 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44807
44808 ei = EXT4_I(ac->ac_inode);
44809 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44810 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44811 trace_ext4_mb_new_group_pa(ac, pa);
44812
44813 ext4_mb_use_group_pa(ac, pa);
44814 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44815 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44816
44817 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44818 lg = ac->ac_lg;
44819 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44820 * from the bitmap and continue.
44821 */
44822 }
44823 - atomic_add(free, &sbi->s_mb_discarded);
44824 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44825
44826 return err;
44827 }
44828 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44829 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44830 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44831 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44832 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44833 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44834 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44835
44836 return 0;
44837 diff --git a/fs/fcntl.c b/fs/fcntl.c
44838 index 22764c7..86372c9 100644
44839 --- a/fs/fcntl.c
44840 +++ b/fs/fcntl.c
44841 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44842 if (err)
44843 return err;
44844
44845 + if (gr_handle_chroot_fowner(pid, type))
44846 + return -ENOENT;
44847 + if (gr_check_protected_task_fowner(pid, type))
44848 + return -EACCES;
44849 +
44850 f_modown(filp, pid, type, force);
44851 return 0;
44852 }
44853 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44854
44855 static int f_setown_ex(struct file *filp, unsigned long arg)
44856 {
44857 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44858 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44859 struct f_owner_ex owner;
44860 struct pid *pid;
44861 int type;
44862 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44863
44864 static int f_getown_ex(struct file *filp, unsigned long arg)
44865 {
44866 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44867 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44868 struct f_owner_ex owner;
44869 int ret = 0;
44870
44871 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44872 switch (cmd) {
44873 case F_DUPFD:
44874 case F_DUPFD_CLOEXEC:
44875 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44876 if (arg >= rlimit(RLIMIT_NOFILE))
44877 break;
44878 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44879 diff --git a/fs/fifo.c b/fs/fifo.c
44880 index b1a524d..4ee270e 100644
44881 --- a/fs/fifo.c
44882 +++ b/fs/fifo.c
44883 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44884 */
44885 filp->f_op = &read_pipefifo_fops;
44886 pipe->r_counter++;
44887 - if (pipe->readers++ == 0)
44888 + if (atomic_inc_return(&pipe->readers) == 1)
44889 wake_up_partner(inode);
44890
44891 - if (!pipe->writers) {
44892 + if (!atomic_read(&pipe->writers)) {
44893 if ((filp->f_flags & O_NONBLOCK)) {
44894 /* suppress POLLHUP until we have
44895 * seen a writer */
44896 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44897 * errno=ENXIO when there is no process reading the FIFO.
44898 */
44899 ret = -ENXIO;
44900 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44901 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44902 goto err;
44903
44904 filp->f_op = &write_pipefifo_fops;
44905 pipe->w_counter++;
44906 - if (!pipe->writers++)
44907 + if (atomic_inc_return(&pipe->writers) == 1)
44908 wake_up_partner(inode);
44909
44910 - if (!pipe->readers) {
44911 + if (!atomic_read(&pipe->readers)) {
44912 wait_for_partner(inode, &pipe->r_counter);
44913 if (signal_pending(current))
44914 goto err_wr;
44915 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44916 */
44917 filp->f_op = &rdwr_pipefifo_fops;
44918
44919 - pipe->readers++;
44920 - pipe->writers++;
44921 + atomic_inc(&pipe->readers);
44922 + atomic_inc(&pipe->writers);
44923 pipe->r_counter++;
44924 pipe->w_counter++;
44925 - if (pipe->readers == 1 || pipe->writers == 1)
44926 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44927 wake_up_partner(inode);
44928 break;
44929
44930 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44931 return 0;
44932
44933 err_rd:
44934 - if (!--pipe->readers)
44935 + if (atomic_dec_and_test(&pipe->readers))
44936 wake_up_interruptible(&pipe->wait);
44937 ret = -ERESTARTSYS;
44938 goto err;
44939
44940 err_wr:
44941 - if (!--pipe->writers)
44942 + if (atomic_dec_and_test(&pipe->writers))
44943 wake_up_interruptible(&pipe->wait);
44944 ret = -ERESTARTSYS;
44945 goto err;
44946
44947 err:
44948 - if (!pipe->readers && !pipe->writers)
44949 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44950 free_pipe_info(inode);
44951
44952 err_nocleanup:
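The fs/fifo.c hunks above convert the plain reader/writer counts to atomics; the idiom atomic_inc_return(&c) == 1 preserves the old c++ == 0 test ("I was the first opener"). A short C11 sketch of the same pattern, assuming hypothetical names (fifo_open_reader, a printing stand-in for wake_up_partner) that do not appear in the patch.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int readers;

static void wake_up_partner(void)
{
        puts("first reader: waking partner");   /* stand-in for the kernel helper */
}

static void fifo_open_reader(void)
{
        /* fetch-add returns the old value, so "== 0" here matches the
         * new-value test atomic_inc_return(&readers) == 1 in the patch */
        if (atomic_fetch_add_explicit(&readers, 1, memory_order_relaxed) == 0)
                wake_up_partner();
}

int main(void)
{
        fifo_open_reader();     /* prints the wake-up message */
        fifo_open_reader();     /* silent: counter already nonzero */
        return 0;
}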
44953 diff --git a/fs/file.c b/fs/file.c
44954 index 4c6992d..104cdea 100644
44955 --- a/fs/file.c
44956 +++ b/fs/file.c
44957 @@ -15,6 +15,7 @@
44958 #include <linux/slab.h>
44959 #include <linux/vmalloc.h>
44960 #include <linux/file.h>
44961 +#include <linux/security.h>
44962 #include <linux/fdtable.h>
44963 #include <linux/bitops.h>
44964 #include <linux/interrupt.h>
44965 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
44966 * N.B. For clone tasks sharing a files structure, this test
44967 * will limit the total number of files that can be opened.
44968 */
44969 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44970 if (nr >= rlimit(RLIMIT_NOFILE))
44971 return -EMFILE;
44972
44973 diff --git a/fs/filesystems.c b/fs/filesystems.c
44974 index 96f2428..f5eeb8e 100644
44975 --- a/fs/filesystems.c
44976 +++ b/fs/filesystems.c
44977 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
44978 int len = dot ? dot - name : strlen(name);
44979
44980 fs = __get_fs_type(name, len);
44981 +
44982 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44983 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44984 +#else
44985 if (!fs && (request_module("%.*s", len, name) == 0))
44986 +#endif
44987 fs = __get_fs_type(name, len);
44988
44989 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44990 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44991 index 78b519c..a8b4979 100644
44992 --- a/fs/fs_struct.c
44993 +++ b/fs/fs_struct.c
44994 @@ -4,6 +4,7 @@
44995 #include <linux/path.h>
44996 #include <linux/slab.h>
44997 #include <linux/fs_struct.h>
44998 +#include <linux/grsecurity.h>
44999 #include "internal.h"
45000
45001 static inline void path_get_longterm(struct path *path)
45002 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
45003 old_root = fs->root;
45004 fs->root = *path;
45005 path_get_longterm(path);
45006 + gr_set_chroot_entries(current, path);
45007 write_seqcount_end(&fs->seq);
45008 spin_unlock(&fs->lock);
45009 if (old_root.dentry)
45010 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
45011 && fs->root.mnt == old_root->mnt) {
45012 path_get_longterm(new_root);
45013 fs->root = *new_root;
45014 + gr_set_chroot_entries(p, new_root);
45015 count++;
45016 }
45017 if (fs->pwd.dentry == old_root->dentry
45018 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
45019 spin_lock(&fs->lock);
45020 write_seqcount_begin(&fs->seq);
45021 tsk->fs = NULL;
45022 - kill = !--fs->users;
45023 + gr_clear_chroot_entries(tsk);
45024 + kill = !atomic_dec_return(&fs->users);
45025 write_seqcount_end(&fs->seq);
45026 spin_unlock(&fs->lock);
45027 task_unlock(tsk);
45028 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45029 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
45030 /* We don't need to lock fs - think why ;-) */
45031 if (fs) {
45032 - fs->users = 1;
45033 + atomic_set(&fs->users, 1);
45034 fs->in_exec = 0;
45035 spin_lock_init(&fs->lock);
45036 seqcount_init(&fs->seq);
45037 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45038 spin_lock(&old->lock);
45039 fs->root = old->root;
45040 path_get_longterm(&fs->root);
45041 + /* instead of calling gr_set_chroot_entries here,
45042 + we call it from every caller of this function
45043 + */
45044 fs->pwd = old->pwd;
45045 path_get_longterm(&fs->pwd);
45046 spin_unlock(&old->lock);
45047 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
45048
45049 task_lock(current);
45050 spin_lock(&fs->lock);
45051 - kill = !--fs->users;
45052 + kill = !atomic_dec_return(&fs->users);
45053 current->fs = new_fs;
45054 + gr_set_chroot_entries(current, &new_fs->root);
45055 spin_unlock(&fs->lock);
45056 task_unlock(current);
45057
45058 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
45059
45060 int current_umask(void)
45061 {
45062 - return current->fs->umask;
45063 + return current->fs->umask | gr_acl_umask();
45064 }
45065 EXPORT_SYMBOL(current_umask);
45066
45067 /* to be mentioned only in INIT_TASK */
45068 struct fs_struct init_fs = {
45069 - .users = 1,
45070 + .users = ATOMIC_INIT(1),
45071 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
45072 .seq = SEQCNT_ZERO,
45073 .umask = 0022,
45074 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
45075 task_lock(current);
45076
45077 spin_lock(&init_fs.lock);
45078 - init_fs.users++;
45079 + atomic_inc(&init_fs.users);
45080 spin_unlock(&init_fs.lock);
45081
45082 spin_lock(&fs->lock);
45083 current->fs = &init_fs;
45084 - kill = !--fs->users;
45085 + gr_set_chroot_entries(current, &current->fs->root);
45086 + kill = !atomic_dec_return(&fs->users);
45087 spin_unlock(&fs->lock);
45088
45089 task_unlock(current);
45090 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
45091 index 9905350..02eaec4 100644
45092 --- a/fs/fscache/cookie.c
45093 +++ b/fs/fscache/cookie.c
45094 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
45095 parent ? (char *) parent->def->name : "<no-parent>",
45096 def->name, netfs_data);
45097
45098 - fscache_stat(&fscache_n_acquires);
45099 + fscache_stat_unchecked(&fscache_n_acquires);
45100
45101 /* if there's no parent cookie, then we don't create one here either */
45102 if (!parent) {
45103 - fscache_stat(&fscache_n_acquires_null);
45104 + fscache_stat_unchecked(&fscache_n_acquires_null);
45105 _leave(" [no parent]");
45106 return NULL;
45107 }
45108 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
45109 /* allocate and initialise a cookie */
45110 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
45111 if (!cookie) {
45112 - fscache_stat(&fscache_n_acquires_oom);
45113 + fscache_stat_unchecked(&fscache_n_acquires_oom);
45114 _leave(" [ENOMEM]");
45115 return NULL;
45116 }
45117 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45118
45119 switch (cookie->def->type) {
45120 case FSCACHE_COOKIE_TYPE_INDEX:
45121 - fscache_stat(&fscache_n_cookie_index);
45122 + fscache_stat_unchecked(&fscache_n_cookie_index);
45123 break;
45124 case FSCACHE_COOKIE_TYPE_DATAFILE:
45125 - fscache_stat(&fscache_n_cookie_data);
45126 + fscache_stat_unchecked(&fscache_n_cookie_data);
45127 break;
45128 default:
45129 - fscache_stat(&fscache_n_cookie_special);
45130 + fscache_stat_unchecked(&fscache_n_cookie_special);
45131 break;
45132 }
45133
45134 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45135 if (fscache_acquire_non_index_cookie(cookie) < 0) {
45136 atomic_dec(&parent->n_children);
45137 __fscache_cookie_put(cookie);
45138 - fscache_stat(&fscache_n_acquires_nobufs);
45139 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45140 _leave(" = NULL");
45141 return NULL;
45142 }
45143 }
45144
45145 - fscache_stat(&fscache_n_acquires_ok);
45146 + fscache_stat_unchecked(&fscache_n_acquires_ok);
45147 _leave(" = %p", cookie);
45148 return cookie;
45149 }
45150 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
45151 cache = fscache_select_cache_for_object(cookie->parent);
45152 if (!cache) {
45153 up_read(&fscache_addremove_sem);
45154 - fscache_stat(&fscache_n_acquires_no_cache);
45155 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45156 _leave(" = -ENOMEDIUM [no cache]");
45157 return -ENOMEDIUM;
45158 }
45159 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
45160 object = cache->ops->alloc_object(cache, cookie);
45161 fscache_stat_d(&fscache_n_cop_alloc_object);
45162 if (IS_ERR(object)) {
45163 - fscache_stat(&fscache_n_object_no_alloc);
45164 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
45165 ret = PTR_ERR(object);
45166 goto error;
45167 }
45168
45169 - fscache_stat(&fscache_n_object_alloc);
45170 + fscache_stat_unchecked(&fscache_n_object_alloc);
45171
45172 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
45173
45174 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
45175 struct fscache_object *object;
45176 struct hlist_node *_p;
45177
45178 - fscache_stat(&fscache_n_updates);
45179 + fscache_stat_unchecked(&fscache_n_updates);
45180
45181 if (!cookie) {
45182 - fscache_stat(&fscache_n_updates_null);
45183 + fscache_stat_unchecked(&fscache_n_updates_null);
45184 _leave(" [no cookie]");
45185 return;
45186 }
45187 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45188 struct fscache_object *object;
45189 unsigned long event;
45190
45191 - fscache_stat(&fscache_n_relinquishes);
45192 + fscache_stat_unchecked(&fscache_n_relinquishes);
45193 if (retire)
45194 - fscache_stat(&fscache_n_relinquishes_retire);
45195 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
45196
45197 if (!cookie) {
45198 - fscache_stat(&fscache_n_relinquishes_null);
45199 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
45200 _leave(" [no cookie]");
45201 return;
45202 }
45203 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45204
45205 /* wait for the cookie to finish being instantiated (or to fail) */
45206 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
45207 - fscache_stat(&fscache_n_relinquishes_waitcrt);
45208 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
45209 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45210 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45211 }
45212 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
45213 index f6aad48..88dcf26 100644
45214 --- a/fs/fscache/internal.h
45215 +++ b/fs/fscache/internal.h
45216 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
45217 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45218 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45219
45220 -extern atomic_t fscache_n_op_pend;
45221 -extern atomic_t fscache_n_op_run;
45222 -extern atomic_t fscache_n_op_enqueue;
45223 -extern atomic_t fscache_n_op_deferred_release;
45224 -extern atomic_t fscache_n_op_release;
45225 -extern atomic_t fscache_n_op_gc;
45226 -extern atomic_t fscache_n_op_cancelled;
45227 -extern atomic_t fscache_n_op_rejected;
45228 +extern atomic_unchecked_t fscache_n_op_pend;
45229 +extern atomic_unchecked_t fscache_n_op_run;
45230 +extern atomic_unchecked_t fscache_n_op_enqueue;
45231 +extern atomic_unchecked_t fscache_n_op_deferred_release;
45232 +extern atomic_unchecked_t fscache_n_op_release;
45233 +extern atomic_unchecked_t fscache_n_op_gc;
45234 +extern atomic_unchecked_t fscache_n_op_cancelled;
45235 +extern atomic_unchecked_t fscache_n_op_rejected;
45236
45237 -extern atomic_t fscache_n_attr_changed;
45238 -extern atomic_t fscache_n_attr_changed_ok;
45239 -extern atomic_t fscache_n_attr_changed_nobufs;
45240 -extern atomic_t fscache_n_attr_changed_nomem;
45241 -extern atomic_t fscache_n_attr_changed_calls;
45242 +extern atomic_unchecked_t fscache_n_attr_changed;
45243 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
45244 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45245 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45246 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
45247
45248 -extern atomic_t fscache_n_allocs;
45249 -extern atomic_t fscache_n_allocs_ok;
45250 -extern atomic_t fscache_n_allocs_wait;
45251 -extern atomic_t fscache_n_allocs_nobufs;
45252 -extern atomic_t fscache_n_allocs_intr;
45253 -extern atomic_t fscache_n_allocs_object_dead;
45254 -extern atomic_t fscache_n_alloc_ops;
45255 -extern atomic_t fscache_n_alloc_op_waits;
45256 +extern atomic_unchecked_t fscache_n_allocs;
45257 +extern atomic_unchecked_t fscache_n_allocs_ok;
45258 +extern atomic_unchecked_t fscache_n_allocs_wait;
45259 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
45260 +extern atomic_unchecked_t fscache_n_allocs_intr;
45261 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
45262 +extern atomic_unchecked_t fscache_n_alloc_ops;
45263 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
45264
45265 -extern atomic_t fscache_n_retrievals;
45266 -extern atomic_t fscache_n_retrievals_ok;
45267 -extern atomic_t fscache_n_retrievals_wait;
45268 -extern atomic_t fscache_n_retrievals_nodata;
45269 -extern atomic_t fscache_n_retrievals_nobufs;
45270 -extern atomic_t fscache_n_retrievals_intr;
45271 -extern atomic_t fscache_n_retrievals_nomem;
45272 -extern atomic_t fscache_n_retrievals_object_dead;
45273 -extern atomic_t fscache_n_retrieval_ops;
45274 -extern atomic_t fscache_n_retrieval_op_waits;
45275 +extern atomic_unchecked_t fscache_n_retrievals;
45276 +extern atomic_unchecked_t fscache_n_retrievals_ok;
45277 +extern atomic_unchecked_t fscache_n_retrievals_wait;
45278 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
45279 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45280 +extern atomic_unchecked_t fscache_n_retrievals_intr;
45281 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
45282 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45283 +extern atomic_unchecked_t fscache_n_retrieval_ops;
45284 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
45285
45286 -extern atomic_t fscache_n_stores;
45287 -extern atomic_t fscache_n_stores_ok;
45288 -extern atomic_t fscache_n_stores_again;
45289 -extern atomic_t fscache_n_stores_nobufs;
45290 -extern atomic_t fscache_n_stores_oom;
45291 -extern atomic_t fscache_n_store_ops;
45292 -extern atomic_t fscache_n_store_calls;
45293 -extern atomic_t fscache_n_store_pages;
45294 -extern atomic_t fscache_n_store_radix_deletes;
45295 -extern atomic_t fscache_n_store_pages_over_limit;
45296 +extern atomic_unchecked_t fscache_n_stores;
45297 +extern atomic_unchecked_t fscache_n_stores_ok;
45298 +extern atomic_unchecked_t fscache_n_stores_again;
45299 +extern atomic_unchecked_t fscache_n_stores_nobufs;
45300 +extern atomic_unchecked_t fscache_n_stores_oom;
45301 +extern atomic_unchecked_t fscache_n_store_ops;
45302 +extern atomic_unchecked_t fscache_n_store_calls;
45303 +extern atomic_unchecked_t fscache_n_store_pages;
45304 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
45305 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
45306
45307 -extern atomic_t fscache_n_store_vmscan_not_storing;
45308 -extern atomic_t fscache_n_store_vmscan_gone;
45309 -extern atomic_t fscache_n_store_vmscan_busy;
45310 -extern atomic_t fscache_n_store_vmscan_cancelled;
45311 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45312 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45313 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45314 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45315
45316 -extern atomic_t fscache_n_marks;
45317 -extern atomic_t fscache_n_uncaches;
45318 +extern atomic_unchecked_t fscache_n_marks;
45319 +extern atomic_unchecked_t fscache_n_uncaches;
45320
45321 -extern atomic_t fscache_n_acquires;
45322 -extern atomic_t fscache_n_acquires_null;
45323 -extern atomic_t fscache_n_acquires_no_cache;
45324 -extern atomic_t fscache_n_acquires_ok;
45325 -extern atomic_t fscache_n_acquires_nobufs;
45326 -extern atomic_t fscache_n_acquires_oom;
45327 +extern atomic_unchecked_t fscache_n_acquires;
45328 +extern atomic_unchecked_t fscache_n_acquires_null;
45329 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
45330 +extern atomic_unchecked_t fscache_n_acquires_ok;
45331 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
45332 +extern atomic_unchecked_t fscache_n_acquires_oom;
45333
45334 -extern atomic_t fscache_n_updates;
45335 -extern atomic_t fscache_n_updates_null;
45336 -extern atomic_t fscache_n_updates_run;
45337 +extern atomic_unchecked_t fscache_n_updates;
45338 +extern atomic_unchecked_t fscache_n_updates_null;
45339 +extern atomic_unchecked_t fscache_n_updates_run;
45340
45341 -extern atomic_t fscache_n_relinquishes;
45342 -extern atomic_t fscache_n_relinquishes_null;
45343 -extern atomic_t fscache_n_relinquishes_waitcrt;
45344 -extern atomic_t fscache_n_relinquishes_retire;
45345 +extern atomic_unchecked_t fscache_n_relinquishes;
45346 +extern atomic_unchecked_t fscache_n_relinquishes_null;
45347 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45348 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
45349
45350 -extern atomic_t fscache_n_cookie_index;
45351 -extern atomic_t fscache_n_cookie_data;
45352 -extern atomic_t fscache_n_cookie_special;
45353 +extern atomic_unchecked_t fscache_n_cookie_index;
45354 +extern atomic_unchecked_t fscache_n_cookie_data;
45355 +extern atomic_unchecked_t fscache_n_cookie_special;
45356
45357 -extern atomic_t fscache_n_object_alloc;
45358 -extern atomic_t fscache_n_object_no_alloc;
45359 -extern atomic_t fscache_n_object_lookups;
45360 -extern atomic_t fscache_n_object_lookups_negative;
45361 -extern atomic_t fscache_n_object_lookups_positive;
45362 -extern atomic_t fscache_n_object_lookups_timed_out;
45363 -extern atomic_t fscache_n_object_created;
45364 -extern atomic_t fscache_n_object_avail;
45365 -extern atomic_t fscache_n_object_dead;
45366 +extern atomic_unchecked_t fscache_n_object_alloc;
45367 +extern atomic_unchecked_t fscache_n_object_no_alloc;
45368 +extern atomic_unchecked_t fscache_n_object_lookups;
45369 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
45370 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
45371 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
45372 +extern atomic_unchecked_t fscache_n_object_created;
45373 +extern atomic_unchecked_t fscache_n_object_avail;
45374 +extern atomic_unchecked_t fscache_n_object_dead;
45375
45376 -extern atomic_t fscache_n_checkaux_none;
45377 -extern atomic_t fscache_n_checkaux_okay;
45378 -extern atomic_t fscache_n_checkaux_update;
45379 -extern atomic_t fscache_n_checkaux_obsolete;
45380 +extern atomic_unchecked_t fscache_n_checkaux_none;
45381 +extern atomic_unchecked_t fscache_n_checkaux_okay;
45382 +extern atomic_unchecked_t fscache_n_checkaux_update;
45383 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
45384
45385 extern atomic_t fscache_n_cop_alloc_object;
45386 extern atomic_t fscache_n_cop_lookup_object;
45387 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
45388 atomic_inc(stat);
45389 }
45390
45391 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
45392 +{
45393 + atomic_inc_unchecked(stat);
45394 +}
45395 +
45396 static inline void fscache_stat_d(atomic_t *stat)
45397 {
45398 atomic_dec(stat);
45399 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
45400
45401 #define __fscache_stat(stat) (NULL)
45402 #define fscache_stat(stat) do {} while (0)
45403 +#define fscache_stat_unchecked(stat) do {} while (0)
45404 #define fscache_stat_d(stat) do {} while (0)
45405 #endif
45406
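The hunks above are the heart of this part of the patch: every FS-Cache statistics counter moves from atomic_t to atomic_unchecked_t, and a fscache_stat_unchecked() helper is added beside fscache_stat(), so that REFCOUNT-style overflow protection stays on real reference counts while pure statistics are left free to wrap. The following is a small standalone C model of that split, not code from the patch; the type and helper names (stat_checked_t, stat_unchecked_t, checked_inc, unchecked_inc) are invented for the illustration and the overflow test is simplified to a plain compare.

/*
 * Standalone model (not from the patch) of the split between overflow-checked
 * and wrap-tolerant counters.  All names here are illustrative only.
 */
#include <limits.h>
#include <stdio.h>

typedef struct { int counter; } stat_checked_t;   /* plays the role of atomic_t           */
typedef struct { int counter; } stat_unchecked_t; /* plays the role of atomic_unchecked_t */

static int checked_inc(stat_checked_t *v)
{
    /* Simplified: the real protection does the test and the add atomically. */
    if (__atomic_load_n(&v->counter, __ATOMIC_RELAXED) == INT_MAX)
        return -1;                                  /* refuse to wrap */
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    return 0;
}

static void unchecked_inc(stat_unchecked_t *v)
{
    /* Statistics are allowed to wrap, so no overflow policing at all. */
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
    stat_unchecked_t n_stores = { INT_MAX };
    stat_checked_t   refcount = { INT_MAX };

    unchecked_inc(&n_stores);   /* wraps silently; harmless for a statistic */
    printf("n_stores wrapped to %d\n", n_stores.counter);

    if (checked_inc(&refcount) < 0)
        printf("refcount increment rejected instead of wrapping\n");
    return 0;
}

Compiled with gcc and run, the unchecked counter wraps to INT_MIN while the checked increment is refused, which mirrors the behavioural difference the type split is presumably after for the counters renamed above.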
45407 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
45408 index b6b897c..0ffff9c 100644
45409 --- a/fs/fscache/object.c
45410 +++ b/fs/fscache/object.c
45411 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45412 /* update the object metadata on disk */
45413 case FSCACHE_OBJECT_UPDATING:
45414 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45415 - fscache_stat(&fscache_n_updates_run);
45416 + fscache_stat_unchecked(&fscache_n_updates_run);
45417 fscache_stat(&fscache_n_cop_update_object);
45418 object->cache->ops->update_object(object);
45419 fscache_stat_d(&fscache_n_cop_update_object);
45420 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45421 spin_lock(&object->lock);
45422 object->state = FSCACHE_OBJECT_DEAD;
45423 spin_unlock(&object->lock);
45424 - fscache_stat(&fscache_n_object_dead);
45425 + fscache_stat_unchecked(&fscache_n_object_dead);
45426 goto terminal_transit;
45427
45428 /* handle the parent cache of this object being withdrawn from
45429 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45430 spin_lock(&object->lock);
45431 object->state = FSCACHE_OBJECT_DEAD;
45432 spin_unlock(&object->lock);
45433 - fscache_stat(&fscache_n_object_dead);
45434 + fscache_stat_unchecked(&fscache_n_object_dead);
45435 goto terminal_transit;
45436
45437 /* complain about the object being woken up once it is
45438 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45439 parent->cookie->def->name, cookie->def->name,
45440 object->cache->tag->name);
45441
45442 - fscache_stat(&fscache_n_object_lookups);
45443 + fscache_stat_unchecked(&fscache_n_object_lookups);
45444 fscache_stat(&fscache_n_cop_lookup_object);
45445 ret = object->cache->ops->lookup_object(object);
45446 fscache_stat_d(&fscache_n_cop_lookup_object);
45447 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45448 if (ret == -ETIMEDOUT) {
45449 /* probably stuck behind another object, so move this one to
45450 * the back of the queue */
45451 - fscache_stat(&fscache_n_object_lookups_timed_out);
45452 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45453 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45454 }
45455
45456 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45457
45458 spin_lock(&object->lock);
45459 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45460 - fscache_stat(&fscache_n_object_lookups_negative);
45461 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45462
45463 /* transit here to allow write requests to begin stacking up
45464 * and read requests to begin returning ENODATA */
45465 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45466 * result, in which case there may be data available */
45467 spin_lock(&object->lock);
45468 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45469 - fscache_stat(&fscache_n_object_lookups_positive);
45470 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45471
45472 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45473
45474 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45475 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45476 } else {
45477 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45478 - fscache_stat(&fscache_n_object_created);
45479 + fscache_stat_unchecked(&fscache_n_object_created);
45480
45481 object->state = FSCACHE_OBJECT_AVAILABLE;
45482 spin_unlock(&object->lock);
45483 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45484 fscache_enqueue_dependents(object);
45485
45486 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45487 - fscache_stat(&fscache_n_object_avail);
45488 + fscache_stat_unchecked(&fscache_n_object_avail);
45489
45490 _leave("");
45491 }
45492 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45493 enum fscache_checkaux result;
45494
45495 if (!object->cookie->def->check_aux) {
45496 - fscache_stat(&fscache_n_checkaux_none);
45497 + fscache_stat_unchecked(&fscache_n_checkaux_none);
45498 return FSCACHE_CHECKAUX_OKAY;
45499 }
45500
45501 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45502 switch (result) {
45503 /* entry okay as is */
45504 case FSCACHE_CHECKAUX_OKAY:
45505 - fscache_stat(&fscache_n_checkaux_okay);
45506 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
45507 break;
45508
45509 /* entry requires update */
45510 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45511 - fscache_stat(&fscache_n_checkaux_update);
45512 + fscache_stat_unchecked(&fscache_n_checkaux_update);
45513 break;
45514
45515 /* entry requires deletion */
45516 case FSCACHE_CHECKAUX_OBSOLETE:
45517 - fscache_stat(&fscache_n_checkaux_obsolete);
45518 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45519 break;
45520
45521 default:
45522 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45523 index 30afdfa..2256596 100644
45524 --- a/fs/fscache/operation.c
45525 +++ b/fs/fscache/operation.c
45526 @@ -17,7 +17,7 @@
45527 #include <linux/slab.h>
45528 #include "internal.h"
45529
45530 -atomic_t fscache_op_debug_id;
45531 +atomic_unchecked_t fscache_op_debug_id;
45532 EXPORT_SYMBOL(fscache_op_debug_id);
45533
45534 /**
45535 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45536 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45537 ASSERTCMP(atomic_read(&op->usage), >, 0);
45538
45539 - fscache_stat(&fscache_n_op_enqueue);
45540 + fscache_stat_unchecked(&fscache_n_op_enqueue);
45541 switch (op->flags & FSCACHE_OP_TYPE) {
45542 case FSCACHE_OP_ASYNC:
45543 _debug("queue async");
45544 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45545 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45546 if (op->processor)
45547 fscache_enqueue_operation(op);
45548 - fscache_stat(&fscache_n_op_run);
45549 + fscache_stat_unchecked(&fscache_n_op_run);
45550 }
45551
45552 /*
45553 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45554 if (object->n_ops > 1) {
45555 atomic_inc(&op->usage);
45556 list_add_tail(&op->pend_link, &object->pending_ops);
45557 - fscache_stat(&fscache_n_op_pend);
45558 + fscache_stat_unchecked(&fscache_n_op_pend);
45559 } else if (!list_empty(&object->pending_ops)) {
45560 atomic_inc(&op->usage);
45561 list_add_tail(&op->pend_link, &object->pending_ops);
45562 - fscache_stat(&fscache_n_op_pend);
45563 + fscache_stat_unchecked(&fscache_n_op_pend);
45564 fscache_start_operations(object);
45565 } else {
45566 ASSERTCMP(object->n_in_progress, ==, 0);
45567 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45568 object->n_exclusive++; /* reads and writes must wait */
45569 atomic_inc(&op->usage);
45570 list_add_tail(&op->pend_link, &object->pending_ops);
45571 - fscache_stat(&fscache_n_op_pend);
45572 + fscache_stat_unchecked(&fscache_n_op_pend);
45573 ret = 0;
45574 } else {
45575 /* not allowed to submit ops in any other state */
45576 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45577 if (object->n_exclusive > 0) {
45578 atomic_inc(&op->usage);
45579 list_add_tail(&op->pend_link, &object->pending_ops);
45580 - fscache_stat(&fscache_n_op_pend);
45581 + fscache_stat_unchecked(&fscache_n_op_pend);
45582 } else if (!list_empty(&object->pending_ops)) {
45583 atomic_inc(&op->usage);
45584 list_add_tail(&op->pend_link, &object->pending_ops);
45585 - fscache_stat(&fscache_n_op_pend);
45586 + fscache_stat_unchecked(&fscache_n_op_pend);
45587 fscache_start_operations(object);
45588 } else {
45589 ASSERTCMP(object->n_exclusive, ==, 0);
45590 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45591 object->n_ops++;
45592 atomic_inc(&op->usage);
45593 list_add_tail(&op->pend_link, &object->pending_ops);
45594 - fscache_stat(&fscache_n_op_pend);
45595 + fscache_stat_unchecked(&fscache_n_op_pend);
45596 ret = 0;
45597 } else if (object->state == FSCACHE_OBJECT_DYING ||
45598 object->state == FSCACHE_OBJECT_LC_DYING ||
45599 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45600 - fscache_stat(&fscache_n_op_rejected);
45601 + fscache_stat_unchecked(&fscache_n_op_rejected);
45602 ret = -ENOBUFS;
45603 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45604 fscache_report_unexpected_submission(object, op, ostate);
45605 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45606
45607 ret = -EBUSY;
45608 if (!list_empty(&op->pend_link)) {
45609 - fscache_stat(&fscache_n_op_cancelled);
45610 + fscache_stat_unchecked(&fscache_n_op_cancelled);
45611 list_del_init(&op->pend_link);
45612 object->n_ops--;
45613 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45614 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45615 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45616 BUG();
45617
45618 - fscache_stat(&fscache_n_op_release);
45619 + fscache_stat_unchecked(&fscache_n_op_release);
45620
45621 if (op->release) {
45622 op->release(op);
45623 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45624 * lock, and defer it otherwise */
45625 if (!spin_trylock(&object->lock)) {
45626 _debug("defer put");
45627 - fscache_stat(&fscache_n_op_deferred_release);
45628 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
45629
45630 cache = object->cache;
45631 spin_lock(&cache->op_gc_list_lock);
45632 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45633
45634 _debug("GC DEFERRED REL OBJ%x OP%x",
45635 object->debug_id, op->debug_id);
45636 - fscache_stat(&fscache_n_op_gc);
45637 + fscache_stat_unchecked(&fscache_n_op_gc);
45638
45639 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45640
45641 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45642 index 3f7a59b..cf196cc 100644
45643 --- a/fs/fscache/page.c
45644 +++ b/fs/fscache/page.c
45645 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45646 val = radix_tree_lookup(&cookie->stores, page->index);
45647 if (!val) {
45648 rcu_read_unlock();
45649 - fscache_stat(&fscache_n_store_vmscan_not_storing);
45650 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45651 __fscache_uncache_page(cookie, page);
45652 return true;
45653 }
45654 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45655 spin_unlock(&cookie->stores_lock);
45656
45657 if (xpage) {
45658 - fscache_stat(&fscache_n_store_vmscan_cancelled);
45659 - fscache_stat(&fscache_n_store_radix_deletes);
45660 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45661 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45662 ASSERTCMP(xpage, ==, page);
45663 } else {
45664 - fscache_stat(&fscache_n_store_vmscan_gone);
45665 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45666 }
45667
45668 wake_up_bit(&cookie->flags, 0);
45669 @@ -107,7 +107,7 @@ page_busy:
45670 /* we might want to wait here, but that could deadlock the allocator as
45671 * the work threads writing to the cache may all end up sleeping
45672 * on memory allocation */
45673 - fscache_stat(&fscache_n_store_vmscan_busy);
45674 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45675 return false;
45676 }
45677 EXPORT_SYMBOL(__fscache_maybe_release_page);
45678 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45679 FSCACHE_COOKIE_STORING_TAG);
45680 if (!radix_tree_tag_get(&cookie->stores, page->index,
45681 FSCACHE_COOKIE_PENDING_TAG)) {
45682 - fscache_stat(&fscache_n_store_radix_deletes);
45683 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45684 xpage = radix_tree_delete(&cookie->stores, page->index);
45685 }
45686 spin_unlock(&cookie->stores_lock);
45687 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45688
45689 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45690
45691 - fscache_stat(&fscache_n_attr_changed_calls);
45692 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45693
45694 if (fscache_object_is_active(object)) {
45695 fscache_stat(&fscache_n_cop_attr_changed);
45696 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45697
45698 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45699
45700 - fscache_stat(&fscache_n_attr_changed);
45701 + fscache_stat_unchecked(&fscache_n_attr_changed);
45702
45703 op = kzalloc(sizeof(*op), GFP_KERNEL);
45704 if (!op) {
45705 - fscache_stat(&fscache_n_attr_changed_nomem);
45706 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45707 _leave(" = -ENOMEM");
45708 return -ENOMEM;
45709 }
45710 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45711 if (fscache_submit_exclusive_op(object, op) < 0)
45712 goto nobufs;
45713 spin_unlock(&cookie->lock);
45714 - fscache_stat(&fscache_n_attr_changed_ok);
45715 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45716 fscache_put_operation(op);
45717 _leave(" = 0");
45718 return 0;
45719 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45720 nobufs:
45721 spin_unlock(&cookie->lock);
45722 kfree(op);
45723 - fscache_stat(&fscache_n_attr_changed_nobufs);
45724 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45725 _leave(" = %d", -ENOBUFS);
45726 return -ENOBUFS;
45727 }
45728 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45729 /* allocate a retrieval operation and attempt to submit it */
45730 op = kzalloc(sizeof(*op), GFP_NOIO);
45731 if (!op) {
45732 - fscache_stat(&fscache_n_retrievals_nomem);
45733 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45734 return NULL;
45735 }
45736
45737 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45738 return 0;
45739 }
45740
45741 - fscache_stat(&fscache_n_retrievals_wait);
45742 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
45743
45744 jif = jiffies;
45745 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45746 fscache_wait_bit_interruptible,
45747 TASK_INTERRUPTIBLE) != 0) {
45748 - fscache_stat(&fscache_n_retrievals_intr);
45749 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45750 _leave(" = -ERESTARTSYS");
45751 return -ERESTARTSYS;
45752 }
45753 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45754 */
45755 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45756 struct fscache_retrieval *op,
45757 - atomic_t *stat_op_waits,
45758 - atomic_t *stat_object_dead)
45759 + atomic_unchecked_t *stat_op_waits,
45760 + atomic_unchecked_t *stat_object_dead)
45761 {
45762 int ret;
45763
45764 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45765 goto check_if_dead;
45766
45767 _debug(">>> WT");
45768 - fscache_stat(stat_op_waits);
45769 + fscache_stat_unchecked(stat_op_waits);
45770 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45771 fscache_wait_bit_interruptible,
45772 TASK_INTERRUPTIBLE) < 0) {
45773 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45774
45775 check_if_dead:
45776 if (unlikely(fscache_object_is_dead(object))) {
45777 - fscache_stat(stat_object_dead);
45778 + fscache_stat_unchecked(stat_object_dead);
45779 return -ENOBUFS;
45780 }
45781 return 0;
45782 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45783
45784 _enter("%p,%p,,,", cookie, page);
45785
45786 - fscache_stat(&fscache_n_retrievals);
45787 + fscache_stat_unchecked(&fscache_n_retrievals);
45788
45789 if (hlist_empty(&cookie->backing_objects))
45790 goto nobufs;
45791 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45792 goto nobufs_unlock;
45793 spin_unlock(&cookie->lock);
45794
45795 - fscache_stat(&fscache_n_retrieval_ops);
45796 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45797
45798 /* pin the netfs read context in case we need to do the actual netfs
45799 * read because we've encountered a cache read failure */
45800 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45801
45802 error:
45803 if (ret == -ENOMEM)
45804 - fscache_stat(&fscache_n_retrievals_nomem);
45805 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45806 else if (ret == -ERESTARTSYS)
45807 - fscache_stat(&fscache_n_retrievals_intr);
45808 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45809 else if (ret == -ENODATA)
45810 - fscache_stat(&fscache_n_retrievals_nodata);
45811 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45812 else if (ret < 0)
45813 - fscache_stat(&fscache_n_retrievals_nobufs);
45814 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45815 else
45816 - fscache_stat(&fscache_n_retrievals_ok);
45817 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45818
45819 fscache_put_retrieval(op);
45820 _leave(" = %d", ret);
45821 @@ -429,7 +429,7 @@ nobufs_unlock:
45822 spin_unlock(&cookie->lock);
45823 kfree(op);
45824 nobufs:
45825 - fscache_stat(&fscache_n_retrievals_nobufs);
45826 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45827 _leave(" = -ENOBUFS");
45828 return -ENOBUFS;
45829 }
45830 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45831
45832 _enter("%p,,%d,,,", cookie, *nr_pages);
45833
45834 - fscache_stat(&fscache_n_retrievals);
45835 + fscache_stat_unchecked(&fscache_n_retrievals);
45836
45837 if (hlist_empty(&cookie->backing_objects))
45838 goto nobufs;
45839 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45840 goto nobufs_unlock;
45841 spin_unlock(&cookie->lock);
45842
45843 - fscache_stat(&fscache_n_retrieval_ops);
45844 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45845
45846 /* pin the netfs read context in case we need to do the actual netfs
45847 * read because we've encountered a cache read failure */
45848 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45849
45850 error:
45851 if (ret == -ENOMEM)
45852 - fscache_stat(&fscache_n_retrievals_nomem);
45853 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45854 else if (ret == -ERESTARTSYS)
45855 - fscache_stat(&fscache_n_retrievals_intr);
45856 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45857 else if (ret == -ENODATA)
45858 - fscache_stat(&fscache_n_retrievals_nodata);
45859 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45860 else if (ret < 0)
45861 - fscache_stat(&fscache_n_retrievals_nobufs);
45862 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45863 else
45864 - fscache_stat(&fscache_n_retrievals_ok);
45865 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45866
45867 fscache_put_retrieval(op);
45868 _leave(" = %d", ret);
45869 @@ -545,7 +545,7 @@ nobufs_unlock:
45870 spin_unlock(&cookie->lock);
45871 kfree(op);
45872 nobufs:
45873 - fscache_stat(&fscache_n_retrievals_nobufs);
45874 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45875 _leave(" = -ENOBUFS");
45876 return -ENOBUFS;
45877 }
45878 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45879
45880 _enter("%p,%p,,,", cookie, page);
45881
45882 - fscache_stat(&fscache_n_allocs);
45883 + fscache_stat_unchecked(&fscache_n_allocs);
45884
45885 if (hlist_empty(&cookie->backing_objects))
45886 goto nobufs;
45887 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45888 goto nobufs_unlock;
45889 spin_unlock(&cookie->lock);
45890
45891 - fscache_stat(&fscache_n_alloc_ops);
45892 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45893
45894 ret = fscache_wait_for_retrieval_activation(
45895 object, op,
45896 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45897
45898 error:
45899 if (ret == -ERESTARTSYS)
45900 - fscache_stat(&fscache_n_allocs_intr);
45901 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45902 else if (ret < 0)
45903 - fscache_stat(&fscache_n_allocs_nobufs);
45904 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45905 else
45906 - fscache_stat(&fscache_n_allocs_ok);
45907 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45908
45909 fscache_put_retrieval(op);
45910 _leave(" = %d", ret);
45911 @@ -625,7 +625,7 @@ nobufs_unlock:
45912 spin_unlock(&cookie->lock);
45913 kfree(op);
45914 nobufs:
45915 - fscache_stat(&fscache_n_allocs_nobufs);
45916 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45917 _leave(" = -ENOBUFS");
45918 return -ENOBUFS;
45919 }
45920 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45921
45922 spin_lock(&cookie->stores_lock);
45923
45924 - fscache_stat(&fscache_n_store_calls);
45925 + fscache_stat_unchecked(&fscache_n_store_calls);
45926
45927 /* find a page to store */
45928 page = NULL;
45929 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45930 page = results[0];
45931 _debug("gang %d [%lx]", n, page->index);
45932 if (page->index > op->store_limit) {
45933 - fscache_stat(&fscache_n_store_pages_over_limit);
45934 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45935 goto superseded;
45936 }
45937
45938 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45939 spin_unlock(&cookie->stores_lock);
45940 spin_unlock(&object->lock);
45941
45942 - fscache_stat(&fscache_n_store_pages);
45943 + fscache_stat_unchecked(&fscache_n_store_pages);
45944 fscache_stat(&fscache_n_cop_write_page);
45945 ret = object->cache->ops->write_page(op, page);
45946 fscache_stat_d(&fscache_n_cop_write_page);
45947 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45948 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45949 ASSERT(PageFsCache(page));
45950
45951 - fscache_stat(&fscache_n_stores);
45952 + fscache_stat_unchecked(&fscache_n_stores);
45953
45954 op = kzalloc(sizeof(*op), GFP_NOIO);
45955 if (!op)
45956 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45957 spin_unlock(&cookie->stores_lock);
45958 spin_unlock(&object->lock);
45959
45960 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45961 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45962 op->store_limit = object->store_limit;
45963
45964 if (fscache_submit_op(object, &op->op) < 0)
45965 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45966
45967 spin_unlock(&cookie->lock);
45968 radix_tree_preload_end();
45969 - fscache_stat(&fscache_n_store_ops);
45970 - fscache_stat(&fscache_n_stores_ok);
45971 + fscache_stat_unchecked(&fscache_n_store_ops);
45972 + fscache_stat_unchecked(&fscache_n_stores_ok);
45973
45974 /* the work queue now carries its own ref on the object */
45975 fscache_put_operation(&op->op);
45976 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45977 return 0;
45978
45979 already_queued:
45980 - fscache_stat(&fscache_n_stores_again);
45981 + fscache_stat_unchecked(&fscache_n_stores_again);
45982 already_pending:
45983 spin_unlock(&cookie->stores_lock);
45984 spin_unlock(&object->lock);
45985 spin_unlock(&cookie->lock);
45986 radix_tree_preload_end();
45987 kfree(op);
45988 - fscache_stat(&fscache_n_stores_ok);
45989 + fscache_stat_unchecked(&fscache_n_stores_ok);
45990 _leave(" = 0");
45991 return 0;
45992
45993 @@ -851,14 +851,14 @@ nobufs:
45994 spin_unlock(&cookie->lock);
45995 radix_tree_preload_end();
45996 kfree(op);
45997 - fscache_stat(&fscache_n_stores_nobufs);
45998 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45999 _leave(" = -ENOBUFS");
46000 return -ENOBUFS;
46001
46002 nomem_free:
46003 kfree(op);
46004 nomem:
46005 - fscache_stat(&fscache_n_stores_oom);
46006 + fscache_stat_unchecked(&fscache_n_stores_oom);
46007 _leave(" = -ENOMEM");
46008 return -ENOMEM;
46009 }
46010 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
46011 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46012 ASSERTCMP(page, !=, NULL);
46013
46014 - fscache_stat(&fscache_n_uncaches);
46015 + fscache_stat_unchecked(&fscache_n_uncaches);
46016
46017 /* cache withdrawal may beat us to it */
46018 if (!PageFsCache(page))
46019 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
46020 unsigned long loop;
46021
46022 #ifdef CONFIG_FSCACHE_STATS
46023 - atomic_add(pagevec->nr, &fscache_n_marks);
46024 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
46025 #endif
46026
46027 for (loop = 0; loop < pagevec->nr; loop++) {
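The page.c hunks rely on the fscache_stat_unchecked() wrapper introduced in internal.h existing in both configurations: a real increment when CONFIG_FSCACHE_STATS is set, and an empty do {} while (0) macro when it is not, plus one raw atomic_add_unchecked() inside an #ifdef CONFIG_FSCACHE_STATS block for the pagevec case. Below is a compilable userspace sketch of that compile-away pattern; the macro and variable names are illustrative only.

/*
 * Sketch of the "stats compile away" pattern the new wrappers have to follow.
 * Comment out CONFIG_FSCACHE_STATS to get the no-op variant.
 */
#include <stdio.h>

#define CONFIG_FSCACHE_STATS

#ifdef CONFIG_FSCACHE_STATS
static int n_marks;                                 /* plays the role of fscache_n_marks */
#define stat_add_unchecked(n, v) __atomic_fetch_add((v), (n), __ATOMIC_RELAXED)
#define stat_read(v)             __atomic_load_n((v), __ATOMIC_RELAXED)
#else
/* With stats disabled the call sites must still compile, hence the empty
 * do {} while (0) bodies mirrored by the new fscache_stat_unchecked() macro. */
#define stat_add_unchecked(n, v) do {} while (0)
#define stat_read(v)             0
#endif

int main(void)
{
    stat_add_unchecked(8, &n_marks);   /* e.g. one pagevec of 8 marked pages */
    printf("pages marked: %d\n", (int)stat_read(&n_marks));
    return 0;
}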
46028 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
46029 index 4765190..2a067f2 100644
46030 --- a/fs/fscache/stats.c
46031 +++ b/fs/fscache/stats.c
46032 @@ -18,95 +18,95 @@
46033 /*
46034 * operation counters
46035 */
46036 -atomic_t fscache_n_op_pend;
46037 -atomic_t fscache_n_op_run;
46038 -atomic_t fscache_n_op_enqueue;
46039 -atomic_t fscache_n_op_requeue;
46040 -atomic_t fscache_n_op_deferred_release;
46041 -atomic_t fscache_n_op_release;
46042 -atomic_t fscache_n_op_gc;
46043 -atomic_t fscache_n_op_cancelled;
46044 -atomic_t fscache_n_op_rejected;
46045 +atomic_unchecked_t fscache_n_op_pend;
46046 +atomic_unchecked_t fscache_n_op_run;
46047 +atomic_unchecked_t fscache_n_op_enqueue;
46048 +atomic_unchecked_t fscache_n_op_requeue;
46049 +atomic_unchecked_t fscache_n_op_deferred_release;
46050 +atomic_unchecked_t fscache_n_op_release;
46051 +atomic_unchecked_t fscache_n_op_gc;
46052 +atomic_unchecked_t fscache_n_op_cancelled;
46053 +atomic_unchecked_t fscache_n_op_rejected;
46054
46055 -atomic_t fscache_n_attr_changed;
46056 -atomic_t fscache_n_attr_changed_ok;
46057 -atomic_t fscache_n_attr_changed_nobufs;
46058 -atomic_t fscache_n_attr_changed_nomem;
46059 -atomic_t fscache_n_attr_changed_calls;
46060 +atomic_unchecked_t fscache_n_attr_changed;
46061 +atomic_unchecked_t fscache_n_attr_changed_ok;
46062 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
46063 +atomic_unchecked_t fscache_n_attr_changed_nomem;
46064 +atomic_unchecked_t fscache_n_attr_changed_calls;
46065
46066 -atomic_t fscache_n_allocs;
46067 -atomic_t fscache_n_allocs_ok;
46068 -atomic_t fscache_n_allocs_wait;
46069 -atomic_t fscache_n_allocs_nobufs;
46070 -atomic_t fscache_n_allocs_intr;
46071 -atomic_t fscache_n_allocs_object_dead;
46072 -atomic_t fscache_n_alloc_ops;
46073 -atomic_t fscache_n_alloc_op_waits;
46074 +atomic_unchecked_t fscache_n_allocs;
46075 +atomic_unchecked_t fscache_n_allocs_ok;
46076 +atomic_unchecked_t fscache_n_allocs_wait;
46077 +atomic_unchecked_t fscache_n_allocs_nobufs;
46078 +atomic_unchecked_t fscache_n_allocs_intr;
46079 +atomic_unchecked_t fscache_n_allocs_object_dead;
46080 +atomic_unchecked_t fscache_n_alloc_ops;
46081 +atomic_unchecked_t fscache_n_alloc_op_waits;
46082
46083 -atomic_t fscache_n_retrievals;
46084 -atomic_t fscache_n_retrievals_ok;
46085 -atomic_t fscache_n_retrievals_wait;
46086 -atomic_t fscache_n_retrievals_nodata;
46087 -atomic_t fscache_n_retrievals_nobufs;
46088 -atomic_t fscache_n_retrievals_intr;
46089 -atomic_t fscache_n_retrievals_nomem;
46090 -atomic_t fscache_n_retrievals_object_dead;
46091 -atomic_t fscache_n_retrieval_ops;
46092 -atomic_t fscache_n_retrieval_op_waits;
46093 +atomic_unchecked_t fscache_n_retrievals;
46094 +atomic_unchecked_t fscache_n_retrievals_ok;
46095 +atomic_unchecked_t fscache_n_retrievals_wait;
46096 +atomic_unchecked_t fscache_n_retrievals_nodata;
46097 +atomic_unchecked_t fscache_n_retrievals_nobufs;
46098 +atomic_unchecked_t fscache_n_retrievals_intr;
46099 +atomic_unchecked_t fscache_n_retrievals_nomem;
46100 +atomic_unchecked_t fscache_n_retrievals_object_dead;
46101 +atomic_unchecked_t fscache_n_retrieval_ops;
46102 +atomic_unchecked_t fscache_n_retrieval_op_waits;
46103
46104 -atomic_t fscache_n_stores;
46105 -atomic_t fscache_n_stores_ok;
46106 -atomic_t fscache_n_stores_again;
46107 -atomic_t fscache_n_stores_nobufs;
46108 -atomic_t fscache_n_stores_oom;
46109 -atomic_t fscache_n_store_ops;
46110 -atomic_t fscache_n_store_calls;
46111 -atomic_t fscache_n_store_pages;
46112 -atomic_t fscache_n_store_radix_deletes;
46113 -atomic_t fscache_n_store_pages_over_limit;
46114 +atomic_unchecked_t fscache_n_stores;
46115 +atomic_unchecked_t fscache_n_stores_ok;
46116 +atomic_unchecked_t fscache_n_stores_again;
46117 +atomic_unchecked_t fscache_n_stores_nobufs;
46118 +atomic_unchecked_t fscache_n_stores_oom;
46119 +atomic_unchecked_t fscache_n_store_ops;
46120 +atomic_unchecked_t fscache_n_store_calls;
46121 +atomic_unchecked_t fscache_n_store_pages;
46122 +atomic_unchecked_t fscache_n_store_radix_deletes;
46123 +atomic_unchecked_t fscache_n_store_pages_over_limit;
46124
46125 -atomic_t fscache_n_store_vmscan_not_storing;
46126 -atomic_t fscache_n_store_vmscan_gone;
46127 -atomic_t fscache_n_store_vmscan_busy;
46128 -atomic_t fscache_n_store_vmscan_cancelled;
46129 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46130 +atomic_unchecked_t fscache_n_store_vmscan_gone;
46131 +atomic_unchecked_t fscache_n_store_vmscan_busy;
46132 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46133
46134 -atomic_t fscache_n_marks;
46135 -atomic_t fscache_n_uncaches;
46136 +atomic_unchecked_t fscache_n_marks;
46137 +atomic_unchecked_t fscache_n_uncaches;
46138
46139 -atomic_t fscache_n_acquires;
46140 -atomic_t fscache_n_acquires_null;
46141 -atomic_t fscache_n_acquires_no_cache;
46142 -atomic_t fscache_n_acquires_ok;
46143 -atomic_t fscache_n_acquires_nobufs;
46144 -atomic_t fscache_n_acquires_oom;
46145 +atomic_unchecked_t fscache_n_acquires;
46146 +atomic_unchecked_t fscache_n_acquires_null;
46147 +atomic_unchecked_t fscache_n_acquires_no_cache;
46148 +atomic_unchecked_t fscache_n_acquires_ok;
46149 +atomic_unchecked_t fscache_n_acquires_nobufs;
46150 +atomic_unchecked_t fscache_n_acquires_oom;
46151
46152 -atomic_t fscache_n_updates;
46153 -atomic_t fscache_n_updates_null;
46154 -atomic_t fscache_n_updates_run;
46155 +atomic_unchecked_t fscache_n_updates;
46156 +atomic_unchecked_t fscache_n_updates_null;
46157 +atomic_unchecked_t fscache_n_updates_run;
46158
46159 -atomic_t fscache_n_relinquishes;
46160 -atomic_t fscache_n_relinquishes_null;
46161 -atomic_t fscache_n_relinquishes_waitcrt;
46162 -atomic_t fscache_n_relinquishes_retire;
46163 +atomic_unchecked_t fscache_n_relinquishes;
46164 +atomic_unchecked_t fscache_n_relinquishes_null;
46165 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46166 +atomic_unchecked_t fscache_n_relinquishes_retire;
46167
46168 -atomic_t fscache_n_cookie_index;
46169 -atomic_t fscache_n_cookie_data;
46170 -atomic_t fscache_n_cookie_special;
46171 +atomic_unchecked_t fscache_n_cookie_index;
46172 +atomic_unchecked_t fscache_n_cookie_data;
46173 +atomic_unchecked_t fscache_n_cookie_special;
46174
46175 -atomic_t fscache_n_object_alloc;
46176 -atomic_t fscache_n_object_no_alloc;
46177 -atomic_t fscache_n_object_lookups;
46178 -atomic_t fscache_n_object_lookups_negative;
46179 -atomic_t fscache_n_object_lookups_positive;
46180 -atomic_t fscache_n_object_lookups_timed_out;
46181 -atomic_t fscache_n_object_created;
46182 -atomic_t fscache_n_object_avail;
46183 -atomic_t fscache_n_object_dead;
46184 +atomic_unchecked_t fscache_n_object_alloc;
46185 +atomic_unchecked_t fscache_n_object_no_alloc;
46186 +atomic_unchecked_t fscache_n_object_lookups;
46187 +atomic_unchecked_t fscache_n_object_lookups_negative;
46188 +atomic_unchecked_t fscache_n_object_lookups_positive;
46189 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
46190 +atomic_unchecked_t fscache_n_object_created;
46191 +atomic_unchecked_t fscache_n_object_avail;
46192 +atomic_unchecked_t fscache_n_object_dead;
46193
46194 -atomic_t fscache_n_checkaux_none;
46195 -atomic_t fscache_n_checkaux_okay;
46196 -atomic_t fscache_n_checkaux_update;
46197 -atomic_t fscache_n_checkaux_obsolete;
46198 +atomic_unchecked_t fscache_n_checkaux_none;
46199 +atomic_unchecked_t fscache_n_checkaux_okay;
46200 +atomic_unchecked_t fscache_n_checkaux_update;
46201 +atomic_unchecked_t fscache_n_checkaux_obsolete;
46202
46203 atomic_t fscache_n_cop_alloc_object;
46204 atomic_t fscache_n_cop_lookup_object;
46205 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
46206 seq_puts(m, "FS-Cache statistics\n");
46207
46208 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
46209 - atomic_read(&fscache_n_cookie_index),
46210 - atomic_read(&fscache_n_cookie_data),
46211 - atomic_read(&fscache_n_cookie_special));
46212 + atomic_read_unchecked(&fscache_n_cookie_index),
46213 + atomic_read_unchecked(&fscache_n_cookie_data),
46214 + atomic_read_unchecked(&fscache_n_cookie_special));
46215
46216 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46217 - atomic_read(&fscache_n_object_alloc),
46218 - atomic_read(&fscache_n_object_no_alloc),
46219 - atomic_read(&fscache_n_object_avail),
46220 - atomic_read(&fscache_n_object_dead));
46221 + atomic_read_unchecked(&fscache_n_object_alloc),
46222 + atomic_read_unchecked(&fscache_n_object_no_alloc),
46223 + atomic_read_unchecked(&fscache_n_object_avail),
46224 + atomic_read_unchecked(&fscache_n_object_dead));
46225 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46226 - atomic_read(&fscache_n_checkaux_none),
46227 - atomic_read(&fscache_n_checkaux_okay),
46228 - atomic_read(&fscache_n_checkaux_update),
46229 - atomic_read(&fscache_n_checkaux_obsolete));
46230 + atomic_read_unchecked(&fscache_n_checkaux_none),
46231 + atomic_read_unchecked(&fscache_n_checkaux_okay),
46232 + atomic_read_unchecked(&fscache_n_checkaux_update),
46233 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46234
46235 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46236 - atomic_read(&fscache_n_marks),
46237 - atomic_read(&fscache_n_uncaches));
46238 + atomic_read_unchecked(&fscache_n_marks),
46239 + atomic_read_unchecked(&fscache_n_uncaches));
46240
46241 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46242 " oom=%u\n",
46243 - atomic_read(&fscache_n_acquires),
46244 - atomic_read(&fscache_n_acquires_null),
46245 - atomic_read(&fscache_n_acquires_no_cache),
46246 - atomic_read(&fscache_n_acquires_ok),
46247 - atomic_read(&fscache_n_acquires_nobufs),
46248 - atomic_read(&fscache_n_acquires_oom));
46249 + atomic_read_unchecked(&fscache_n_acquires),
46250 + atomic_read_unchecked(&fscache_n_acquires_null),
46251 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
46252 + atomic_read_unchecked(&fscache_n_acquires_ok),
46253 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
46254 + atomic_read_unchecked(&fscache_n_acquires_oom));
46255
46256 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46257 - atomic_read(&fscache_n_object_lookups),
46258 - atomic_read(&fscache_n_object_lookups_negative),
46259 - atomic_read(&fscache_n_object_lookups_positive),
46260 - atomic_read(&fscache_n_object_created),
46261 - atomic_read(&fscache_n_object_lookups_timed_out));
46262 + atomic_read_unchecked(&fscache_n_object_lookups),
46263 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
46264 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
46265 + atomic_read_unchecked(&fscache_n_object_created),
46266 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
46267
46268 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46269 - atomic_read(&fscache_n_updates),
46270 - atomic_read(&fscache_n_updates_null),
46271 - atomic_read(&fscache_n_updates_run));
46272 + atomic_read_unchecked(&fscache_n_updates),
46273 + atomic_read_unchecked(&fscache_n_updates_null),
46274 + atomic_read_unchecked(&fscache_n_updates_run));
46275
46276 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46277 - atomic_read(&fscache_n_relinquishes),
46278 - atomic_read(&fscache_n_relinquishes_null),
46279 - atomic_read(&fscache_n_relinquishes_waitcrt),
46280 - atomic_read(&fscache_n_relinquishes_retire));
46281 + atomic_read_unchecked(&fscache_n_relinquishes),
46282 + atomic_read_unchecked(&fscache_n_relinquishes_null),
46283 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46284 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
46285
46286 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46287 - atomic_read(&fscache_n_attr_changed),
46288 - atomic_read(&fscache_n_attr_changed_ok),
46289 - atomic_read(&fscache_n_attr_changed_nobufs),
46290 - atomic_read(&fscache_n_attr_changed_nomem),
46291 - atomic_read(&fscache_n_attr_changed_calls));
46292 + atomic_read_unchecked(&fscache_n_attr_changed),
46293 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
46294 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46295 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46296 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
46297
46298 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46299 - atomic_read(&fscache_n_allocs),
46300 - atomic_read(&fscache_n_allocs_ok),
46301 - atomic_read(&fscache_n_allocs_wait),
46302 - atomic_read(&fscache_n_allocs_nobufs),
46303 - atomic_read(&fscache_n_allocs_intr));
46304 + atomic_read_unchecked(&fscache_n_allocs),
46305 + atomic_read_unchecked(&fscache_n_allocs_ok),
46306 + atomic_read_unchecked(&fscache_n_allocs_wait),
46307 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
46308 + atomic_read_unchecked(&fscache_n_allocs_intr));
46309 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46310 - atomic_read(&fscache_n_alloc_ops),
46311 - atomic_read(&fscache_n_alloc_op_waits),
46312 - atomic_read(&fscache_n_allocs_object_dead));
46313 + atomic_read_unchecked(&fscache_n_alloc_ops),
46314 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
46315 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
46316
46317 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46318 " int=%u oom=%u\n",
46319 - atomic_read(&fscache_n_retrievals),
46320 - atomic_read(&fscache_n_retrievals_ok),
46321 - atomic_read(&fscache_n_retrievals_wait),
46322 - atomic_read(&fscache_n_retrievals_nodata),
46323 - atomic_read(&fscache_n_retrievals_nobufs),
46324 - atomic_read(&fscache_n_retrievals_intr),
46325 - atomic_read(&fscache_n_retrievals_nomem));
46326 + atomic_read_unchecked(&fscache_n_retrievals),
46327 + atomic_read_unchecked(&fscache_n_retrievals_ok),
46328 + atomic_read_unchecked(&fscache_n_retrievals_wait),
46329 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
46330 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46331 + atomic_read_unchecked(&fscache_n_retrievals_intr),
46332 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
46333 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46334 - atomic_read(&fscache_n_retrieval_ops),
46335 - atomic_read(&fscache_n_retrieval_op_waits),
46336 - atomic_read(&fscache_n_retrievals_object_dead));
46337 + atomic_read_unchecked(&fscache_n_retrieval_ops),
46338 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46339 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46340
46341 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46342 - atomic_read(&fscache_n_stores),
46343 - atomic_read(&fscache_n_stores_ok),
46344 - atomic_read(&fscache_n_stores_again),
46345 - atomic_read(&fscache_n_stores_nobufs),
46346 - atomic_read(&fscache_n_stores_oom));
46347 + atomic_read_unchecked(&fscache_n_stores),
46348 + atomic_read_unchecked(&fscache_n_stores_ok),
46349 + atomic_read_unchecked(&fscache_n_stores_again),
46350 + atomic_read_unchecked(&fscache_n_stores_nobufs),
46351 + atomic_read_unchecked(&fscache_n_stores_oom));
46352 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
46353 - atomic_read(&fscache_n_store_ops),
46354 - atomic_read(&fscache_n_store_calls),
46355 - atomic_read(&fscache_n_store_pages),
46356 - atomic_read(&fscache_n_store_radix_deletes),
46357 - atomic_read(&fscache_n_store_pages_over_limit));
46358 + atomic_read_unchecked(&fscache_n_store_ops),
46359 + atomic_read_unchecked(&fscache_n_store_calls),
46360 + atomic_read_unchecked(&fscache_n_store_pages),
46361 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
46362 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
46363
46364 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
46365 - atomic_read(&fscache_n_store_vmscan_not_storing),
46366 - atomic_read(&fscache_n_store_vmscan_gone),
46367 - atomic_read(&fscache_n_store_vmscan_busy),
46368 - atomic_read(&fscache_n_store_vmscan_cancelled));
46369 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
46370 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
46371 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
46372 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
46373
46374 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
46375 - atomic_read(&fscache_n_op_pend),
46376 - atomic_read(&fscache_n_op_run),
46377 - atomic_read(&fscache_n_op_enqueue),
46378 - atomic_read(&fscache_n_op_cancelled),
46379 - atomic_read(&fscache_n_op_rejected));
46380 + atomic_read_unchecked(&fscache_n_op_pend),
46381 + atomic_read_unchecked(&fscache_n_op_run),
46382 + atomic_read_unchecked(&fscache_n_op_enqueue),
46383 + atomic_read_unchecked(&fscache_n_op_cancelled),
46384 + atomic_read_unchecked(&fscache_n_op_rejected));
46385 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
46386 - atomic_read(&fscache_n_op_deferred_release),
46387 - atomic_read(&fscache_n_op_release),
46388 - atomic_read(&fscache_n_op_gc));
46389 + atomic_read_unchecked(&fscache_n_op_deferred_release),
46390 + atomic_read_unchecked(&fscache_n_op_release),
46391 + atomic_read_unchecked(&fscache_n_op_gc));
46392
46393 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
46394 atomic_read(&fscache_n_cop_alloc_object),
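Notice that the fscache_n_cop_* counters visible at the end of this hunk are deliberately left as plain atomic_t: unlike the renamed statistics they are incremented by fscache_stat() and decremented again by fscache_stat_d() around each cache-backend call, so they track operations in flight, and the overflow/underflow checking is presumably worth keeping for them. A small standalone illustration of that distinction, with invented names:

/*
 * Model of the difference between an in-flight counter (kept checked) and a
 * monotonic statistic (made unchecked).  Names are illustrative only.
 */
#include <stdio.h>

static int n_cop_lookup_object;   /* in-flight count: inc on entry, dec on exit */
static int n_object_lookups;      /* monotonic statistic: only ever incremented */

static void cache_op_lookup(void)
{
    __atomic_fetch_add(&n_object_lookups, 1, __ATOMIC_RELAXED);    /* statistic        */

    __atomic_fetch_add(&n_cop_lookup_object, 1, __ATOMIC_RELAXED); /* fscache_stat()   */
    /* ... the backend's lookup_object() would run here ... */
    __atomic_fetch_sub(&n_cop_lookup_object, 1, __ATOMIC_RELAXED); /* fscache_stat_d() */
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        cache_op_lookup();

    /* The paired counter returns to zero; drifting or wrapping here would be a
     * bug, which is what the checked atomic_t type exists to catch. */
    printf("in flight now: %d, lookups ever: %d\n",
           n_cop_lookup_object, n_object_lookups);
    return 0;
}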
46395 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
46396 index 3426521..3b75162 100644
46397 --- a/fs/fuse/cuse.c
46398 +++ b/fs/fuse/cuse.c
46399 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
46400 INIT_LIST_HEAD(&cuse_conntbl[i]);
46401
46402 /* inherit and extend fuse_dev_operations */
46403 - cuse_channel_fops = fuse_dev_operations;
46404 - cuse_channel_fops.owner = THIS_MODULE;
46405 - cuse_channel_fops.open = cuse_channel_open;
46406 - cuse_channel_fops.release = cuse_channel_release;
46407 + pax_open_kernel();
46408 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46409 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46410 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
46411 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
46412 + pax_close_kernel();
46413
46414 cuse_class = class_create(THIS_MODULE, "cuse");
46415 if (IS_ERR(cuse_class))
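The cuse_init() hunk above shows the other recurring grsecurity pattern in this patch: operations tables such as cuse_channel_fops are treated as read-only, so the runtime "inherit and extend" of fuse_dev_operations has to sit between pax_open_kernel() and pax_close_kernel() and go through casts that defeat the const qualifier. The sketch below is a userspace analogy built on mprotect(), not kernel code; struct fops and the function names are stand-ins invented for the example.

/*
 * Userspace analogy for pax_open_kernel()/pax_close_kernel(): the ops table
 * lives in read-only memory and legitimate updates briefly lift protection.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct fops {                        /* plays the role of struct file_operations */
    int (*open)(void);
    int (*release)(void);
};

static int base_open(void)    { return 1; }
static int cuse_open(void)    { return 2; }
static int cuse_release(void) { return 3; }

int main(void)
{
    size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
    struct fops base = { base_open, NULL };
    struct fops *channel_fops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (channel_fops == MAP_FAILED)
        return 1;

    /* "pax_open_kernel()": the page is writable, so inherit and extend. */
    memcpy(channel_fops, &base, sizeof(base));
    channel_fops->open    = cuse_open;
    channel_fops->release = cuse_release;

    /* "pax_close_kernel()": back to read-only; a stray write would now fault,
     * as a write to a constified ops table would in the hardened kernel. */
    mprotect(channel_fops, pagesz, PROT_READ);

    printf("open() -> %d, release() -> %d\n",
           channel_fops->open(), channel_fops->release());
    munmap(channel_fops, pagesz);
    return 0;
}

The mprotect() pair here only stands in for the write-protection toggling that the real helpers perform on kernel mappings.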
46416 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
46417 index 5f3368a..8306426 100644
46418 --- a/fs/fuse/dev.c
46419 +++ b/fs/fuse/dev.c
46420 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
46421 ret = 0;
46422 pipe_lock(pipe);
46423
46424 - if (!pipe->readers) {
46425 + if (!atomic_read(&pipe->readers)) {
46426 send_sig(SIGPIPE, current, 0);
46427 if (!ret)
46428 ret = -EPIPE;
46429 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
46430 index 2066328..f5add3b 100644
46431 --- a/fs/fuse/dir.c
46432 +++ b/fs/fuse/dir.c
46433 @@ -1175,7 +1175,7 @@ static char *read_link(struct dentry *dentry)
46434 return link;
46435 }
46436
46437 -static void free_link(char *link)
46438 +static void free_link(const char *link)
46439 {
46440 if (!IS_ERR(link))
46441 free_page((unsigned long) link);
46442 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46443 index 5698746..6086012 100644
46444 --- a/fs/gfs2/inode.c
46445 +++ b/fs/gfs2/inode.c
46446 @@ -1487,7 +1487,7 @@ out:
46447
46448 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46449 {
46450 - char *s = nd_get_link(nd);
46451 + const char *s = nd_get_link(nd);
46452 if (!IS_ERR(s))
46453 kfree(s);
46454 }
46455 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46456 index c60267e..193d9e4 100644
46457 --- a/fs/hugetlbfs/inode.c
46458 +++ b/fs/hugetlbfs/inode.c
46459 @@ -902,7 +902,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46460 .kill_sb = kill_litter_super,
46461 };
46462
46463 -static struct vfsmount *hugetlbfs_vfsmount;
46464 +struct vfsmount *hugetlbfs_vfsmount;
46465
46466 static int can_do_hugetlb_shm(void)
46467 {
46468 diff --git a/fs/inode.c b/fs/inode.c
46469 index 83ab215..8842101 100644
46470 --- a/fs/inode.c
46471 +++ b/fs/inode.c
46472 @@ -870,8 +870,8 @@ unsigned int get_next_ino(void)
46473
46474 #ifdef CONFIG_SMP
46475 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46476 - static atomic_t shared_last_ino;
46477 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46478 + static atomic_unchecked_t shared_last_ino;
46479 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46480
46481 res = next - LAST_INO_BATCH;
46482 }
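The get_next_ino() change above only touches the type of the shared batch counter, but the surrounding logic is what makes the wrap harmless: each CPU hands out inode numbers from a private batch of LAST_INO_BATCH values and refills it from the shared counter, which is allowed to roll over, hence atomic_add_return_unchecked(). A single-threaded userspace model of that batching follows; the fake per-CPU array, NR_FAKE_CPUS and the function signature are inventions for the example.

/*
 * Single-threaded model of the batched inode-number allocation in
 * get_next_ino().  Names and the fake per-CPU array are illustrative.
 */
#include <stdio.h>

#define LAST_INO_BATCH 1024
#define NR_FAKE_CPUS   4

static unsigned int shared_last_ino;                /* plays the role of the atomic_unchecked_t */
static unsigned int per_cpu_last_ino[NR_FAKE_CPUS]; /* plays the role of the per-CPU variable   */

static unsigned int get_next_ino(int cpu)
{
    unsigned int res = per_cpu_last_ino[cpu];

    /* Once every LAST_INO_BATCH allocations, refill this CPU's batch from the
     * shared counter.  That counter is free to wrap, since i_ino values only
     * need to be fresh, not strictly increasing forever. */
    if ((res & (LAST_INO_BATCH - 1)) == 0) {
        unsigned int next = __atomic_add_fetch(&shared_last_ino, LAST_INO_BATCH,
                                               __ATOMIC_RELAXED);
        res = next - LAST_INO_BATCH;
    }

    per_cpu_last_ino[cpu] = ++res;
    return res;
}

int main(void)
{
    for (int cpu = 0; cpu < 2; cpu++) {
        unsigned int a = get_next_ino(cpu);
        unsigned int b = get_next_ino(cpu);
        unsigned int c = get_next_ino(cpu);
        printf("cpu%d hands out %u %u %u\n", cpu, a, b, c);
    }
    return 0;
}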
46483 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46484 index eafb8d3..f423d37 100644
46485 --- a/fs/jffs2/erase.c
46486 +++ b/fs/jffs2/erase.c
46487 @@ -438,7 +438,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46488 struct jffs2_unknown_node marker = {
46489 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46490 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46491 - .totlen = cpu_to_je32(c->cleanmarker_size)
46492 + .totlen = cpu_to_je32(c->cleanmarker_size),
46493 + .hdr_crc = cpu_to_je32(0)
46494 };
46495
46496 jffs2_prealloc_raw_node_refs(c, jeb, 1);
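This hunk and the wbuf.c one that follows add an explicit .hdr_crc = cpu_to_je32(0) / constant_cpu_to_je32(0) member to the cleanmarker initializers. In C a designated initializer already zero-fills the members it leaves out, so the object is unchanged; presumably the point is simply to make the zero CRC explicit for readers and for checking tools. The toy program below demonstrates the equivalence; the struct only mimics the shape of jffs2_unknown_node and the field values are arbitrary placeholders.

/*
 * Demonstration that spelling out a zero member in a designated initializer
 * does not change the initialized object.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct unknown_node {
    uint16_t magic;
    uint16_t nodetype;
    uint32_t totlen;
    uint32_t hdr_crc;
};

int main(void)
{
    struct unknown_node implicit = {
        .magic = 0x1985, .nodetype = 0x2003, .totlen = 12
    };
    struct unknown_node explicit_crc = {
        .magic = 0x1985, .nodetype = 0x2003, .totlen = 12, .hdr_crc = 0
    };

    /* Members omitted from a designated initializer are zero-initialized, so
     * the two objects are byte-for-byte identical (this layout has no padding). */
    printf("identical: %s\n",
           memcmp(&implicit, &explicit_crc, sizeof(implicit)) == 0 ? "yes" : "no");
    return 0;
}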
46497 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46498 index 30e8f47..21f600c 100644
46499 --- a/fs/jffs2/wbuf.c
46500 +++ b/fs/jffs2/wbuf.c
46501 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46502 {
46503 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46504 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46505 - .totlen = constant_cpu_to_je32(8)
46506 + .totlen = constant_cpu_to_je32(8),
46507 + .hdr_crc = constant_cpu_to_je32(0)
46508 };
46509
46510 /*
46511 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46512 index 682bca6..86b8e6e 100644
46513 --- a/fs/jfs/super.c
46514 +++ b/fs/jfs/super.c
46515 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
46516
46517 jfs_inode_cachep =
46518 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46519 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46520 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46521 init_once);
46522 if (jfs_inode_cachep == NULL)
46523 return -ENOMEM;
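SLAB_USERCOPY added to the jfs_ip cache above ties into PAX_USERCOPY: only slab caches carrying the flag may be the source or destination of user copies, and the jfs in-core inode presumably contains data that ends up in copy_to_user()/copy_from_user() calls, so its cache has to be whitelisted. Below is a rough userspace model of that whitelist idea, not the real checking code; struct cache, copy_object_to_user() and the sizes are invented for the illustration.

/*
 * Model of a usercopy whitelist: copies are only permitted out of caches that
 * were explicitly flagged as safe for user copies.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct cache {                    /* stands in for struct kmem_cache */
    const char *name;
    size_t      object_size;
    bool        usercopy_ok;      /* models the SLAB_USERCOPY flag */
};

static int copy_object_to_user(void *dst, const void *src, size_t len,
                               const struct cache *c)
{
    /* Refuse to expose objects from caches that were not whitelisted, or more
     * bytes than one object holds; the real check also reports the violation. */
    if (!c->usercopy_ok || len > c->object_size) {
        fprintf(stderr, "usercopy violation: %zu bytes from cache '%s'\n",
                len, c->name);
        return -1;
    }
    memcpy(dst, src, len);        /* the kernel helper crosses the user boundary here */
    return 0;
}

int main(void)
{
    struct cache jfs_ip_cache = { "jfs_ip", 256, true  };  /* flagged like the hunk above */
    struct cache plain_cache  = { "plain",  256, false };

    char object[256] = "data that userspace is allowed to see";
    char user_buf[256];

    if (copy_object_to_user(user_buf, object, sizeof(object), &jfs_ip_cache) == 0)
        printf("copied from jfs_ip: %s\n", user_buf);
    copy_object_to_user(user_buf, object, sizeof(object), &plain_cache);
    return 0;
}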
46524 diff --git a/fs/libfs.c b/fs/libfs.c
46525 index 5b2dbb3..7442d54 100644
46526 --- a/fs/libfs.c
46527 +++ b/fs/libfs.c
46528 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46529
46530 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46531 struct dentry *next;
46532 + char d_name[sizeof(next->d_iname)];
46533 + const unsigned char *name;
46534 +
46535 next = list_entry(p, struct dentry, d_u.d_child);
46536 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46537 if (!simple_positive(next)) {
46538 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46539
46540 spin_unlock(&next->d_lock);
46541 spin_unlock(&dentry->d_lock);
46542 - if (filldir(dirent, next->d_name.name,
46543 + name = next->d_name.name;
46544 + if (name == next->d_iname) {
46545 + memcpy(d_name, name, next->d_name.len);
46546 + name = d_name;
46547 + }
46548 + if (filldir(dirent, name,
46549 next->d_name.len, filp->f_pos,
46550 next->d_inode->i_ino,
46551 dt_type(next->d_inode)) < 0)
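The dcache_readdir() change above copies a short inline name (d_iname) into a stack buffer while the dentry lock is still held and hands only that private copy to filldir(); otherwise a concurrent rename could rewrite the inline name while the callback is reading it. A userspace sketch of the same snapshot-under-lock pattern follows; the types, INLINE_NAME_LEN and the trivial callback are invented for the example.

/*
 * Snapshot-under-lock: copy a small inline buffer while locked, then call out
 * with the private copy only.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define INLINE_NAME_LEN 32

struct fake_dentry {
    pthread_mutex_t lock;
    char name[INLINE_NAME_LEN];   /* plays the role of d_iname, rewritten on rename */
};

static int emit(const char *name)  /* plays the role of the filldir callback */
{
    printf("entry: %s\n", name);
    return 0;
}

static int read_one(struct fake_dentry *d, int (*cb)(const char *))
{
    char snapshot[INLINE_NAME_LEN];

    /* Copy the short inline name while the lock is held, then drop the lock
     * and give the callback only the private copy, so a concurrent rename
     * rewriting the inline buffer cannot race with the callback's read. */
    pthread_mutex_lock(&d->lock);
    memcpy(snapshot, d->name, sizeof(snapshot));
    pthread_mutex_unlock(&d->lock);

    return cb(snapshot);
}

int main(void)
{
    static struct fake_dentry d = { PTHREAD_MUTEX_INITIALIZER, "shortname" };
    return read_one(&d, emit);
}

Build with cc -pthread. The kernel version only bothers with the copy when the name is actually stored inline (name == next->d_iname), as the hunk shows.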
46552 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46553 index 8392cb8..80d6193 100644
46554 --- a/fs/lockd/clntproc.c
46555 +++ b/fs/lockd/clntproc.c
46556 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46557 /*
46558 * Cookie counter for NLM requests
46559 */
46560 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46561 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46562
46563 void nlmclnt_next_cookie(struct nlm_cookie *c)
46564 {
46565 - u32 cookie = atomic_inc_return(&nlm_cookie);
46566 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46567
46568 memcpy(c->data, &cookie, 4);
46569 c->len=4;
46570 diff --git a/fs/locks.c b/fs/locks.c
46571 index 0d68f1f..f216b79 100644
46572 --- a/fs/locks.c
46573 +++ b/fs/locks.c
46574 @@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
46575 return;
46576
46577 if (filp->f_op && filp->f_op->flock) {
46578 - struct file_lock fl = {
46579 + struct file_lock flock = {
46580 .fl_pid = current->tgid,
46581 .fl_file = filp,
46582 .fl_flags = FL_FLOCK,
46583 .fl_type = F_UNLCK,
46584 .fl_end = OFFSET_MAX,
46585 };
46586 - filp->f_op->flock(filp, F_SETLKW, &fl);
46587 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
46588 - fl.fl_ops->fl_release_private(&fl);
46589 + filp->f_op->flock(filp, F_SETLKW, &flock);
46590 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
46591 + flock.fl_ops->fl_release_private(&flock);
46592 }
46593
46594 lock_flocks();
46595 diff --git a/fs/namei.c b/fs/namei.c
46596 index 46ea9cc..c7cf3a3 100644
46597 --- a/fs/namei.c
46598 +++ b/fs/namei.c
46599 @@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46600 if (ret != -EACCES)
46601 return ret;
46602
46603 +#ifdef CONFIG_GRKERNSEC
46604 + /* we'll block if we have to log due to a denied capability use */
46605 + if (mask & MAY_NOT_BLOCK)
46606 + return -ECHILD;
46607 +#endif
46608 +
46609 if (S_ISDIR(inode->i_mode)) {
46610 /* DACs are overridable for directories */
46611 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46612 - return 0;
46613 if (!(mask & MAY_WRITE))
46614 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46615 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46616 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46617 return 0;
46618 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46619 + return 0;
46620 return -EACCES;
46621 }
46622 /*
46623 + * Searching includes executable on directories, else just read.
46624 + */
46625 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46626 + if (mask == MAY_READ)
46627 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46628 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46629 + return 0;
46630 +
46631 + /*
46632 * Read/write DACs are always overridable.
46633 * Executable DACs are overridable when there is
46634 * at least one exec bit set.
46635 @@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
46636 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46637 return 0;
46638
46639 - /*
46640 - * Searching includes executable on directories, else just read.
46641 - */
46642 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46643 - if (mask == MAY_READ)
46644 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46645 - return 0;
46646 -
46647 return -EACCES;
46648 }
46649
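The generic_permission() hunks above reorder the directory DAC-override logic: for a read-only request the code now tries a non-logging CAP_DAC_OVERRIDE check (ns_capable_nolog) or the weaker CAP_DAC_READ_SEARCH first, and only falls back to the audited CAP_DAC_OVERRIDE check afterwards, apparently so that routine lookups by capable processes do not flood the log. Under an RCU walk (MAY_NOT_BLOCK) it bails out with -ECHILD instead, since logging could block. Below is a standalone model of just the ordering; capable_quiet() and capable_logged() are invented stand-ins for ns_capable_nolog() and ns_capable().

/*
 * Model of the reordered directory DAC checks: quiet and weaker checks first,
 * the fully logged override last.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAY_READ  0x1
#define MAY_WRITE 0x2

static bool capable_quiet(const char *cap)  { (void)cap; return true; }
static bool capable_logged(const char *cap)
{
    printf("  audit: capability %s used\n", cap);
    return true;
}

static int dir_permission(int mask)
{
    /* Read-only access tries the quiet override and the weaker read/search
     * capability first; only a write request falls through to the fully
     * logged CAP_DAC_OVERRIDE check. */
    if (!(mask & MAY_WRITE))
        if (capable_quiet("CAP_DAC_OVERRIDE") ||
            capable_logged("CAP_DAC_READ_SEARCH"))
            return 0;
    if (capable_logged("CAP_DAC_OVERRIDE"))
        return 0;
    return -13;                                      /* -EACCES */
}

int main(void)
{
    printf("read-only lookup -> %d\n", dir_permission(MAY_READ));
    printf("write access     -> %d\n", dir_permission(MAY_READ | MAY_WRITE));
    return 0;
}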
46650 @@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46651 return error;
46652 }
46653
46654 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
46655 + dentry->d_inode, dentry, nd->path.mnt)) {
46656 + error = -EACCES;
46657 + *p = ERR_PTR(error); /* no ->put_link(), please */
46658 + path_put(&nd->path);
46659 + return error;
46660 + }
46661 +
46662 nd->last_type = LAST_BIND;
46663 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46664 error = PTR_ERR(*p);
46665 if (!IS_ERR(*p)) {
46666 - char *s = nd_get_link(nd);
46667 + const char *s = nd_get_link(nd);
46668 error = 0;
46669 if (s)
46670 error = __vfs_follow_link(nd, s);
46671 @@ -1650,6 +1666,21 @@ static int path_lookupat(int dfd, const char *name,
46672 if (!err)
46673 err = complete_walk(nd);
46674
46675 + if (!(nd->flags & LOOKUP_PARENT)) {
46676 +#ifdef CONFIG_GRKERNSEC
46677 + if (flags & LOOKUP_RCU) {
46678 + if (!err)
46679 + path_put(&nd->path);
46680 + err = -ECHILD;
46681 + } else
46682 +#endif
46683 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46684 + if (!err)
46685 + path_put(&nd->path);
46686 + err = -ENOENT;
46687 + }
46688 + }
46689 +
46690 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46691 if (!nd->inode->i_op->lookup) {
46692 path_put(&nd->path);
46693 @@ -1677,6 +1708,15 @@ static int do_path_lookup(int dfd, const char *name,
46694 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46695
46696 if (likely(!retval)) {
46697 + if (*name != '/' && nd->path.dentry && nd->inode) {
46698 +#ifdef CONFIG_GRKERNSEC
46699 + if (flags & LOOKUP_RCU)
46700 + return -ECHILD;
46701 +#endif
46702 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46703 + return -ENOENT;
46704 + }
46705 +
46706 if (unlikely(!audit_dummy_context())) {
46707 if (nd->path.dentry && nd->inode)
46708 audit_inode(name, nd->path.dentry);
46709 @@ -2071,6 +2111,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46710 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46711 return -EPERM;
46712
46713 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46714 + return -EPERM;
46715 + if (gr_handle_rawio(inode))
46716 + return -EPERM;
46717 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46718 + return -EACCES;
46719 +
46720 return 0;
46721 }
46722
46723 @@ -2132,6 +2179,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46724 error = complete_walk(nd);
46725 if (error)
46726 return ERR_PTR(error);
46727 +#ifdef CONFIG_GRKERNSEC
46728 + if (nd->flags & LOOKUP_RCU) {
46729 + error = -ECHILD;
46730 + goto exit;
46731 + }
46732 +#endif
46733 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46734 + error = -ENOENT;
46735 + goto exit;
46736 + }
46737 audit_inode(pathname, nd->path.dentry);
46738 if (open_flag & O_CREAT) {
46739 error = -EISDIR;
46740 @@ -2142,6 +2199,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46741 error = complete_walk(nd);
46742 if (error)
46743 return ERR_PTR(error);
46744 +#ifdef CONFIG_GRKERNSEC
46745 + if (nd->flags & LOOKUP_RCU) {
46746 + error = -ECHILD;
46747 + goto exit;
46748 + }
46749 +#endif
46750 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46751 + error = -ENOENT;
46752 + goto exit;
46753 + }
46754 audit_inode(pathname, dir);
46755 goto ok;
46756 }
46757 @@ -2163,6 +2230,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46758 error = complete_walk(nd);
46759 if (error)
46760 return ERR_PTR(error);
46761 +#ifdef CONFIG_GRKERNSEC
46762 + if (nd->flags & LOOKUP_RCU) {
46763 + error = -ECHILD;
46764 + goto exit;
46765 + }
46766 +#endif
46767 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46768 + error = -ENOENT;
46769 + goto exit;
46770 + }
46771
46772 error = -ENOTDIR;
46773 if (nd->flags & LOOKUP_DIRECTORY) {
46774 @@ -2203,6 +2280,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46775 /* Negative dentry, just create the file */
46776 if (!dentry->d_inode) {
46777 umode_t mode = op->mode;
46778 +
46779 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46780 + error = -EACCES;
46781 + goto exit_mutex_unlock;
46782 + }
46783 +
46784 if (!IS_POSIXACL(dir->d_inode))
46785 mode &= ~current_umask();
46786 /*
46787 @@ -2226,6 +2309,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46788 error = vfs_create(dir->d_inode, dentry, mode, nd);
46789 if (error)
46790 goto exit_mutex_unlock;
46791 + else
46792 + gr_handle_create(path->dentry, path->mnt);
46793 mutex_unlock(&dir->d_inode->i_mutex);
46794 dput(nd->path.dentry);
46795 nd->path.dentry = dentry;
46796 @@ -2235,6 +2320,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46797 /*
46798 * It already exists.
46799 */
46800 +
46801 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46802 + error = -ENOENT;
46803 + goto exit_mutex_unlock;
46804 + }
46805 +
46806 + /* only check if O_CREAT is specified; all other checks need to go
46807 + into may_open */
46808 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46809 + error = -EACCES;
46810 + goto exit_mutex_unlock;
46811 + }
46812 +
46813 mutex_unlock(&dir->d_inode->i_mutex);
46814 audit_inode(pathname, path->dentry);
46815
46816 @@ -2447,6 +2545,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46817 *path = nd.path;
46818 return dentry;
46819 eexist:
46820 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46821 + dput(dentry);
46822 + dentry = ERR_PTR(-ENOENT);
46823 + goto fail;
46824 + }
46825 dput(dentry);
46826 dentry = ERR_PTR(-EEXIST);
46827 fail:
46828 @@ -2469,6 +2572,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46829 }
46830 EXPORT_SYMBOL(user_path_create);
46831
46832 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46833 +{
46834 + char *tmp = getname(pathname);
46835 + struct dentry *res;
46836 + if (IS_ERR(tmp))
46837 + return ERR_CAST(tmp);
46838 + res = kern_path_create(dfd, tmp, path, is_dir);
46839 + if (IS_ERR(res))
46840 + putname(tmp);
46841 + else
46842 + *to = tmp;
46843 + return res;
46844 +}
46845 +
46846 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46847 {
46848 int error = may_create(dir, dentry);
46849 @@ -2536,6 +2653,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46850 error = mnt_want_write(path.mnt);
46851 if (error)
46852 goto out_dput;
46853 +
46854 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46855 + error = -EPERM;
46856 + goto out_drop_write;
46857 + }
46858 +
46859 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46860 + error = -EACCES;
46861 + goto out_drop_write;
46862 + }
46863 +
46864 error = security_path_mknod(&path, dentry, mode, dev);
46865 if (error)
46866 goto out_drop_write;
46867 @@ -2553,6 +2681,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46868 }
46869 out_drop_write:
46870 mnt_drop_write(path.mnt);
46871 +
46872 + if (!error)
46873 + gr_handle_create(dentry, path.mnt);
46874 out_dput:
46875 dput(dentry);
46876 mutex_unlock(&path.dentry->d_inode->i_mutex);
46877 @@ -2602,12 +2733,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46878 error = mnt_want_write(path.mnt);
46879 if (error)
46880 goto out_dput;
46881 +
46882 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46883 + error = -EACCES;
46884 + goto out_drop_write;
46885 + }
46886 +
46887 error = security_path_mkdir(&path, dentry, mode);
46888 if (error)
46889 goto out_drop_write;
46890 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46891 out_drop_write:
46892 mnt_drop_write(path.mnt);
46893 +
46894 + if (!error)
46895 + gr_handle_create(dentry, path.mnt);
46896 out_dput:
46897 dput(dentry);
46898 mutex_unlock(&path.dentry->d_inode->i_mutex);
46899 @@ -2687,6 +2827,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46900 char * name;
46901 struct dentry *dentry;
46902 struct nameidata nd;
46903 + ino_t saved_ino = 0;
46904 + dev_t saved_dev = 0;
46905
46906 error = user_path_parent(dfd, pathname, &nd, &name);
46907 if (error)
46908 @@ -2715,6 +2857,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46909 error = -ENOENT;
46910 goto exit3;
46911 }
46912 +
46913 + saved_ino = dentry->d_inode->i_ino;
46914 + saved_dev = gr_get_dev_from_dentry(dentry);
46915 +
46916 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46917 + error = -EACCES;
46918 + goto exit3;
46919 + }
46920 +
46921 error = mnt_want_write(nd.path.mnt);
46922 if (error)
46923 goto exit3;
46924 @@ -2722,6 +2873,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46925 if (error)
46926 goto exit4;
46927 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46928 + if (!error && (saved_dev || saved_ino))
46929 + gr_handle_delete(saved_ino, saved_dev);
46930 exit4:
46931 mnt_drop_write(nd.path.mnt);
46932 exit3:
46933 @@ -2784,6 +2937,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46934 struct dentry *dentry;
46935 struct nameidata nd;
46936 struct inode *inode = NULL;
46937 + ino_t saved_ino = 0;
46938 + dev_t saved_dev = 0;
46939
46940 error = user_path_parent(dfd, pathname, &nd, &name);
46941 if (error)
46942 @@ -2806,6 +2961,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46943 if (!inode)
46944 goto slashes;
46945 ihold(inode);
46946 +
46947 + if (inode->i_nlink <= 1) {
46948 + saved_ino = inode->i_ino;
46949 + saved_dev = gr_get_dev_from_dentry(dentry);
46950 + }
46951 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46952 + error = -EACCES;
46953 + goto exit2;
46954 + }
46955 +
46956 error = mnt_want_write(nd.path.mnt);
46957 if (error)
46958 goto exit2;
46959 @@ -2813,6 +2978,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46960 if (error)
46961 goto exit3;
46962 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46963 + if (!error && (saved_ino || saved_dev))
46964 + gr_handle_delete(saved_ino, saved_dev);
46965 exit3:
46966 mnt_drop_write(nd.path.mnt);
46967 exit2:
46968 @@ -2888,10 +3055,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46969 error = mnt_want_write(path.mnt);
46970 if (error)
46971 goto out_dput;
46972 +
46973 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46974 + error = -EACCES;
46975 + goto out_drop_write;
46976 + }
46977 +
46978 error = security_path_symlink(&path, dentry, from);
46979 if (error)
46980 goto out_drop_write;
46981 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46982 + if (!error)
46983 + gr_handle_create(dentry, path.mnt);
46984 out_drop_write:
46985 mnt_drop_write(path.mnt);
46986 out_dput:
46987 @@ -2963,6 +3138,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46988 {
46989 struct dentry *new_dentry;
46990 struct path old_path, new_path;
46991 + char *to = NULL;
46992 int how = 0;
46993 int error;
46994
46995 @@ -2986,7 +3162,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46996 if (error)
46997 return error;
46998
46999 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
47000 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
47001 error = PTR_ERR(new_dentry);
47002 if (IS_ERR(new_dentry))
47003 goto out;
47004 @@ -2997,13 +3173,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47005 error = mnt_want_write(new_path.mnt);
47006 if (error)
47007 goto out_dput;
47008 +
47009 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
47010 + old_path.dentry->d_inode,
47011 + old_path.dentry->d_inode->i_mode, to)) {
47012 + error = -EACCES;
47013 + goto out_drop_write;
47014 + }
47015 +
47016 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
47017 + old_path.dentry, old_path.mnt, to)) {
47018 + error = -EACCES;
47019 + goto out_drop_write;
47020 + }
47021 +
47022 error = security_path_link(old_path.dentry, &new_path, new_dentry);
47023 if (error)
47024 goto out_drop_write;
47025 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
47026 + if (!error)
47027 + gr_handle_create(new_dentry, new_path.mnt);
47028 out_drop_write:
47029 mnt_drop_write(new_path.mnt);
47030 out_dput:
47031 + putname(to);
47032 dput(new_dentry);
47033 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
47034 path_put(&new_path);
47035 @@ -3231,6 +3424,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47036 if (new_dentry == trap)
47037 goto exit5;
47038
47039 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
47040 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
47041 + to);
47042 + if (error)
47043 + goto exit5;
47044 +
47045 error = mnt_want_write(oldnd.path.mnt);
47046 if (error)
47047 goto exit5;
47048 @@ -3240,6 +3439,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47049 goto exit6;
47050 error = vfs_rename(old_dir->d_inode, old_dentry,
47051 new_dir->d_inode, new_dentry);
47052 + if (!error)
47053 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47054 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47055 exit6:
47056 mnt_drop_write(oldnd.path.mnt);
47057 exit5:
47058 @@ -3265,6 +3467,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47059
47060 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47061 {
47062 + char tmpbuf[64];
47063 + const char *newlink;
47064 int len;
47065
47066 len = PTR_ERR(link);
47067 @@ -3274,7 +3478,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47068 len = strlen(link);
47069 if (len > (unsigned) buflen)
47070 len = buflen;
47071 - if (copy_to_user(buffer, link, len))
47072 +
47073 + if (len < sizeof(tmpbuf)) {
47074 + memcpy(tmpbuf, link, len);
47075 + newlink = tmpbuf;
47076 + } else
47077 + newlink = link;
47078 +
47079 + if (copy_to_user(buffer, newlink, len))
47080 len = -EFAULT;
47081 out:
47082 return len;
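
The last hunk above changes vfs_readlink() to stage link targets shorter than 64 bytes through an on-stack buffer before handing them to copy_to_user(), falling back to the original buffer otherwise. A minimal user-space sketch of that bounce-through-a-bounded-local-buffer pattern follows; copy_out() and the 64-byte size are stand-ins for copy_to_user() and the patch's tmpbuf, and the sketch makes no claim about why grsecurity wants the staging copy.

    #include <string.h>

    /* copy_out() stands in for copy_to_user(); a real kernel would
     * fault-check this copy instead of assuming it succeeds. */
    static int copy_out(char *dst, const char *src, size_t len)
    {
        memcpy(dst, src, len);
        return 0;
    }

    int readlink_sketch(char *user_buf, size_t buflen, const char *link)
    {
        char tmpbuf[64];
        const char *src = link;
        size_t len = strlen(link);

        if (len > buflen)
            len = buflen;
        if (len < sizeof(tmpbuf)) {
            memcpy(tmpbuf, link, len);
            src = tmpbuf;   /* serve the copy from the bounded stack buffer */
        }
        return copy_out(user_buf, src, len) ? -1 : (int)len;
    }
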
47083 diff --git a/fs/namespace.c b/fs/namespace.c
47084 index e608199..9609cb9 100644
47085 --- a/fs/namespace.c
47086 +++ b/fs/namespace.c
47087 @@ -1155,6 +1155,9 @@ static int do_umount(struct mount *mnt, int flags)
47088 if (!(sb->s_flags & MS_RDONLY))
47089 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47090 up_write(&sb->s_umount);
47091 +
47092 + gr_log_remount(mnt->mnt_devname, retval);
47093 +
47094 return retval;
47095 }
47096
47097 @@ -1174,6 +1177,9 @@ static int do_umount(struct mount *mnt, int flags)
47098 br_write_unlock(vfsmount_lock);
47099 up_write(&namespace_sem);
47100 release_mounts(&umount_list);
47101 +
47102 + gr_log_unmount(mnt->mnt_devname, retval);
47103 +
47104 return retval;
47105 }
47106
47107 @@ -2175,6 +2181,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47108 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47109 MS_STRICTATIME);
47110
47111 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47112 + retval = -EPERM;
47113 + goto dput_out;
47114 + }
47115 +
47116 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47117 + retval = -EPERM;
47118 + goto dput_out;
47119 + }
47120 +
47121 if (flags & MS_REMOUNT)
47122 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47123 data_page);
47124 @@ -2189,6 +2205,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47125 dev_name, data_page);
47126 dput_out:
47127 path_put(&path);
47128 +
47129 + gr_log_mount(dev_name, dir_name, retval);
47130 +
47131 return retval;
47132 }
47133
47134 @@ -2470,6 +2489,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47135 if (error)
47136 goto out2;
47137
47138 + if (gr_handle_chroot_pivot()) {
47139 + error = -EPERM;
47140 + goto out2;
47141 + }
47142 +
47143 get_fs_root(current->fs, &root);
47144 error = lock_mount(&old);
47145 if (error)
47146 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47147 index f649fba..236bf92 100644
47148 --- a/fs/nfs/inode.c
47149 +++ b/fs/nfs/inode.c
47150 @@ -151,7 +151,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47151 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47152 nfsi->attrtimeo_timestamp = jiffies;
47153
47154 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47155 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47156 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47157 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47158 else
47159 @@ -1003,16 +1003,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47160 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47161 }
47162
47163 -static atomic_long_t nfs_attr_generation_counter;
47164 +static atomic_long_unchecked_t nfs_attr_generation_counter;
47165
47166 static unsigned long nfs_read_attr_generation_counter(void)
47167 {
47168 - return atomic_long_read(&nfs_attr_generation_counter);
47169 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47170 }
47171
47172 unsigned long nfs_inc_attr_generation_counter(void)
47173 {
47174 - return atomic_long_inc_return(&nfs_attr_generation_counter);
47175 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47176 }
47177
47178 void nfs_fattr_init(struct nfs_fattr *fattr)
47179 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47180 index b96fe94..a4dbece 100644
47181 --- a/fs/nfsd/vfs.c
47182 +++ b/fs/nfsd/vfs.c
47183 @@ -925,7 +925,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47184 } else {
47185 oldfs = get_fs();
47186 set_fs(KERNEL_DS);
47187 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47188 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47189 set_fs(oldfs);
47190 }
47191
47192 @@ -1029,7 +1029,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47193
47194 /* Write the data. */
47195 oldfs = get_fs(); set_fs(KERNEL_DS);
47196 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47197 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47198 set_fs(oldfs);
47199 if (host_err < 0)
47200 goto out_nfserr;
47201 @@ -1564,7 +1564,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47202 */
47203
47204 oldfs = get_fs(); set_fs(KERNEL_DS);
47205 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
47206 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
47207 set_fs(oldfs);
47208
47209 if (host_err < 0)
47210 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47211 index 3568c8a..e0240d8 100644
47212 --- a/fs/notify/fanotify/fanotify_user.c
47213 +++ b/fs/notify/fanotify/fanotify_user.c
47214 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47215 goto out_close_fd;
47216
47217 ret = -EFAULT;
47218 - if (copy_to_user(buf, &fanotify_event_metadata,
47219 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47220 + copy_to_user(buf, &fanotify_event_metadata,
47221 fanotify_event_metadata.event_len))
47222 goto out_kill_access_response;
47223
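
The fanotify hunk above makes copy_event_to_user() reject any event whose self-described event_len exceeds the metadata structure actually being copied. The sketch below shows the same validate-the-claimed-length-before-copying-out idea with a made-up header type; struct event_hdr and copy_event() are illustrative names, not the fanotify ABI.

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    struct event_hdr {
        uint32_t event_len;     /* self-described length, never trusted */
        int32_t  fd;
    };

    int copy_event(void *dst, size_t dst_size, const struct event_hdr *ev)
    {
        /* Refuse lengths larger than the data we actually hold. */
        if (ev->event_len > sizeof(*ev) || ev->event_len > dst_size)
            return -EFAULT;
        memcpy(dst, ev, ev->event_len);
        return 0;
    }
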
47224 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47225 index ee18815..7aa5d01 100644
47226 --- a/fs/notify/notification.c
47227 +++ b/fs/notify/notification.c
47228 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47229 * get set to 0 so it will never get 'freed'
47230 */
47231 static struct fsnotify_event *q_overflow_event;
47232 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47233 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47234
47235 /**
47236 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47237 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47238 */
47239 u32 fsnotify_get_cookie(void)
47240 {
47241 - return atomic_inc_return(&fsnotify_sync_cookie);
47242 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47243 }
47244 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47245
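
Here, as in the nfs_attr_generation_counter and ocfs2 alloc_stats hunks, a plain atomic_t is switched to the PaX atomic_unchecked_t type, whose operations skip the REFCOUNT overflow trap for counters where wraparound is intentionally harmless. atomic_unchecked_t is PaX-specific, so the sketch below only illustrates the underlying idea with a standard C11 unsigned atomic, where wraparound is defined behaviour.

    #include <stdatomic.h>
    #include <stdint.h>

    /* A cookie generator whose wraparound is harmless by design, so no
     * overflow trap is wanted; unsigned overflow is well defined. */
    static atomic_uint sync_cookie;

    uint32_t get_cookie(void)
    {
        return atomic_fetch_add_explicit(&sync_cookie, 1u,
                                         memory_order_relaxed) + 1u;
    }
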
47246 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47247 index 99e3610..02c1068 100644
47248 --- a/fs/ntfs/dir.c
47249 +++ b/fs/ntfs/dir.c
47250 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
47251 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47252 ~(s64)(ndir->itype.index.block_size - 1)));
47253 /* Bounds checks. */
47254 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47255 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47256 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47257 "inode 0x%lx or driver bug.", vdir->i_ino);
47258 goto err_out;
47259 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47260 index c587e2d..3641eaa 100644
47261 --- a/fs/ntfs/file.c
47262 +++ b/fs/ntfs/file.c
47263 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47264 #endif /* NTFS_RW */
47265 };
47266
47267 -const struct file_operations ntfs_empty_file_ops = {};
47268 +const struct file_operations ntfs_empty_file_ops __read_only;
47269
47270 -const struct inode_operations ntfs_empty_inode_ops = {};
47271 +const struct inode_operations ntfs_empty_inode_ops __read_only;
47272 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47273 index 210c352..a174f83 100644
47274 --- a/fs/ocfs2/localalloc.c
47275 +++ b/fs/ocfs2/localalloc.c
47276 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47277 goto bail;
47278 }
47279
47280 - atomic_inc(&osb->alloc_stats.moves);
47281 + atomic_inc_unchecked(&osb->alloc_stats.moves);
47282
47283 bail:
47284 if (handle)
47285 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47286 index d355e6e..578d905 100644
47287 --- a/fs/ocfs2/ocfs2.h
47288 +++ b/fs/ocfs2/ocfs2.h
47289 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
47290
47291 struct ocfs2_alloc_stats
47292 {
47293 - atomic_t moves;
47294 - atomic_t local_data;
47295 - atomic_t bitmap_data;
47296 - atomic_t bg_allocs;
47297 - atomic_t bg_extends;
47298 + atomic_unchecked_t moves;
47299 + atomic_unchecked_t local_data;
47300 + atomic_unchecked_t bitmap_data;
47301 + atomic_unchecked_t bg_allocs;
47302 + atomic_unchecked_t bg_extends;
47303 };
47304
47305 enum ocfs2_local_alloc_state
47306 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47307 index f169da4..9112253 100644
47308 --- a/fs/ocfs2/suballoc.c
47309 +++ b/fs/ocfs2/suballoc.c
47310 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47311 mlog_errno(status);
47312 goto bail;
47313 }
47314 - atomic_inc(&osb->alloc_stats.bg_extends);
47315 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47316
47317 /* You should never ask for this much metadata */
47318 BUG_ON(bits_wanted >
47319 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47320 mlog_errno(status);
47321 goto bail;
47322 }
47323 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47324 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47325
47326 *suballoc_loc = res.sr_bg_blkno;
47327 *suballoc_bit_start = res.sr_bit_offset;
47328 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47329 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47330 res->sr_bits);
47331
47332 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47333 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47334
47335 BUG_ON(res->sr_bits != 1);
47336
47337 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47338 mlog_errno(status);
47339 goto bail;
47340 }
47341 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47342 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47343
47344 BUG_ON(res.sr_bits != 1);
47345
47346 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47347 cluster_start,
47348 num_clusters);
47349 if (!status)
47350 - atomic_inc(&osb->alloc_stats.local_data);
47351 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
47352 } else {
47353 if (min_clusters > (osb->bitmap_cpg - 1)) {
47354 /* The only paths asking for contiguousness
47355 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47356 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47357 res.sr_bg_blkno,
47358 res.sr_bit_offset);
47359 - atomic_inc(&osb->alloc_stats.bitmap_data);
47360 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
47361 *num_clusters = res.sr_bits;
47362 }
47363 }
47364 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47365 index 604e12c..8426483 100644
47366 --- a/fs/ocfs2/super.c
47367 +++ b/fs/ocfs2/super.c
47368 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47369 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47370 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47371 "Stats",
47372 - atomic_read(&osb->alloc_stats.bitmap_data),
47373 - atomic_read(&osb->alloc_stats.local_data),
47374 - atomic_read(&osb->alloc_stats.bg_allocs),
47375 - atomic_read(&osb->alloc_stats.moves),
47376 - atomic_read(&osb->alloc_stats.bg_extends));
47377 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47378 + atomic_read_unchecked(&osb->alloc_stats.local_data),
47379 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47380 + atomic_read_unchecked(&osb->alloc_stats.moves),
47381 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47382
47383 out += snprintf(buf + out, len - out,
47384 "%10s => State: %u Descriptor: %llu Size: %u bits "
47385 @@ -2117,11 +2117,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
47386 spin_lock_init(&osb->osb_xattr_lock);
47387 ocfs2_init_steal_slots(osb);
47388
47389 - atomic_set(&osb->alloc_stats.moves, 0);
47390 - atomic_set(&osb->alloc_stats.local_data, 0);
47391 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
47392 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
47393 - atomic_set(&osb->alloc_stats.bg_extends, 0);
47394 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47395 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47396 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47397 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47398 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47399
47400 /* Copy the blockcheck stats from the superblock probe */
47401 osb->osb_ecc_stats = *stats;
47402 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47403 index 5d22872..523db20 100644
47404 --- a/fs/ocfs2/symlink.c
47405 +++ b/fs/ocfs2/symlink.c
47406 @@ -142,7 +142,7 @@ bail:
47407
47408 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47409 {
47410 - char *link = nd_get_link(nd);
47411 + const char *link = nd_get_link(nd);
47412 if (!IS_ERR(link))
47413 kfree(link);
47414 }
47415 diff --git a/fs/open.c b/fs/open.c
47416 index 77becc0..aad7bd9 100644
47417 --- a/fs/open.c
47418 +++ b/fs/open.c
47419 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47420 error = locks_verify_truncate(inode, NULL, length);
47421 if (!error)
47422 error = security_path_truncate(&path);
47423 +
47424 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47425 + error = -EACCES;
47426 +
47427 if (!error)
47428 error = do_truncate(path.dentry, length, 0, NULL);
47429
47430 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47431 if (__mnt_is_readonly(path.mnt))
47432 res = -EROFS;
47433
47434 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47435 + res = -EACCES;
47436 +
47437 out_path_release:
47438 path_put(&path);
47439 out:
47440 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47441 if (error)
47442 goto dput_and_out;
47443
47444 + gr_log_chdir(path.dentry, path.mnt);
47445 +
47446 set_fs_pwd(current->fs, &path);
47447
47448 dput_and_out:
47449 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47450 goto out_putf;
47451
47452 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47453 +
47454 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47455 + error = -EPERM;
47456 +
47457 + if (!error)
47458 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47459 +
47460 if (!error)
47461 set_fs_pwd(current->fs, &file->f_path);
47462 out_putf:
47463 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47464 if (error)
47465 goto dput_and_out;
47466
47467 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47468 + goto dput_and_out;
47469 +
47470 set_fs_root(current->fs, &path);
47471 +
47472 + gr_handle_chroot_chdir(&path);
47473 +
47474 error = 0;
47475 dput_and_out:
47476 path_put(&path);
47477 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
47478 if (error)
47479 return error;
47480 mutex_lock(&inode->i_mutex);
47481 +
47482 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
47483 + error = -EACCES;
47484 + goto out_unlock;
47485 + }
47486 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47487 + error = -EACCES;
47488 + goto out_unlock;
47489 + }
47490 +
47491 error = security_path_chmod(path, mode);
47492 if (error)
47493 goto out_unlock;
47494 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47495 int error;
47496 struct iattr newattrs;
47497
47498 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
47499 + return -EACCES;
47500 +
47501 newattrs.ia_valid = ATTR_CTIME;
47502 if (user != (uid_t) -1) {
47503 newattrs.ia_valid |= ATTR_UID;
47504 diff --git a/fs/pipe.c b/fs/pipe.c
47505 index 82e651b..8a68573 100644
47506 --- a/fs/pipe.c
47507 +++ b/fs/pipe.c
47508 @@ -437,9 +437,9 @@ redo:
47509 }
47510 if (bufs) /* More to do? */
47511 continue;
47512 - if (!pipe->writers)
47513 + if (!atomic_read(&pipe->writers))
47514 break;
47515 - if (!pipe->waiting_writers) {
47516 + if (!atomic_read(&pipe->waiting_writers)) {
47517 /* syscall merging: Usually we must not sleep
47518 * if O_NONBLOCK is set, or if we got some data.
47519 * But if a writer sleeps in kernel space, then
47520 @@ -503,7 +503,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47521 mutex_lock(&inode->i_mutex);
47522 pipe = inode->i_pipe;
47523
47524 - if (!pipe->readers) {
47525 + if (!atomic_read(&pipe->readers)) {
47526 send_sig(SIGPIPE, current, 0);
47527 ret = -EPIPE;
47528 goto out;
47529 @@ -552,7 +552,7 @@ redo1:
47530 for (;;) {
47531 int bufs;
47532
47533 - if (!pipe->readers) {
47534 + if (!atomic_read(&pipe->readers)) {
47535 send_sig(SIGPIPE, current, 0);
47536 if (!ret)
47537 ret = -EPIPE;
47538 @@ -643,9 +643,9 @@ redo2:
47539 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47540 do_wakeup = 0;
47541 }
47542 - pipe->waiting_writers++;
47543 + atomic_inc(&pipe->waiting_writers);
47544 pipe_wait(pipe);
47545 - pipe->waiting_writers--;
47546 + atomic_dec(&pipe->waiting_writers);
47547 }
47548 out:
47549 mutex_unlock(&inode->i_mutex);
47550 @@ -712,7 +712,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47551 mask = 0;
47552 if (filp->f_mode & FMODE_READ) {
47553 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47554 - if (!pipe->writers && filp->f_version != pipe->w_counter)
47555 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47556 mask |= POLLHUP;
47557 }
47558
47559 @@ -722,7 +722,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47560 * Most Unices do not set POLLERR for FIFOs but on Linux they
47561 * behave exactly like pipes for poll().
47562 */
47563 - if (!pipe->readers)
47564 + if (!atomic_read(&pipe->readers))
47565 mask |= POLLERR;
47566 }
47567
47568 @@ -736,10 +736,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47569
47570 mutex_lock(&inode->i_mutex);
47571 pipe = inode->i_pipe;
47572 - pipe->readers -= decr;
47573 - pipe->writers -= decw;
47574 + atomic_sub(decr, &pipe->readers);
47575 + atomic_sub(decw, &pipe->writers);
47576
47577 - if (!pipe->readers && !pipe->writers) {
47578 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47579 free_pipe_info(inode);
47580 } else {
47581 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47582 @@ -829,7 +829,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47583
47584 if (inode->i_pipe) {
47585 ret = 0;
47586 - inode->i_pipe->readers++;
47587 + atomic_inc(&inode->i_pipe->readers);
47588 }
47589
47590 mutex_unlock(&inode->i_mutex);
47591 @@ -846,7 +846,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47592
47593 if (inode->i_pipe) {
47594 ret = 0;
47595 - inode->i_pipe->writers++;
47596 + atomic_inc(&inode->i_pipe->writers);
47597 }
47598
47599 mutex_unlock(&inode->i_mutex);
47600 @@ -864,9 +864,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47601 if (inode->i_pipe) {
47602 ret = 0;
47603 if (filp->f_mode & FMODE_READ)
47604 - inode->i_pipe->readers++;
47605 + atomic_inc(&inode->i_pipe->readers);
47606 if (filp->f_mode & FMODE_WRITE)
47607 - inode->i_pipe->writers++;
47608 + atomic_inc(&inode->i_pipe->writers);
47609 }
47610
47611 mutex_unlock(&inode->i_mutex);
47612 @@ -958,7 +958,7 @@ void free_pipe_info(struct inode *inode)
47613 inode->i_pipe = NULL;
47614 }
47615
47616 -static struct vfsmount *pipe_mnt __read_mostly;
47617 +struct vfsmount *pipe_mnt __read_mostly;
47618
47619 /*
47620 * pipefs_dname() is called from d_path().
47621 @@ -988,7 +988,8 @@ static struct inode * get_pipe_inode(void)
47622 goto fail_iput;
47623 inode->i_pipe = pipe;
47624
47625 - pipe->readers = pipe->writers = 1;
47626 + atomic_set(&pipe->readers, 1);
47627 + atomic_set(&pipe->writers, 1);
47628 inode->i_fop = &rdwr_pipefifo_fops;
47629
47630 /*
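
The fs/pipe.c hunks above convert the readers, writers and waiting_writers fields from plain integers to atomic counters, so the ++/-- sites become atomic_inc()/atomic_dec() and the emptiness checks become atomic_read(). A hedged C11 sketch of that endpoint accounting is below; struct pipe_ctx, pipe_open() and pipe_release() are stand-ins for the kernel's pipe_inode_info handling, and the release path assumes an external lock, much as the kernel relies on inode->i_mutex around this sequence.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct pipe_ctx {
        atomic_int readers;
        atomic_int writers;
    };

    void pipe_open(struct pipe_ctx *p, bool for_read, bool for_write)
    {
        if (for_read)
            atomic_fetch_add(&p->readers, 1);
        if (for_write)
            atomic_fetch_add(&p->writers, 1);
    }

    /* Assumes callers serialize releases with an external lock, so the
     * check-then-free below cannot race against a concurrent release. */
    void pipe_release(struct pipe_ctx *p, int decr, int decw)
    {
        atomic_fetch_sub(&p->readers, decr);
        atomic_fetch_sub(&p->writers, decw);
        if (!atomic_load(&p->readers) && !atomic_load(&p->writers))
            free(p);    /* free_pipe_info() analogue */
    }
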
47631 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47632 index 15af622..0e9f4467 100644
47633 --- a/fs/proc/Kconfig
47634 +++ b/fs/proc/Kconfig
47635 @@ -30,12 +30,12 @@ config PROC_FS
47636
47637 config PROC_KCORE
47638 bool "/proc/kcore support" if !ARM
47639 - depends on PROC_FS && MMU
47640 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47641
47642 config PROC_VMCORE
47643 bool "/proc/vmcore support"
47644 - depends on PROC_FS && CRASH_DUMP
47645 - default y
47646 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47647 + default n
47648 help
47649 Exports the dump image of crashed kernel in ELF format.
47650
47651 @@ -59,8 +59,8 @@ config PROC_SYSCTL
47652 limited in memory.
47653
47654 config PROC_PAGE_MONITOR
47655 - default y
47656 - depends on PROC_FS && MMU
47657 + default n
47658 + depends on PROC_FS && MMU && !GRKERNSEC
47659 bool "Enable /proc page monitoring" if EXPERT
47660 help
47661 Various /proc files exist to monitor process memory utilization:
47662 diff --git a/fs/proc/array.c b/fs/proc/array.c
47663 index c602b8d..a7de642 100644
47664 --- a/fs/proc/array.c
47665 +++ b/fs/proc/array.c
47666 @@ -60,6 +60,7 @@
47667 #include <linux/tty.h>
47668 #include <linux/string.h>
47669 #include <linux/mman.h>
47670 +#include <linux/grsecurity.h>
47671 #include <linux/proc_fs.h>
47672 #include <linux/ioport.h>
47673 #include <linux/uaccess.h>
47674 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47675 seq_putc(m, '\n');
47676 }
47677
47678 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47679 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
47680 +{
47681 + if (p->mm)
47682 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47683 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47684 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47685 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47686 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47687 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47688 + else
47689 + seq_printf(m, "PaX:\t-----\n");
47690 +}
47691 +#endif
47692 +
47693 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47694 struct pid *pid, struct task_struct *task)
47695 {
47696 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47697 task_cpus_allowed(m, task);
47698 cpuset_task_status_allowed(m, task);
47699 task_context_switch_counts(m, task);
47700 +
47701 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47702 + task_pax(m, task);
47703 +#endif
47704 +
47705 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47706 + task_grsec_rbac(m, task);
47707 +#endif
47708 +
47709 return 0;
47710 }
47711
47712 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47713 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47714 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47715 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47716 +#endif
47717 +
47718 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47719 struct pid *pid, struct task_struct *task, int whole)
47720 {
47721 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47722 char tcomm[sizeof(task->comm)];
47723 unsigned long flags;
47724
47725 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47726 + if (current->exec_id != m->exec_id) {
47727 + gr_log_badprocpid("stat");
47728 + return 0;
47729 + }
47730 +#endif
47731 +
47732 state = *get_task_state(task);
47733 vsize = eip = esp = 0;
47734 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47735 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47736 gtime = task->gtime;
47737 }
47738
47739 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47740 + if (PAX_RAND_FLAGS(mm)) {
47741 + eip = 0;
47742 + esp = 0;
47743 + wchan = 0;
47744 + }
47745 +#endif
47746 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47747 + wchan = 0;
47748 + eip =0;
47749 + esp =0;
47750 +#endif
47751 +
47752 /* scale priority and nice values from timeslices to -20..20 */
47753 /* to make it look like a "normal" Unix priority/nice value */
47754 priority = task_prio(task);
47755 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47756 vsize,
47757 mm ? get_mm_rss(mm) : 0,
47758 rsslim,
47759 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47760 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
47761 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
47762 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
47763 +#else
47764 mm ? (permitted ? mm->start_code : 1) : 0,
47765 mm ? (permitted ? mm->end_code : 1) : 0,
47766 (permitted && mm) ? mm->start_stack : 0,
47767 +#endif
47768 esp,
47769 eip,
47770 /* The signal information here is obsolete.
47771 @@ -536,8 +593,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47772 struct pid *pid, struct task_struct *task)
47773 {
47774 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47775 - struct mm_struct *mm = get_task_mm(task);
47776 + struct mm_struct *mm;
47777
47778 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47779 + if (current->exec_id != m->exec_id) {
47780 + gr_log_badprocpid("statm");
47781 + return 0;
47782 + }
47783 +#endif
47784 + mm = get_task_mm(task);
47785 if (mm) {
47786 size = task_statm(mm, &shared, &text, &data, &resident);
47787 mmput(mm);
47788 @@ -547,3 +611,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47789
47790 return 0;
47791 }
47792 +
47793 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47794 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47795 +{
47796 + u32 curr_ip = 0;
47797 + unsigned long flags;
47798 +
47799 + if (lock_task_sighand(task, &flags)) {
47800 + curr_ip = task->signal->curr_ip;
47801 + unlock_task_sighand(task, &flags);
47802 + }
47803 +
47804 + return sprintf(buffer, "%pI4\n", &curr_ip);
47805 +}
47806 +#endif
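
task_pax() above prints one letter per PaX flag in /proc/<pid>/status, upper-case when the flag is set and lower-case when it is clear. The sketch below reproduces that rendering with placeholder bit values; the FLAG_* constants are illustrative, not the real MF_PAX_* definitions.

    #include <stdio.h>

    #define FLAG_PAGEEXEC  0x01
    #define FLAG_EMUTRAMP  0x02
    #define FLAG_MPROTECT  0x04
    #define FLAG_RANDMMAP  0x08
    #define FLAG_SEGMEXEC  0x10

    /* One letter per flag: upper-case when set, lower-case when clear. */
    void show_pax_flags(unsigned long flags)
    {
        printf("PaX:\t%c%c%c%c%c\n",
               flags & FLAG_PAGEEXEC ? 'P' : 'p',
               flags & FLAG_EMUTRAMP ? 'E' : 'e',
               flags & FLAG_MPROTECT ? 'M' : 'm',
               flags & FLAG_RANDMMAP ? 'R' : 'r',
               flags & FLAG_SEGMEXEC ? 'S' : 's');
    }
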
47807 diff --git a/fs/proc/base.c b/fs/proc/base.c
47808 index d4548dd..d101f84 100644
47809 --- a/fs/proc/base.c
47810 +++ b/fs/proc/base.c
47811 @@ -109,6 +109,14 @@ struct pid_entry {
47812 union proc_op op;
47813 };
47814
47815 +struct getdents_callback {
47816 + struct linux_dirent __user * current_dir;
47817 + struct linux_dirent __user * previous;
47818 + struct file * file;
47819 + int count;
47820 + int error;
47821 +};
47822 +
47823 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47824 .name = (NAME), \
47825 .len = sizeof(NAME) - 1, \
47826 @@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47827 if (!mm->arg_end)
47828 goto out_mm; /* Shh! No looking before we're done */
47829
47830 + if (gr_acl_handle_procpidmem(task))
47831 + goto out_mm;
47832 +
47833 len = mm->arg_end - mm->arg_start;
47834
47835 if (len > PAGE_SIZE)
47836 @@ -240,12 +251,28 @@ out:
47837 return res;
47838 }
47839
47840 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47841 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47842 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47843 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47844 +#endif
47845 +
47846 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47847 {
47848 struct mm_struct *mm = mm_for_maps(task);
47849 int res = PTR_ERR(mm);
47850 if (mm && !IS_ERR(mm)) {
47851 unsigned int nwords = 0;
47852 +
47853 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47854 + /* allow if we're currently ptracing this task */
47855 + if (PAX_RAND_FLAGS(mm) &&
47856 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47857 + mmput(mm);
47858 + return 0;
47859 + }
47860 +#endif
47861 +
47862 do {
47863 nwords += 2;
47864 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47865 @@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47866 }
47867
47868
47869 -#ifdef CONFIG_KALLSYMS
47870 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47871 /*
47872 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47873 * Returns the resolved symbol. If that fails, simply return the address.
47874 @@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
47875 mutex_unlock(&task->signal->cred_guard_mutex);
47876 }
47877
47878 -#ifdef CONFIG_STACKTRACE
47879 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47880
47881 #define MAX_STACK_TRACE_DEPTH 64
47882
47883 @@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47884 return count;
47885 }
47886
47887 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47888 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47889 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47890 {
47891 long nr;
47892 @@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47893 /************************************************************************/
47894
47895 /* permission checks */
47896 -static int proc_fd_access_allowed(struct inode *inode)
47897 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47898 {
47899 struct task_struct *task;
47900 int allowed = 0;
47901 @@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47902 */
47903 task = get_proc_task(inode);
47904 if (task) {
47905 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47906 + if (log)
47907 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47908 + else
47909 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47910 put_task_struct(task);
47911 }
47912 return allowed;
47913 @@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
47914 struct task_struct *task,
47915 int hide_pid_min)
47916 {
47917 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47918 + return false;
47919 +
47920 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47921 + rcu_read_lock();
47922 + {
47923 + const struct cred *tmpcred = current_cred();
47924 + const struct cred *cred = __task_cred(task);
47925 +
47926 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47927 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47928 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47929 +#endif
47930 + ) {
47931 + rcu_read_unlock();
47932 + return true;
47933 + }
47934 + }
47935 + rcu_read_unlock();
47936 +
47937 + if (!pid->hide_pid)
47938 + return false;
47939 +#endif
47940 +
47941 if (pid->hide_pid < hide_pid_min)
47942 return true;
47943 if (in_group_p(pid->pid_gid))
47944 return true;
47945 +
47946 return ptrace_may_access(task, PTRACE_MODE_READ);
47947 }
47948
47949 @@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
47950 put_task_struct(task);
47951
47952 if (!has_perms) {
47953 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47954 + {
47955 +#else
47956 if (pid->hide_pid == 2) {
47957 +#endif
47958 /*
47959 * Let's make getdents(), stat(), and open()
47960 * consistent with each other. If a process
47961 @@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
47962 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47963 file->private_data = mm;
47964
47965 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47966 + file->f_version = current->exec_id;
47967 +#endif
47968 +
47969 return 0;
47970 }
47971
47972 @@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
47973 ssize_t copied;
47974 char *page;
47975
47976 +#ifdef CONFIG_GRKERNSEC
47977 + if (write)
47978 + return -EPERM;
47979 +#endif
47980 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47981 + if (file->f_version != current->exec_id) {
47982 + gr_log_badprocpid("mem");
47983 + return 0;
47984 + }
47985 +#endif
47986 +
47987 if (!mm)
47988 return 0;
47989
47990 @@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47991 if (!task)
47992 goto out_no_task;
47993
47994 + if (gr_acl_handle_procpidmem(task))
47995 + goto out;
47996 +
47997 ret = -ENOMEM;
47998 page = (char *)__get_free_page(GFP_TEMPORARY);
47999 if (!page)
48000 @@ -1434,7 +1511,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
48001 path_put(&nd->path);
48002
48003 /* Are we allowed to snoop on the tasks file descriptors? */
48004 - if (!proc_fd_access_allowed(inode))
48005 + if (!proc_fd_access_allowed(inode, 0))
48006 goto out;
48007
48008 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
48009 @@ -1473,8 +1550,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
48010 struct path path;
48011
48012 /* Are we allowed to snoop on the tasks file descriptors? */
48013 - if (!proc_fd_access_allowed(inode))
48014 - goto out;
48015 + /* logging this is needed for learning on chromium to work properly,
48016 + but we don't want to flood the logs from 'ps', which does a readlink
48017 + on /proc/fd/2 of every task in the listing, nor do we want 'ps' to
48018 + learn CAP_SYS_PTRACE, as it's not necessary for its basic functionality
48019 + */
48020 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48021 + if (!proc_fd_access_allowed(inode, 0))
48022 + goto out;
48023 + } else {
48024 + if (!proc_fd_access_allowed(inode, 1))
48025 + goto out;
48026 + }
48027
48028 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
48029 if (error)
48030 @@ -1539,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
48031 rcu_read_lock();
48032 cred = __task_cred(task);
48033 inode->i_uid = cred->euid;
48034 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48035 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48036 +#else
48037 inode->i_gid = cred->egid;
48038 +#endif
48039 rcu_read_unlock();
48040 }
48041 security_task_to_inode(task, inode);
48042 @@ -1575,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48043 return -ENOENT;
48044 }
48045 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48046 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48047 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48048 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48049 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48050 +#endif
48051 task_dumpable(task)) {
48052 cred = __task_cred(task);
48053 stat->uid = cred->euid;
48054 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48055 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48056 +#else
48057 stat->gid = cred->egid;
48058 +#endif
48059 }
48060 }
48061 rcu_read_unlock();
48062 @@ -1616,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
48063
48064 if (task) {
48065 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48066 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48067 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48068 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48069 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48070 +#endif
48071 task_dumpable(task)) {
48072 rcu_read_lock();
48073 cred = __task_cred(task);
48074 inode->i_uid = cred->euid;
48075 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48076 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48077 +#else
48078 inode->i_gid = cred->egid;
48079 +#endif
48080 rcu_read_unlock();
48081 } else {
48082 inode->i_uid = 0;
48083 @@ -1738,7 +1847,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48084 int fd = proc_fd(inode);
48085
48086 if (task) {
48087 - files = get_files_struct(task);
48088 + if (!gr_acl_handle_procpidmem(task))
48089 + files = get_files_struct(task);
48090 put_task_struct(task);
48091 }
48092 if (files) {
48093 @@ -2355,11 +2465,21 @@ static const struct file_operations proc_map_files_operations = {
48094 */
48095 static int proc_fd_permission(struct inode *inode, int mask)
48096 {
48097 + struct task_struct *task;
48098 int rv = generic_permission(inode, mask);
48099 - if (rv == 0)
48100 - return 0;
48101 +
48102 if (task_pid(current) == proc_pid(inode))
48103 rv = 0;
48104 +
48105 + task = get_proc_task(inode);
48106 + if (task == NULL)
48107 + return rv;
48108 +
48109 + if (gr_acl_handle_procpidmem(task))
48110 + rv = -EACCES;
48111 +
48112 + put_task_struct(task);
48113 +
48114 return rv;
48115 }
48116
48117 @@ -2469,6 +2589,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48118 if (!task)
48119 goto out_no_task;
48120
48121 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48122 + goto out;
48123 +
48124 /*
48125 * Yes, it does not scale. And it should not. Don't add
48126 * new entries into /proc/<tgid>/ without very good reasons.
48127 @@ -2513,6 +2636,9 @@ static int proc_pident_readdir(struct file *filp,
48128 if (!task)
48129 goto out_no_task;
48130
48131 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48132 + goto out;
48133 +
48134 ret = 0;
48135 i = filp->f_pos;
48136 switch (i) {
48137 @@ -2783,7 +2909,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48138 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48139 void *cookie)
48140 {
48141 - char *s = nd_get_link(nd);
48142 + const char *s = nd_get_link(nd);
48143 if (!IS_ERR(s))
48144 __putname(s);
48145 }
48146 @@ -2984,7 +3110,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48147 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48148 #endif
48149 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48150 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48151 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48152 INF("syscall", S_IRUGO, proc_pid_syscall),
48153 #endif
48154 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48155 @@ -3009,10 +3135,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48156 #ifdef CONFIG_SECURITY
48157 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48158 #endif
48159 -#ifdef CONFIG_KALLSYMS
48160 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48161 INF("wchan", S_IRUGO, proc_pid_wchan),
48162 #endif
48163 -#ifdef CONFIG_STACKTRACE
48164 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48165 ONE("stack", S_IRUGO, proc_pid_stack),
48166 #endif
48167 #ifdef CONFIG_SCHEDSTATS
48168 @@ -3046,6 +3172,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48169 #ifdef CONFIG_HARDWALL
48170 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48171 #endif
48172 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48173 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48174 +#endif
48175 };
48176
48177 static int proc_tgid_base_readdir(struct file * filp,
48178 @@ -3172,7 +3301,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
48179 if (!inode)
48180 goto out;
48181
48182 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48183 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48184 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48185 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48186 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48187 +#else
48188 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48189 +#endif
48190 inode->i_op = &proc_tgid_base_inode_operations;
48191 inode->i_fop = &proc_tgid_base_operations;
48192 inode->i_flags|=S_IMMUTABLE;
48193 @@ -3214,7 +3350,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
48194 if (!task)
48195 goto out;
48196
48197 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48198 + goto out_put_task;
48199 +
48200 result = proc_pid_instantiate(dir, dentry, task, NULL);
48201 +out_put_task:
48202 put_task_struct(task);
48203 out:
48204 return result;
48205 @@ -3277,6 +3417,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
48206 static int fake_filldir(void *buf, const char *name, int namelen,
48207 loff_t offset, u64 ino, unsigned d_type)
48208 {
48209 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
48210 + __buf->error = -EINVAL;
48211 return 0;
48212 }
48213
48214 @@ -3343,7 +3485,7 @@ static const struct pid_entry tid_base_stuff[] = {
48215 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48216 #endif
48217 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48218 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48219 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48220 INF("syscall", S_IRUGO, proc_pid_syscall),
48221 #endif
48222 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48223 @@ -3367,10 +3509,10 @@ static const struct pid_entry tid_base_stuff[] = {
48224 #ifdef CONFIG_SECURITY
48225 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48226 #endif
48227 -#ifdef CONFIG_KALLSYMS
48228 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48229 INF("wchan", S_IRUGO, proc_pid_wchan),
48230 #endif
48231 -#ifdef CONFIG_STACKTRACE
48232 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48233 ONE("stack", S_IRUGO, proc_pid_stack),
48234 #endif
48235 #ifdef CONFIG_SCHEDSTATS
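
Several of the fs/proc/base.c hunks above implement the same CONFIG_GRKERNSEC_PROC_MEMMAP check: mem_open() records current->exec_id in file->f_version, and mem_rw() refuses access once the stored value no longer matches, so a descriptor opened before execve() cannot be replayed against the new image (the do_task_stat()/proc_pid_statm() hunks compare a seq_file exec_id in the same spirit). A self-contained sketch of that generation-token pattern follows; current_exec_gen and the helper names are stand-ins, not the kernel's fields.

    #include <errno.h>
    #include <stdint.h>

    /* current_exec_gen plays the role of current->exec_id. */
    static uint64_t current_exec_gen;

    struct proc_handle {
        uint64_t opened_gen;    /* file->f_version analogue */
    };

    void simulate_exec(void)
    {
        current_exec_gen++;     /* what execve() would do */
    }

    void proc_open(struct proc_handle *h)
    {
        h->opened_gen = current_exec_gen;
    }

    int proc_read(struct proc_handle *h)
    {
        if (h->opened_gen != current_exec_gen)
            return -EPERM;      /* stale handle: image changed since open */
        return 0;               /* ok to service the read */
    }
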
48236 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48237 index 82676e3..5f8518a 100644
48238 --- a/fs/proc/cmdline.c
48239 +++ b/fs/proc/cmdline.c
48240 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
48241
48242 static int __init proc_cmdline_init(void)
48243 {
48244 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48245 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48246 +#else
48247 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48248 +#endif
48249 return 0;
48250 }
48251 module_init(proc_cmdline_init);
48252 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48253 index b143471..bb105e5 100644
48254 --- a/fs/proc/devices.c
48255 +++ b/fs/proc/devices.c
48256 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
48257
48258 static int __init proc_devices_init(void)
48259 {
48260 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48261 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48262 +#else
48263 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48264 +#endif
48265 return 0;
48266 }
48267 module_init(proc_devices_init);
48268 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
48269 index 84fd323..f698a32 100644
48270 --- a/fs/proc/inode.c
48271 +++ b/fs/proc/inode.c
48272 @@ -21,12 +21,18 @@
48273 #include <linux/seq_file.h>
48274 #include <linux/slab.h>
48275 #include <linux/mount.h>
48276 +#include <linux/grsecurity.h>
48277
48278 #include <asm/system.h>
48279 #include <asm/uaccess.h>
48280
48281 #include "internal.h"
48282
48283 +#ifdef CONFIG_PROC_SYSCTL
48284 +extern const struct inode_operations proc_sys_inode_operations;
48285 +extern const struct inode_operations proc_sys_dir_operations;
48286 +#endif
48287 +
48288 static void proc_evict_inode(struct inode *inode)
48289 {
48290 struct proc_dir_entry *de;
48291 @@ -52,6 +58,13 @@ static void proc_evict_inode(struct inode *inode)
48292 ns_ops = PROC_I(inode)->ns_ops;
48293 if (ns_ops && ns_ops->put)
48294 ns_ops->put(PROC_I(inode)->ns);
48295 +
48296 +#ifdef CONFIG_PROC_SYSCTL
48297 + if (inode->i_op == &proc_sys_inode_operations ||
48298 + inode->i_op == &proc_sys_dir_operations)
48299 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48300 +#endif
48301 +
48302 }
48303
48304 static struct kmem_cache * proc_inode_cachep;
48305 @@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
48306 if (de->mode) {
48307 inode->i_mode = de->mode;
48308 inode->i_uid = de->uid;
48309 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48310 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48311 +#else
48312 inode->i_gid = de->gid;
48313 +#endif
48314 }
48315 if (de->size)
48316 inode->i_size = de->size;
48317 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
48318 index 2925775..4f08fae 100644
48319 --- a/fs/proc/internal.h
48320 +++ b/fs/proc/internal.h
48321 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48322 struct pid *pid, struct task_struct *task);
48323 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48324 struct pid *pid, struct task_struct *task);
48325 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48326 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48327 +#endif
48328 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48329
48330 extern const struct file_operations proc_maps_operations;
48331 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
48332 index d245cb2..f4e8498 100644
48333 --- a/fs/proc/kcore.c
48334 +++ b/fs/proc/kcore.c
48335 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48336 * the addresses in the elf_phdr on our list.
48337 */
48338 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48339 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48340 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48341 + if (tsz > buflen)
48342 tsz = buflen;
48343 -
48344 +
48345 while (buflen) {
48346 struct kcore_list *m;
48347
48348 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48349 kfree(elf_buf);
48350 } else {
48351 if (kern_addr_valid(start)) {
48352 - unsigned long n;
48353 + char *elf_buf;
48354 + mm_segment_t oldfs;
48355
48356 - n = copy_to_user(buffer, (char *)start, tsz);
48357 - /*
48358 - * We cannot distingush between fault on source
48359 - * and fault on destination. When this happens
48360 - * we clear too and hope it will trigger the
48361 - * EFAULT again.
48362 - */
48363 - if (n) {
48364 - if (clear_user(buffer + tsz - n,
48365 - n))
48366 + elf_buf = kmalloc(tsz, GFP_KERNEL);
48367 + if (!elf_buf)
48368 + return -ENOMEM;
48369 + oldfs = get_fs();
48370 + set_fs(KERNEL_DS);
48371 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
48372 + set_fs(oldfs);
48373 + if (copy_to_user(buffer, elf_buf, tsz)) {
48374 + kfree(elf_buf);
48375 return -EFAULT;
48376 + }
48377 }
48378 + set_fs(oldfs);
48379 + kfree(elf_buf);
48380 } else {
48381 if (clear_user(buffer, tsz))
48382 return -EFAULT;
48383 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48384
48385 static int open_kcore(struct inode *inode, struct file *filp)
48386 {
48387 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48388 + return -EPERM;
48389 +#endif
48390 if (!capable(CAP_SYS_RAWIO))
48391 return -EPERM;
48392 if (kcore_need_update)
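
The read_kcore() rewrite above stops copying straight from arbitrary kernel addresses to userspace and instead stages each chunk through a kmalloc'd bounce buffer, so a fault while reading the source can be told apart from a fault while writing the destination. A minimal userspace sketch of the same staging idea follows; the file descriptors and helper name are invented and it makes no claim to mirror the kernel helpers.

    #include <stdlib.h>
    #include <unistd.h>

    /* Stage data in a scratch buffer: a failed read() points at the source,
     * a failed or short write() points at the destination. Illustrative only. */
    static ssize_t copy_chunk(int src_fd, int dst_fd, size_t len)
    {
        char *buf = malloc(len);
        ssize_t n;

        if (!buf)
            return -1;
        n = read(src_fd, buf, len);                 /* source-side failure shows up here */
        if (n > 0 && write(dst_fd, buf, (size_t)n) != n)
            n = -1;                                 /* destination-side failure shows up here */
        free(buf);
        return n;
    }

    int main(void)
    {
        return copy_chunk(0, 1, 4096) < 0;          /* stdin -> stdout, one chunk */
    }
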
48393 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48394 index 80e4645..53e5fcf 100644
48395 --- a/fs/proc/meminfo.c
48396 +++ b/fs/proc/meminfo.c
48397 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48398 vmi.used >> 10,
48399 vmi.largest_chunk >> 10
48400 #ifdef CONFIG_MEMORY_FAILURE
48401 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48402 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48403 #endif
48404 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48405 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
48406 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48407 index b1822dd..df622cb 100644
48408 --- a/fs/proc/nommu.c
48409 +++ b/fs/proc/nommu.c
48410 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
48411 if (len < 1)
48412 len = 1;
48413 seq_printf(m, "%*c", len, ' ');
48414 - seq_path(m, &file->f_path, "");
48415 + seq_path(m, &file->f_path, "\n\\");
48416 }
48417
48418 seq_putc(m, '\n');
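
The seq_path() calls above, and the matching ones later in task_mmu.c and task_nommu.c, widen the escape set from "" or "\n" to "\n\\" so that a file name containing a newline cannot forge extra records in line-oriented /proc listings such as the maps files. A rough userspace analogue of that escaping is sketched below; it octal-encodes the two dangerous characters much like the kernel's mangle_path(), but the helper name is invented.

    #include <stdio.h>

    /* Replace '\n' and '\\' in a path with \ooo octal escapes so the output
     * stays one record per line even for hostile file names. */
    static void print_escaped_path(const char *p)
    {
        for (; *p; p++) {
            if (*p == '\n' || *p == '\\')
                printf("\\%03o", (unsigned char)*p);
            else
                putchar(*p);
        }
        putchar('\n');
    }

    int main(void)
    {
        print_escaped_path("/tmp/evil\n00400000-00401000 r-xp forged entry");
        return 0;
    }
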
48419 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48420 index 06e1cc1..177cd98 100644
48421 --- a/fs/proc/proc_net.c
48422 +++ b/fs/proc/proc_net.c
48423 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
48424 struct task_struct *task;
48425 struct nsproxy *ns;
48426 struct net *net = NULL;
48427 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48428 + const struct cred *cred = current_cred();
48429 +#endif
48430 +
48431 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48432 + if (cred->fsuid)
48433 + return net;
48434 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48435 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48436 + return net;
48437 +#endif
48438
48439 rcu_read_lock();
48440 task = pid_task(proc_pid(dir), PIDTYPE_PID);
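
The get_proc_task_net() hunk gates the per-task /proc/net view: with GRKERNSEC_PROC_USER only root gets through, and with GRKERNSEC_PROC_USERGROUP a non-root caller must belong to CONFIG_GRKERNSEC_PROC_GID. A small userspace sketch of that gid-based gate, assuming the default group id 1001 from the Kconfig further below and using only geteuid()/getgroups():

    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Allow root, or any caller whose supplementary groups include the
     * special gid (1001 mirrors the CONFIG_GRKERNSEC_PROC_GID default). */
    static int proc_net_allowed(gid_t special)
    {
        gid_t groups[64];
        int i, n;

        if (geteuid() == 0)
            return 1;
        n = getgroups(64, groups);
        for (i = 0; i < n; i++)
            if (groups[i] == special)
                return 1;
        return 0;
    }

    int main(void)
    {
        printf("/proc/net access: %s\n",
               proc_net_allowed(1001) ? "granted" : "denied");
        return 0;
    }
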
48441 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48442 index 53c3bce..10ad159 100644
48443 --- a/fs/proc/proc_sysctl.c
48444 +++ b/fs/proc/proc_sysctl.c
48445 @@ -9,11 +9,13 @@
48446 #include <linux/namei.h>
48447 #include "internal.h"
48448
48449 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
48450 +
48451 static const struct dentry_operations proc_sys_dentry_operations;
48452 static const struct file_operations proc_sys_file_operations;
48453 -static const struct inode_operations proc_sys_inode_operations;
48454 +const struct inode_operations proc_sys_inode_operations;
48455 static const struct file_operations proc_sys_dir_file_operations;
48456 -static const struct inode_operations proc_sys_dir_operations;
48457 +const struct inode_operations proc_sys_dir_operations;
48458
48459 void proc_sys_poll_notify(struct ctl_table_poll *poll)
48460 {
48461 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48462
48463 err = NULL;
48464 d_set_d_op(dentry, &proc_sys_dentry_operations);
48465 +
48466 + gr_handle_proc_create(dentry, inode);
48467 +
48468 d_add(dentry, inode);
48469
48470 + if (gr_handle_sysctl(p, MAY_EXEC))
48471 + err = ERR_PTR(-ENOENT);
48472 +
48473 out:
48474 sysctl_head_finish(head);
48475 return err;
48476 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48477 if (!table->proc_handler)
48478 goto out;
48479
48480 +#ifdef CONFIG_GRKERNSEC
48481 + error = -EPERM;
48482 + if (write && !capable(CAP_SYS_ADMIN))
48483 + goto out;
48484 +#endif
48485 +
48486 /* careful: calling conventions are nasty here */
48487 res = count;
48488 error = table->proc_handler(table, write, buf, &res, ppos);
48489 @@ -260,6 +274,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48490 return -ENOMEM;
48491 } else {
48492 d_set_d_op(child, &proc_sys_dentry_operations);
48493 +
48494 + gr_handle_proc_create(child, inode);
48495 +
48496 d_add(child, inode);
48497 }
48498 } else {
48499 @@ -288,6 +305,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48500 if (*pos < file->f_pos)
48501 continue;
48502
48503 + if (gr_handle_sysctl(table, 0))
48504 + continue;
48505 +
48506 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
48507 if (res)
48508 return res;
48509 @@ -413,6 +433,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48510 if (IS_ERR(head))
48511 return PTR_ERR(head);
48512
48513 + if (table && gr_handle_sysctl(table, MAY_EXEC))
48514 + return -ENOENT;
48515 +
48516 generic_fillattr(inode, stat);
48517 if (table)
48518 stat->mode = (stat->mode & S_IFMT) | table->mode;
48519 @@ -435,13 +458,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
48520 .llseek = generic_file_llseek,
48521 };
48522
48523 -static const struct inode_operations proc_sys_inode_operations = {
48524 +const struct inode_operations proc_sys_inode_operations = {
48525 .permission = proc_sys_permission,
48526 .setattr = proc_sys_setattr,
48527 .getattr = proc_sys_getattr,
48528 };
48529
48530 -static const struct inode_operations proc_sys_dir_operations = {
48531 +const struct inode_operations proc_sys_dir_operations = {
48532 .lookup = proc_sys_lookup,
48533 .permission = proc_sys_permission,
48534 .setattr = proc_sys_setattr,
48535 diff --git a/fs/proc/root.c b/fs/proc/root.c
48536 index 46a15d8..335631a 100644
48537 --- a/fs/proc/root.c
48538 +++ b/fs/proc/root.c
48539 @@ -187,7 +187,15 @@ void __init proc_root_init(void)
48540 #ifdef CONFIG_PROC_DEVICETREE
48541 proc_device_tree_init();
48542 #endif
48543 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48544 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48545 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48546 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48547 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48548 +#endif
48549 +#else
48550 proc_mkdir("bus", NULL);
48551 +#endif
48552 proc_sys_init();
48553 }
48554
48555 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48556 index 3efa725..23c925b 100644
48557 --- a/fs/proc/task_mmu.c
48558 +++ b/fs/proc/task_mmu.c
48559 @@ -11,6 +11,7 @@
48560 #include <linux/rmap.h>
48561 #include <linux/swap.h>
48562 #include <linux/swapops.h>
48563 +#include <linux/grsecurity.h>
48564
48565 #include <asm/elf.h>
48566 #include <asm/uaccess.h>
48567 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48568 "VmExe:\t%8lu kB\n"
48569 "VmLib:\t%8lu kB\n"
48570 "VmPTE:\t%8lu kB\n"
48571 - "VmSwap:\t%8lu kB\n",
48572 - hiwater_vm << (PAGE_SHIFT-10),
48573 + "VmSwap:\t%8lu kB\n"
48574 +
48575 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48576 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48577 +#endif
48578 +
48579 + ,hiwater_vm << (PAGE_SHIFT-10),
48580 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48581 mm->locked_vm << (PAGE_SHIFT-10),
48582 mm->pinned_vm << (PAGE_SHIFT-10),
48583 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48584 data << (PAGE_SHIFT-10),
48585 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48586 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48587 - swap << (PAGE_SHIFT-10));
48588 + swap << (PAGE_SHIFT-10)
48589 +
48590 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48591 + , mm->context.user_cs_base, mm->context.user_cs_limit
48592 +#endif
48593 +
48594 + );
48595 }
48596
48597 unsigned long task_vsize(struct mm_struct *mm)
48598 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
48599 return ret;
48600 }
48601
48602 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48603 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48604 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48605 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48606 +#endif
48607 +
48608 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48609 {
48610 struct mm_struct *mm = vma->vm_mm;
48611 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48612 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48613 }
48614
48615 - /* We don't show the stack guard page in /proc/maps */
48616 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48617 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48618 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48619 +#else
48620 start = vma->vm_start;
48621 - if (stack_guard_page_start(vma, start))
48622 - start += PAGE_SIZE;
48623 end = vma->vm_end;
48624 - if (stack_guard_page_end(vma, end))
48625 - end -= PAGE_SIZE;
48626 +#endif
48627
48628 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48629 start,
48630 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48631 flags & VM_WRITE ? 'w' : '-',
48632 flags & VM_EXEC ? 'x' : '-',
48633 flags & VM_MAYSHARE ? 's' : 'p',
48634 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48635 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48636 +#else
48637 pgoff,
48638 +#endif
48639 MAJOR(dev), MINOR(dev), ino, &len);
48640
48641 /*
48642 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48643 */
48644 if (file) {
48645 pad_len_spaces(m, len);
48646 - seq_path(m, &file->f_path, "\n");
48647 + seq_path(m, &file->f_path, "\n\\");
48648 } else {
48649 const char *name = arch_vma_name(vma);
48650 if (!name) {
48651 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48652 if (vma->vm_start <= mm->brk &&
48653 vma->vm_end >= mm->start_brk) {
48654 name = "[heap]";
48655 - } else if (vma->vm_start <= mm->start_stack &&
48656 - vma->vm_end >= mm->start_stack) {
48657 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48658 + (vma->vm_start <= mm->start_stack &&
48659 + vma->vm_end >= mm->start_stack)) {
48660 name = "[stack]";
48661 }
48662 } else {
48663 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
48664 struct proc_maps_private *priv = m->private;
48665 struct task_struct *task = priv->task;
48666
48667 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48668 + if (current->exec_id != m->exec_id) {
48669 + gr_log_badprocpid("maps");
48670 + return 0;
48671 + }
48672 +#endif
48673 +
48674 show_map_vma(m, vma);
48675
48676 if (m->count < m->size) /* vma is copied successfully */
48677 @@ -437,12 +467,23 @@ static int show_smap(struct seq_file *m, void *v)
48678 .private = &mss,
48679 };
48680
48681 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48682 + if (current->exec_id != m->exec_id) {
48683 + gr_log_badprocpid("smaps");
48684 + return 0;
48685 + }
48686 +#endif
48687 memset(&mss, 0, sizeof mss);
48688 - mss.vma = vma;
48689 - /* mmap_sem is held in m_start */
48690 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48691 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48692 -
48693 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48694 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48695 +#endif
48696 + mss.vma = vma;
48697 + /* mmap_sem is held in m_start */
48698 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48699 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48700 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48701 + }
48702 +#endif
48703 show_map_vma(m, vma);
48704
48705 seq_printf(m,
48706 @@ -460,7 +501,11 @@ static int show_smap(struct seq_file *m, void *v)
48707 "KernelPageSize: %8lu kB\n"
48708 "MMUPageSize: %8lu kB\n"
48709 "Locked: %8lu kB\n",
48710 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48711 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48712 +#else
48713 (vma->vm_end - vma->vm_start) >> 10,
48714 +#endif
48715 mss.resident >> 10,
48716 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48717 mss.shared_clean >> 10,
48718 @@ -1024,6 +1069,13 @@ static int show_numa_map(struct seq_file *m, void *v)
48719 int n;
48720 char buffer[50];
48721
48722 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48723 + if (current->exec_id != m->exec_id) {
48724 + gr_log_badprocpid("numa_maps");
48725 + return 0;
48726 + }
48727 +#endif
48728 +
48729 if (!mm)
48730 return 0;
48731
48732 @@ -1041,11 +1093,15 @@ static int show_numa_map(struct seq_file *m, void *v)
48733 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48734 mpol_cond_put(pol);
48735
48736 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48737 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48738 +#else
48739 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48740 +#endif
48741
48742 if (file) {
48743 seq_printf(m, " file=");
48744 - seq_path(m, &file->f_path, "\n\t= ");
48745 + seq_path(m, &file->f_path, "\n\t\\= ");
48746 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48747 seq_printf(m, " heap");
48748 } else if (vma->vm_start <= mm->start_stack &&
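
Throughout the task_mmu.c hunks, the PAX_RAND_FLAGS() test decides whether real addresses and offsets are printed or replaced with 0, so tasks running with randomized layouts leak nothing through maps, smaps or numa_maps. A stripped-down sketch of that masking pattern, with an invented structure standing in for a VMA:

    #include <stdio.h>

    /* Invented stand-in for a VMA: only the fields the sketch prints. */
    struct fake_vma {
        unsigned long start, end, pgoff;
    };

    /* Print either the real layout or an all-zero line, depending on whether
     * the owning task asked for its addresses to be hidden. */
    static void show_vma(const struct fake_vma *v, int hide_layout)
    {
        unsigned long s = hide_layout ? 0UL : v->start;
        unsigned long e = hide_layout ? 0UL : v->end;
        unsigned long o = hide_layout ? 0UL : v->pgoff;

        printf("%08lx-%08lx %08lx\n", s, e, o);
    }

    int main(void)
    {
        struct fake_vma v = { 0x00400000UL, 0x00401000UL, 0UL };

        show_vma(&v, 0);   /* normal task: real addresses */
        show_vma(&v, 1);   /* randomized task: zeroed line */
        return 0;
    }
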
48749 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48750 index 980de54..2a4db5f 100644
48751 --- a/fs/proc/task_nommu.c
48752 +++ b/fs/proc/task_nommu.c
48753 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48754 else
48755 bytes += kobjsize(mm);
48756
48757 - if (current->fs && current->fs->users > 1)
48758 + if (current->fs && atomic_read(&current->fs->users) > 1)
48759 sbytes += kobjsize(current->fs);
48760 else
48761 bytes += kobjsize(current->fs);
48762 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
48763
48764 if (file) {
48765 pad_len_spaces(m, len);
48766 - seq_path(m, &file->f_path, "");
48767 + seq_path(m, &file->f_path, "\n\\");
48768 } else if (mm) {
48769 if (vma->vm_start <= mm->start_stack &&
48770 vma->vm_end >= mm->start_stack) {
48771 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48772 index d67908b..d13f6a6 100644
48773 --- a/fs/quota/netlink.c
48774 +++ b/fs/quota/netlink.c
48775 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48776 void quota_send_warning(short type, unsigned int id, dev_t dev,
48777 const char warntype)
48778 {
48779 - static atomic_t seq;
48780 + static atomic_unchecked_t seq;
48781 struct sk_buff *skb;
48782 void *msg_head;
48783 int ret;
48784 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48785 "VFS: Not enough memory to send quota warning.\n");
48786 return;
48787 }
48788 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48789 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48790 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48791 if (!msg_head) {
48792 printk(KERN_ERR
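
The quota warning sequence number above moves from atomic_t to atomic_unchecked_t: with PAX_REFCOUNT, plain atomic_t increments are overflow-checked, so counters that are supposed to wrap (sequence ids, statistics) have to opt out to avoid false positives. A hedged userspace illustration of the distinction, using the GCC/Clang __builtin_add_overflow() builtin rather than any kernel API:

    #include <limits.h>
    #include <stdio.h>

    /* "Checked" increment: refuse to wrap, as an overflow-protected
     * reference count should. */
    static int checked_inc(int *v)
    {
        int out;

        if (__builtin_add_overflow(*v, 1, &out))
            return -1;             /* would overflow: leave the value alone */
        *v = out;
        return 0;
    }

    int main(void)
    {
        int refcount = INT_MAX;
        unsigned int seq = UINT_MAX;

        if (checked_inc(&refcount) < 0)
            puts("refcount increment refused at INT_MAX");

        seq++;                     /* unchecked sequence id: wrapping is fine */
        printf("sequence id wrapped to %u\n", seq);
        return 0;
    }
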
48793 diff --git a/fs/readdir.c b/fs/readdir.c
48794 index 356f715..c918d38 100644
48795 --- a/fs/readdir.c
48796 +++ b/fs/readdir.c
48797 @@ -17,6 +17,7 @@
48798 #include <linux/security.h>
48799 #include <linux/syscalls.h>
48800 #include <linux/unistd.h>
48801 +#include <linux/namei.h>
48802
48803 #include <asm/uaccess.h>
48804
48805 @@ -67,6 +68,7 @@ struct old_linux_dirent {
48806
48807 struct readdir_callback {
48808 struct old_linux_dirent __user * dirent;
48809 + struct file * file;
48810 int result;
48811 };
48812
48813 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48814 buf->result = -EOVERFLOW;
48815 return -EOVERFLOW;
48816 }
48817 +
48818 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48819 + return 0;
48820 +
48821 buf->result++;
48822 dirent = buf->dirent;
48823 if (!access_ok(VERIFY_WRITE, dirent,
48824 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48825
48826 buf.result = 0;
48827 buf.dirent = dirent;
48828 + buf.file = file;
48829
48830 error = vfs_readdir(file, fillonedir, &buf);
48831 if (buf.result)
48832 @@ -142,6 +149,7 @@ struct linux_dirent {
48833 struct getdents_callback {
48834 struct linux_dirent __user * current_dir;
48835 struct linux_dirent __user * previous;
48836 + struct file * file;
48837 int count;
48838 int error;
48839 };
48840 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48841 buf->error = -EOVERFLOW;
48842 return -EOVERFLOW;
48843 }
48844 +
48845 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48846 + return 0;
48847 +
48848 dirent = buf->previous;
48849 if (dirent) {
48850 if (__put_user(offset, &dirent->d_off))
48851 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48852 buf.previous = NULL;
48853 buf.count = count;
48854 buf.error = 0;
48855 + buf.file = file;
48856
48857 error = vfs_readdir(file, filldir, &buf);
48858 if (error >= 0)
48859 @@ -229,6 +242,7 @@ out:
48860 struct getdents_callback64 {
48861 struct linux_dirent64 __user * current_dir;
48862 struct linux_dirent64 __user * previous;
48863 + struct file *file;
48864 int count;
48865 int error;
48866 };
48867 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48868 buf->error = -EINVAL; /* only used if we fail.. */
48869 if (reclen > buf->count)
48870 return -EINVAL;
48871 +
48872 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48873 + return 0;
48874 +
48875 dirent = buf->previous;
48876 if (dirent) {
48877 if (__put_user(offset, &dirent->d_off))
48878 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48879
48880 buf.current_dir = dirent;
48881 buf.previous = NULL;
48882 + buf.file = file;
48883 buf.count = count;
48884 buf.error = 0;
48885
48886 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48887 error = buf.error;
48888 lastdirent = buf.previous;
48889 if (lastdirent) {
48890 - typeof(lastdirent->d_off) d_off = file->f_pos;
48891 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48892 if (__put_user(d_off, &lastdirent->d_off))
48893 error = -EFAULT;
48894 else
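
Each filldir-style callback in readdir.c now carries the struct file and asks gr_acl_handle_filldir() whether an entry may be shown; returning 0 silently skips it and the listing continues. A minimal userspace sketch of the same filter-inside-the-callback idea, with an invented policy_allows() standing in for the ACL check:

    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    /* Invented policy: hide entries whose names start with "secret". */
    static int policy_allows(const char *name)
    {
        return strncmp(name, "secret", 6) != 0;
    }

    int main(void)
    {
        DIR *d = opendir(".");
        struct dirent *e;

        if (!d)
            return 1;
        while ((e = readdir(d)) != NULL) {
            if (!policy_allows(e->d_name))
                continue;          /* like returning 0 from the fill callback */
            puts(e->d_name);
        }
        closedir(d);
        return 0;
    }
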
48895 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48896 index 60c0804..d814f98 100644
48897 --- a/fs/reiserfs/do_balan.c
48898 +++ b/fs/reiserfs/do_balan.c
48899 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48900 return;
48901 }
48902
48903 - atomic_inc(&(fs_generation(tb->tb_sb)));
48904 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48905 do_balance_starts(tb);
48906
48907 /* balance leaf returns 0 except if combining L R and S into
48908 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48909 index 7a99811..a7c96c4 100644
48910 --- a/fs/reiserfs/procfs.c
48911 +++ b/fs/reiserfs/procfs.c
48912 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48913 "SMALL_TAILS " : "NO_TAILS ",
48914 replay_only(sb) ? "REPLAY_ONLY " : "",
48915 convert_reiserfs(sb) ? "CONV " : "",
48916 - atomic_read(&r->s_generation_counter),
48917 + atomic_read_unchecked(&r->s_generation_counter),
48918 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48919 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48920 SF(s_good_search_by_key_reada), SF(s_bmaps),
48921 diff --git a/fs/select.c b/fs/select.c
48922 index e782258..3b4b44c 100644
48923 --- a/fs/select.c
48924 +++ b/fs/select.c
48925 @@ -20,6 +20,7 @@
48926 #include <linux/module.h>
48927 #include <linux/slab.h>
48928 #include <linux/poll.h>
48929 +#include <linux/security.h>
48930 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
48931 #include <linux/file.h>
48932 #include <linux/fdtable.h>
48933 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
48934 struct poll_list *walk = head;
48935 unsigned long todo = nfds;
48936
48937 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
48938 if (nfds > rlimit(RLIMIT_NOFILE))
48939 return -EINVAL;
48940
48941 diff --git a/fs/seq_file.c b/fs/seq_file.c
48942 index 4023d6b..ab46c6a 100644
48943 --- a/fs/seq_file.c
48944 +++ b/fs/seq_file.c
48945 @@ -9,6 +9,7 @@
48946 #include <linux/module.h>
48947 #include <linux/seq_file.h>
48948 #include <linux/slab.h>
48949 +#include <linux/sched.h>
48950
48951 #include <asm/uaccess.h>
48952 #include <asm/page.h>
48953 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
48954 memset(p, 0, sizeof(*p));
48955 mutex_init(&p->lock);
48956 p->op = op;
48957 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48958 + p->exec_id = current->exec_id;
48959 +#endif
48960
48961 /*
48962 * Wrappers around seq_open(e.g. swaps_open) need to be
48963 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
48964 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
48965 void *data)
48966 {
48967 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
48968 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
48969 int res = -ENOMEM;
48970
48971 if (op) {
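
seq_open() above stamps the seq_file with the opener's exec_id, and the /proc/<pid>/maps family shown earlier refuses to emit anything once the reader's exec_id no longer matches, blocking the trick of opening a descriptor and then exec'ing a setuid target to read its mappings. A tiny sketch of that generation-tag pattern with invented types; exec_id here is just a counter bumped on each simulated exec:

    #include <stdio.h>

    static unsigned long current_exec_id = 1;    /* bumped on every "exec" */

    struct tagged_handle {
        unsigned long exec_id;                   /* recorded at open time */
    };

    static void handle_open(struct tagged_handle *h)
    {
        h->exec_id = current_exec_id;
    }

    static int handle_read(const struct tagged_handle *h)
    {
        if (h->exec_id != current_exec_id) {
            puts("stale handle: opener exec'd since open, refusing to read");
            return -1;
        }
        puts("read ok");
        return 0;
    }

    int main(void)
    {
        struct tagged_handle h;

        handle_open(&h);
        handle_read(&h);            /* same generation: allowed */
        current_exec_id++;          /* simulate an execve() */
        handle_read(&h);            /* refused */
        return 0;
    }
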
48972 diff --git a/fs/splice.c b/fs/splice.c
48973 index 96d7b28..fd465ac 100644
48974 --- a/fs/splice.c
48975 +++ b/fs/splice.c
48976 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48977 pipe_lock(pipe);
48978
48979 for (;;) {
48980 - if (!pipe->readers) {
48981 + if (!atomic_read(&pipe->readers)) {
48982 send_sig(SIGPIPE, current, 0);
48983 if (!ret)
48984 ret = -EPIPE;
48985 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48986 do_wakeup = 0;
48987 }
48988
48989 - pipe->waiting_writers++;
48990 + atomic_inc(&pipe->waiting_writers);
48991 pipe_wait(pipe);
48992 - pipe->waiting_writers--;
48993 + atomic_dec(&pipe->waiting_writers);
48994 }
48995
48996 pipe_unlock(pipe);
48997 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
48998 old_fs = get_fs();
48999 set_fs(get_ds());
49000 /* The cast to a user pointer is valid due to the set_fs() */
49001 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49002 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49003 set_fs(old_fs);
49004
49005 return res;
49006 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49007 old_fs = get_fs();
49008 set_fs(get_ds());
49009 /* The cast to a user pointer is valid due to the set_fs() */
49010 - res = vfs_write(file, (const char __user *)buf, count, &pos);
49011 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49012 set_fs(old_fs);
49013
49014 return res;
49015 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49016 goto err;
49017
49018 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49019 - vec[i].iov_base = (void __user *) page_address(page);
49020 + vec[i].iov_base = (void __force_user *) page_address(page);
49021 vec[i].iov_len = this_len;
49022 spd.pages[i] = page;
49023 spd.nr_pages++;
49024 @@ -848,10 +848,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49025 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49026 {
49027 while (!pipe->nrbufs) {
49028 - if (!pipe->writers)
49029 + if (!atomic_read(&pipe->writers))
49030 return 0;
49031
49032 - if (!pipe->waiting_writers && sd->num_spliced)
49033 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49034 return 0;
49035
49036 if (sd->flags & SPLICE_F_NONBLOCK)
49037 @@ -1184,7 +1184,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49038 * out of the pipe right after the splice_to_pipe(). So set
49039 * PIPE_READERS appropriately.
49040 */
49041 - pipe->readers = 1;
49042 + atomic_set(&pipe->readers, 1);
49043
49044 current->splice_pipe = pipe;
49045 }
49046 @@ -1736,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49047 ret = -ERESTARTSYS;
49048 break;
49049 }
49050 - if (!pipe->writers)
49051 + if (!atomic_read(&pipe->writers))
49052 break;
49053 - if (!pipe->waiting_writers) {
49054 + if (!atomic_read(&pipe->waiting_writers)) {
49055 if (flags & SPLICE_F_NONBLOCK) {
49056 ret = -EAGAIN;
49057 break;
49058 @@ -1770,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49059 pipe_lock(pipe);
49060
49061 while (pipe->nrbufs >= pipe->buffers) {
49062 - if (!pipe->readers) {
49063 + if (!atomic_read(&pipe->readers)) {
49064 send_sig(SIGPIPE, current, 0);
49065 ret = -EPIPE;
49066 break;
49067 @@ -1783,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49068 ret = -ERESTARTSYS;
49069 break;
49070 }
49071 - pipe->waiting_writers++;
49072 + atomic_inc(&pipe->waiting_writers);
49073 pipe_wait(pipe);
49074 - pipe->waiting_writers--;
49075 + atomic_dec(&pipe->waiting_writers);
49076 }
49077
49078 pipe_unlock(pipe);
49079 @@ -1821,14 +1821,14 @@ retry:
49080 pipe_double_lock(ipipe, opipe);
49081
49082 do {
49083 - if (!opipe->readers) {
49084 + if (!atomic_read(&opipe->readers)) {
49085 send_sig(SIGPIPE, current, 0);
49086 if (!ret)
49087 ret = -EPIPE;
49088 break;
49089 }
49090
49091 - if (!ipipe->nrbufs && !ipipe->writers)
49092 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49093 break;
49094
49095 /*
49096 @@ -1925,7 +1925,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49097 pipe_double_lock(ipipe, opipe);
49098
49099 do {
49100 - if (!opipe->readers) {
49101 + if (!atomic_read(&opipe->readers)) {
49102 send_sig(SIGPIPE, current, 0);
49103 if (!ret)
49104 ret = -EPIPE;
49105 @@ -1970,7 +1970,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49106 * return EAGAIN if we have the potential of some data in the
49107 * future, otherwise just return 0
49108 */
49109 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49110 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49111 ret = -EAGAIN;
49112
49113 pipe_unlock(ipipe);
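
The splice.c hunks convert pipe->readers, writers and waiting_writers to atomic counters but keep the long-standing rule those counters guard: writing into a pipe with no readers raises SIGPIPE and fails with EPIPE. A short userspace demonstration of that behavior using only pipe(2) and write(2):

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        int fds[2];

        if (pipe(fds) != 0)
            return 1;
        signal(SIGPIPE, SIG_IGN);      /* report EPIPE instead of dying */
        close(fds[0]);                 /* drop the only reader */
        if (write(fds[1], "x", 1) < 0)
            printf("write failed: %s\n", strerror(errno));   /* Broken pipe */
        close(fds[1]);
        return 0;
    }
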
49114 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
49115 index 7fdf6a7..e6cd8ad 100644
49116 --- a/fs/sysfs/dir.c
49117 +++ b/fs/sysfs/dir.c
49118 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
49119 struct sysfs_dirent *sd;
49120 int rc;
49121
49122 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49123 + const char *parent_name = parent_sd->s_name;
49124 +
49125 + mode = S_IFDIR | S_IRWXU;
49126 +
49127 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
49128 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
49129 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
49130 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
49131 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
49132 +#endif
49133 +
49134 /* allocate */
49135 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
49136 if (!sd)
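
The create_dir() hunk defaults sysfs directories to root-only (S_IFDIR | S_IRWXU) and widens the mode again only for a short (parent, name) whitelist, the same paths the GRKERNSEC_SYSFS_RESTRICT help text lists further below. A compact sketch of that table-driven whitelist check, with the pairs copied from the hunk:

    #include <stdio.h>
    #include <string.h>

    /* Return 1 if the directory should stay world-readable/searchable. */
    static int sysfs_whitelisted(const char *parent, const char *name)
    {
        static const char *table[][2] = {
            { "",        "devices" },
            { "",        "fs"      },
            { "devices", "system"  },
            { "fs",      "selinux" },
            { "fs",      "fuse"    },
            { "system",  "cpu"     },
        };
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (!strcmp(parent, table[i][0]) && !strcmp(name, table[i][1]))
                return 1;
        return 0;
    }

    int main(void)
    {
        printf("fs/fuse: %d, fs/ext4: %d\n",
               sysfs_whitelisted("fs", "fuse"), sysfs_whitelisted("fs", "ext4"));
        return 0;
    }
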
49137 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
49138 index 00012e3..8392349 100644
49139 --- a/fs/sysfs/file.c
49140 +++ b/fs/sysfs/file.c
49141 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
49142
49143 struct sysfs_open_dirent {
49144 atomic_t refcnt;
49145 - atomic_t event;
49146 + atomic_unchecked_t event;
49147 wait_queue_head_t poll;
49148 struct list_head buffers; /* goes through sysfs_buffer.list */
49149 };
49150 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
49151 if (!sysfs_get_active(attr_sd))
49152 return -ENODEV;
49153
49154 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49155 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49156 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49157
49158 sysfs_put_active(attr_sd);
49159 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
49160 return -ENOMEM;
49161
49162 atomic_set(&new_od->refcnt, 0);
49163 - atomic_set(&new_od->event, 1);
49164 + atomic_set_unchecked(&new_od->event, 1);
49165 init_waitqueue_head(&new_od->poll);
49166 INIT_LIST_HEAD(&new_od->buffers);
49167 goto retry;
49168 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
49169
49170 sysfs_put_active(attr_sd);
49171
49172 - if (buffer->event != atomic_read(&od->event))
49173 + if (buffer->event != atomic_read_unchecked(&od->event))
49174 goto trigger;
49175
49176 return DEFAULT_POLLMASK;
49177 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
49178
49179 od = sd->s_attr.open;
49180 if (od) {
49181 - atomic_inc(&od->event);
49182 + atomic_inc_unchecked(&od->event);
49183 wake_up_interruptible(&od->poll);
49184 }
49185
49186 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49187 index a7ac78f..02158e1 100644
49188 --- a/fs/sysfs/symlink.c
49189 +++ b/fs/sysfs/symlink.c
49190 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
49191
49192 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49193 {
49194 - char *page = nd_get_link(nd);
49195 + const char *page = nd_get_link(nd);
49196 if (!IS_ERR(page))
49197 free_page((unsigned long)page);
49198 }
49199 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
49200 index c175b4d..8f36a16 100644
49201 --- a/fs/udf/misc.c
49202 +++ b/fs/udf/misc.c
49203 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
49204
49205 u8 udf_tag_checksum(const struct tag *t)
49206 {
49207 - u8 *data = (u8 *)t;
49208 + const u8 *data = (const u8 *)t;
49209 u8 checksum = 0;
49210 int i;
49211 for (i = 0; i < sizeof(struct tag); ++i)
49212 diff --git a/fs/utimes.c b/fs/utimes.c
49213 index ba653f3..06ea4b1 100644
49214 --- a/fs/utimes.c
49215 +++ b/fs/utimes.c
49216 @@ -1,6 +1,7 @@
49217 #include <linux/compiler.h>
49218 #include <linux/file.h>
49219 #include <linux/fs.h>
49220 +#include <linux/security.h>
49221 #include <linux/linkage.h>
49222 #include <linux/mount.h>
49223 #include <linux/namei.h>
49224 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
49225 goto mnt_drop_write_and_out;
49226 }
49227 }
49228 +
49229 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49230 + error = -EACCES;
49231 + goto mnt_drop_write_and_out;
49232 + }
49233 +
49234 mutex_lock(&inode->i_mutex);
49235 error = notify_change(path->dentry, &newattrs);
49236 mutex_unlock(&inode->i_mutex);
49237 diff --git a/fs/xattr.c b/fs/xattr.c
49238 index 82f4337..236473c 100644
49239 --- a/fs/xattr.c
49240 +++ b/fs/xattr.c
49241 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
49242 * Extended attribute SET operations
49243 */
49244 static long
49245 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
49246 +setxattr(struct path *path, const char __user *name, const void __user *value,
49247 size_t size, int flags)
49248 {
49249 int error;
49250 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
49251 return PTR_ERR(kvalue);
49252 }
49253
49254 - error = vfs_setxattr(d, kname, kvalue, size, flags);
49255 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49256 + error = -EACCES;
49257 + goto out;
49258 + }
49259 +
49260 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
49261 +out:
49262 kfree(kvalue);
49263 return error;
49264 }
49265 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
49266 return error;
49267 error = mnt_want_write(path.mnt);
49268 if (!error) {
49269 - error = setxattr(path.dentry, name, value, size, flags);
49270 + error = setxattr(&path, name, value, size, flags);
49271 mnt_drop_write(path.mnt);
49272 }
49273 path_put(&path);
49274 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
49275 return error;
49276 error = mnt_want_write(path.mnt);
49277 if (!error) {
49278 - error = setxattr(path.dentry, name, value, size, flags);
49279 + error = setxattr(&path, name, value, size, flags);
49280 mnt_drop_write(path.mnt);
49281 }
49282 path_put(&path);
49283 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
49284 const void __user *,value, size_t, size, int, flags)
49285 {
49286 struct file *f;
49287 - struct dentry *dentry;
49288 int error = -EBADF;
49289
49290 f = fget(fd);
49291 if (!f)
49292 return error;
49293 - dentry = f->f_path.dentry;
49294 - audit_inode(NULL, dentry);
49295 + audit_inode(NULL, f->f_path.dentry);
49296 error = mnt_want_write_file(f);
49297 if (!error) {
49298 - error = setxattr(dentry, name, value, size, flags);
49299 + error = setxattr(&f->f_path, name, value, size, flags);
49300 mnt_drop_write_file(f);
49301 }
49302 fput(f);
49303 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49304 index 8d5a506..7f62712 100644
49305 --- a/fs/xattr_acl.c
49306 +++ b/fs/xattr_acl.c
49307 @@ -17,8 +17,8 @@
49308 struct posix_acl *
49309 posix_acl_from_xattr(const void *value, size_t size)
49310 {
49311 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49312 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49313 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49314 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49315 int count;
49316 struct posix_acl *acl;
49317 struct posix_acl_entry *acl_e;
49318 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49319 index 188ef2f..adcf864 100644
49320 --- a/fs/xfs/xfs_bmap.c
49321 +++ b/fs/xfs/xfs_bmap.c
49322 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
49323 int nmap,
49324 int ret_nmap);
49325 #else
49326 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49327 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49328 #endif /* DEBUG */
49329
49330 STATIC int
49331 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49332 index 79d05e8..e3e5861 100644
49333 --- a/fs/xfs/xfs_dir2_sf.c
49334 +++ b/fs/xfs/xfs_dir2_sf.c
49335 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
49336 }
49337
49338 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
49339 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49340 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49341 + char name[sfep->namelen];
49342 + memcpy(name, sfep->name, sfep->namelen);
49343 + if (filldir(dirent, name, sfep->namelen,
49344 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
49345 + *offset = off & 0x7fffffff;
49346 + return 0;
49347 + }
49348 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49349 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49350 *offset = off & 0x7fffffff;
49351 return 0;
49352 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49353 index 76f3ca5..f57f712 100644
49354 --- a/fs/xfs/xfs_ioctl.c
49355 +++ b/fs/xfs/xfs_ioctl.c
49356 @@ -128,7 +128,7 @@ xfs_find_handle(
49357 }
49358
49359 error = -EFAULT;
49360 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49361 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49362 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49363 goto out_put;
49364
49365 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49366 index ab30253..4d86958 100644
49367 --- a/fs/xfs/xfs_iops.c
49368 +++ b/fs/xfs/xfs_iops.c
49369 @@ -447,7 +447,7 @@ xfs_vn_put_link(
49370 struct nameidata *nd,
49371 void *p)
49372 {
49373 - char *s = nd_get_link(nd);
49374 + const char *s = nd_get_link(nd);
49375
49376 if (!IS_ERR(s))
49377 kfree(s);
49378 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49379 new file mode 100644
49380 index 0000000..2645296
49381 --- /dev/null
49382 +++ b/grsecurity/Kconfig
49383 @@ -0,0 +1,1079 @@
49384 +#
49385 +# grsecurity configuration
49386 +#
49387 +
49388 +menu "Grsecurity"
49389 +
49390 +config GRKERNSEC
49391 + bool "Grsecurity"
49392 + select CRYPTO
49393 + select CRYPTO_SHA256
49394 + help
49395 + If you say Y here, you will be able to configure many features
49396 + that will enhance the security of your system. It is highly
49397 + recommended that you say Y here and read through the help
49398 + for each option so that you fully understand the features and
49399 + can evaluate their usefulness for your machine.
49400 +
49401 +choice
49402 + prompt "Security Level"
49403 + depends on GRKERNSEC
49404 + default GRKERNSEC_CUSTOM
49405 +
49406 +config GRKERNSEC_LOW
49407 + bool "Low"
49408 + select GRKERNSEC_LINK
49409 + select GRKERNSEC_FIFO
49410 + select GRKERNSEC_RANDNET
49411 + select GRKERNSEC_DMESG
49412 + select GRKERNSEC_CHROOT
49413 + select GRKERNSEC_CHROOT_CHDIR
49414 +
49415 + help
49416 + If you choose this option, several of the grsecurity options will
49417 + be enabled that will give you greater protection against a number
49418 + of attacks, while assuring that none of your software will have any
49419 + conflicts with the additional security measures. If you run a lot
49420 + of unusual software, or you are having problems with the higher
49421 + security levels, you should say Y here. With this option, the
49422 + following features are enabled:
49423 +
49424 + - Linking restrictions
49425 + - FIFO restrictions
49426 + - Restricted dmesg
49427 + - Enforced chdir("/") on chroot
49428 + - Runtime module disabling
49429 +
49430 +config GRKERNSEC_MEDIUM
49431 + bool "Medium"
49432 + select PAX
49433 + select PAX_EI_PAX
49434 + select PAX_PT_PAX_FLAGS
49435 + select PAX_HAVE_ACL_FLAGS
49436 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49437 + select GRKERNSEC_CHROOT
49438 + select GRKERNSEC_CHROOT_SYSCTL
49439 + select GRKERNSEC_LINK
49440 + select GRKERNSEC_FIFO
49441 + select GRKERNSEC_DMESG
49442 + select GRKERNSEC_RANDNET
49443 + select GRKERNSEC_FORKFAIL
49444 + select GRKERNSEC_TIME
49445 + select GRKERNSEC_SIGNAL
49446 + select GRKERNSEC_CHROOT
49447 + select GRKERNSEC_CHROOT_UNIX
49448 + select GRKERNSEC_CHROOT_MOUNT
49449 + select GRKERNSEC_CHROOT_PIVOT
49450 + select GRKERNSEC_CHROOT_DOUBLE
49451 + select GRKERNSEC_CHROOT_CHDIR
49452 + select GRKERNSEC_CHROOT_MKNOD
49453 + select GRKERNSEC_PROC
49454 + select GRKERNSEC_PROC_USERGROUP
49455 + select PAX_RANDUSTACK
49456 + select PAX_ASLR
49457 + select PAX_RANDMMAP
49458 + select PAX_REFCOUNT if (X86 || SPARC64)
49459 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49460 +
49461 + help
49462 + If you say Y here, several features in addition to those included
49463 + in the low additional security level will be enabled. These
49464 + features provide even more security to your system, though in rare
49465 + cases they may be incompatible with very old or poorly written
49466 + software. If you enable this option, make sure that your auth
49467 + service (identd) is running as gid 1001. With this option,
49468 + the following features (in addition to those provided in the
49469 + low additional security level) will be enabled:
49470 +
49471 + - Failed fork logging
49472 + - Time change logging
49473 + - Signal logging
49474 + - Deny mounts in chroot
49475 + - Deny double chrooting
49476 + - Deny sysctl writes in chroot
49477 + - Deny mknod in chroot
49478 + - Deny access to abstract AF_UNIX sockets out of chroot
49479 + - Deny pivot_root in chroot
49480 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49481 + - /proc restrictions with special GID set to 10 (usually wheel)
49482 + - Address Space Layout Randomization (ASLR)
49483 + - Prevent exploitation of most refcount overflows
49484 + - Bounds checking of copying between the kernel and userland
49485 +
49486 +config GRKERNSEC_HIGH
49487 + bool "High"
49488 + select GRKERNSEC_LINK
49489 + select GRKERNSEC_FIFO
49490 + select GRKERNSEC_DMESG
49491 + select GRKERNSEC_FORKFAIL
49492 + select GRKERNSEC_TIME
49493 + select GRKERNSEC_SIGNAL
49494 + select GRKERNSEC_CHROOT
49495 + select GRKERNSEC_CHROOT_SHMAT
49496 + select GRKERNSEC_CHROOT_UNIX
49497 + select GRKERNSEC_CHROOT_MOUNT
49498 + select GRKERNSEC_CHROOT_FCHDIR
49499 + select GRKERNSEC_CHROOT_PIVOT
49500 + select GRKERNSEC_CHROOT_DOUBLE
49501 + select GRKERNSEC_CHROOT_CHDIR
49502 + select GRKERNSEC_CHROOT_MKNOD
49503 + select GRKERNSEC_CHROOT_CAPS
49504 + select GRKERNSEC_CHROOT_SYSCTL
49505 + select GRKERNSEC_CHROOT_FINDTASK
49506 + select GRKERNSEC_SYSFS_RESTRICT
49507 + select GRKERNSEC_PROC
49508 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49509 + select GRKERNSEC_HIDESYM
49510 + select GRKERNSEC_BRUTE
49511 + select GRKERNSEC_PROC_USERGROUP
49512 + select GRKERNSEC_KMEM
49513 + select GRKERNSEC_RESLOG
49514 + select GRKERNSEC_RANDNET
49515 + select GRKERNSEC_PROC_ADD
49516 + select GRKERNSEC_CHROOT_CHMOD
49517 + select GRKERNSEC_CHROOT_NICE
49518 + select GRKERNSEC_SETXID if (X86 || SPARC64 || PPC || ARM || MIPS)
49519 + select GRKERNSEC_AUDIT_MOUNT
49520 + select GRKERNSEC_MODHARDEN if (MODULES)
49521 + select GRKERNSEC_HARDEN_PTRACE
49522 + select GRKERNSEC_PTRACE_READEXEC
49523 + select GRKERNSEC_VM86 if (X86_32)
49524 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49525 + select PAX
49526 + select PAX_RANDUSTACK
49527 + select PAX_ASLR
49528 + select PAX_RANDMMAP
49529 + select PAX_NOEXEC
49530 + select PAX_MPROTECT
49531 + select PAX_EI_PAX
49532 + select PAX_PT_PAX_FLAGS
49533 + select PAX_HAVE_ACL_FLAGS
49534 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49535 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
49536 + select PAX_RANDKSTACK if (X86_TSC && X86)
49537 + select PAX_SEGMEXEC if (X86_32)
49538 + select PAX_PAGEEXEC
49539 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49540 + select PAX_EMUTRAMP if (PARISC)
49541 + select PAX_EMUSIGRT if (PARISC)
49542 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49543 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49544 + select PAX_REFCOUNT if (X86 || SPARC64)
49545 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49546 + help
49547 + If you say Y here, many of the features of grsecurity will be
49548 + enabled, which will protect you against many kinds of attacks
49549 + against your system. The heightened security comes at a cost
49550 + of an increased chance of incompatibilities with rare software
49551 + on your machine. Since this security level enables PaX, you should
49552 + view <http://pax.grsecurity.net> and read about the PaX
49553 + project. While you are there, download chpax and run it on
49554 + binaries that cause problems with PaX. Also remember that
49555 + since the /proc restrictions are enabled, you must run your
49556 + identd as gid 1001. This security level enables the following
49557 + features in addition to those listed in the low and medium
49558 + security levels:
49559 +
49560 + - Additional /proc restrictions
49561 + - Chmod restrictions in chroot
49562 + - No signals, ptrace, or viewing of processes outside of chroot
49563 + - Capability restrictions in chroot
49564 + - Deny fchdir out of chroot
49565 + - Priority restrictions in chroot
49566 + - Segmentation-based implementation of PaX
49567 + - Mprotect restrictions
49568 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49569 + - Kernel stack randomization
49570 + - Mount/unmount/remount logging
49571 + - Kernel symbol hiding
49572 + - Hardening of module auto-loading
49573 + - Ptrace restrictions
49574 + - Restricted vm86 mode
49575 + - Restricted sysfs/debugfs
49576 + - Active kernel exploit response
49577 +
49578 +config GRKERNSEC_CUSTOM
49579 + bool "Custom"
49580 + help
49581 + If you say Y here, you will be able to configure every grsecurity
49582 + option, which allows you to enable many more features that aren't
49583 + covered in the basic security levels. These additional features
49584 + include TPE, socket restrictions, and the sysctl system for
49585 + grsecurity. It is advised that you read through the help for
49586 + each option to determine its usefulness in your situation.
49587 +
49588 +endchoice
49589 +
49590 +menu "Memory Protections"
49591 +depends on GRKERNSEC
49592 +
49593 +config GRKERNSEC_KMEM
49594 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49595 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49596 + help
49597 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49598 + be written to or read from to modify or leak the contents of the running
49599 + kernel. /dev/port will also not be allowed to be opened. If you have module
49600 + support disabled, enabling this will close up four ways that are
49601 + currently used to insert malicious code into the running kernel.
49602 + Even with all these features enabled, we still highly recommend that
49603 + you use the RBAC system, as it is still possible for an attacker to
49604 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49605 + If you are not using XFree86, you may be able to stop this additional
49606 + case by enabling the 'Disable privileged I/O' option. Though nothing
49607 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49608 + but only to video memory, which is the only writing we allow in this
49609 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
49610 + will not be allowed to be changed to PROT_WRITE with mprotect later.
49611 + It is highly recommended that you say Y here if you meet all the
49612 + conditions above.
49613 +
49614 +config GRKERNSEC_VM86
49615 + bool "Restrict VM86 mode"
49616 + depends on X86_32
49617 +
49618 + help
49619 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49620 + make use of a special execution mode on 32bit x86 processors called
49621 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49622 + video cards and will still work with this option enabled. The purpose
49623 + of the option is to prevent exploitation of emulation errors in
49624 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
49625 + Nearly all users should be able to enable this option.
49626 +
49627 +config GRKERNSEC_IO
49628 + bool "Disable privileged I/O"
49629 + depends on X86
49630 + select RTC_CLASS
49631 + select RTC_INTF_DEV
49632 + select RTC_DRV_CMOS
49633 +
49634 + help
49635 + If you say Y here, all ioperm and iopl calls will return an error.
49636 + Ioperm and iopl can be used to modify the running kernel.
49637 + Unfortunately, some programs need this access to operate properly,
49638 + the most notable of which are XFree86 and hwclock. hwclock can be
49639 + remedied by having RTC support in the kernel, so real-time
49640 + clock support is enabled if this option is enabled, to ensure
49641 + that hwclock operates correctly. XFree86 still will not
49642 + operate correctly with this option enabled, so DO NOT CHOOSE Y
49643 + IF YOU USE XFree86. If you use XFree86 and you still want to
49644 + protect your kernel against modification, use the RBAC system.
49645 +
49646 +config GRKERNSEC_PROC_MEMMAP
49647 + bool "Harden ASLR against information leaks and entropy reduction"
49648 + default y if (PAX_NOEXEC || PAX_ASLR)
49649 + depends on PAX_NOEXEC || PAX_ASLR
49650 + help
49651 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49652 + give no information about the addresses of its mappings if
49653 + PaX features that rely on random addresses are enabled on the task.
49654 + In addition to sanitizing this information and disabling other
49655 + dangerous sources of information, this option causes reads of sensitive
49656 + /proc/<pid> entries to return no data when the file descriptor was opened
49657 + in a different task than the one performing the read. Such attempts are logged.
49658 + This option also limits argv/env strings for suid/sgid binaries
49659 + to 512KB to prevent a complete exhaustion of the stack entropy provided
49660 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49661 + binaries to prevent alternative mmap layouts from being abused.
49662 +
49663 + If you use PaX it is essential that you say Y here as it closes up
49664 + several holes that make full ASLR useless locally.
49665 +
49666 +config GRKERNSEC_BRUTE
49667 + bool "Deter exploit bruteforcing"
49668 + help
49669 + If you say Y here, attempts to bruteforce exploits against forking
49670 + daemons such as apache or sshd, as well as against suid/sgid binaries
49671 + will be deterred. When a child of a forking daemon is killed by PaX
49672 + or crashes due to an illegal instruction or other suspicious signal,
49673 + the parent process will be delayed 30 seconds upon every subsequent
49674 + fork until the administrator is able to assess the situation and
49675 + restart the daemon.
49676 + In the suid/sgid case, the attempt is logged, the user has all their
49677 + processes terminated, and they are prevented from executing any further
49678 + processes for 15 minutes.
49679 + It is recommended that you also enable signal logging in the auditing
49680 + section so that logs are generated when a process triggers a suspicious
49681 + signal.
49682 + If the sysctl option is enabled, a sysctl option with name
49683 + "deter_bruteforce" is created.
49684 +
49685 +
49686 +config GRKERNSEC_MODHARDEN
49687 + bool "Harden module auto-loading"
49688 + depends on MODULES
49689 + help
49690 + If you say Y here, module auto-loading in response to use of some
49691 + feature implemented by an unloaded module will be restricted to
49692 + root users. Enabling this option helps defend against attacks
49693 + by unprivileged users who abuse the auto-loading behavior to
49694 + cause a vulnerable module to load that is then exploited.
49695 +
49696 + If this option prevents a legitimate use of auto-loading for a
49697 + non-root user, the administrator can execute modprobe manually
49698 + with the exact name of the module mentioned in the alert log.
49699 + Alternatively, the administrator can add the module to the list
49700 + of modules loaded at boot by modifying init scripts.
49701 +
49702 + Modification of init scripts will most likely be needed on
49703 + Ubuntu servers with encrypted home directory support enabled,
49704 + as the first non-root user logging in will cause the ecb(aes),
49705 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49706 +
49707 +config GRKERNSEC_HIDESYM
49708 + bool "Hide kernel symbols"
49709 + help
49710 + If you say Y here, getting information on loaded modules, and
49711 + displaying all kernel symbols through a syscall will be restricted
49712 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49713 + /proc/kallsyms will be restricted to the root user. The RBAC
49714 + system can hide that entry even from root.
49715 +
49716 + This option also prevents leaking of kernel addresses through
49717 + several /proc entries.
49718 +
49719 + Note that this option is only effective provided the following
49720 + conditions are met:
49721 + 1) The kernel using grsecurity is not precompiled by some distribution
49722 + 2) You have also enabled GRKERNSEC_DMESG
49723 + 3) You are using the RBAC system and hiding other files such as your
49724 + kernel image and System.map. Alternatively, enabling this option
49725 + causes the permissions on /boot, /lib/modules, and the kernel
49726 + source directory to change at compile time to prevent
49727 + reading by non-root users.
49728 + If the above conditions are met, this option will aid in providing a
49729 + useful protection against local kernel exploitation of overflows
49730 + and arbitrary read/write vulnerabilities.
49731 +
49732 +config GRKERNSEC_KERN_LOCKOUT
49733 + bool "Active kernel exploit response"
49734 + depends on X86 || ARM || PPC || SPARC
49735 + help
49736 + If you say Y here, when a PaX alert is triggered due to suspicious
49737 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49738 + or an OOPs occurs due to bad memory accesses, instead of just
49739 + terminating the offending process (and potentially allowing
49740 + a subsequent exploit from the same user), we will take one of two
49741 + actions:
49742 + If the user was root, we will panic the system
49743 + If the user was non-root, we will log the attempt, terminate
49744 + all processes owned by the user, then prevent them from creating
49745 + any new processes until the system is restarted
49746 + This deters repeated kernel exploitation/bruteforcing attempts
49747 + and is useful for later forensics.
49748 +
49749 +endmenu
49750 +menu "Role Based Access Control Options"
49751 +depends on GRKERNSEC
49752 +
49753 +config GRKERNSEC_RBAC_DEBUG
49754 + bool
49755 +
49756 +config GRKERNSEC_NO_RBAC
49757 + bool "Disable RBAC system"
49758 + help
49759 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49760 + preventing the RBAC system from being enabled. You should only say Y
49761 + here if you have no intention of using the RBAC system, so as to prevent
49762 + an attacker with root access from misusing the RBAC system to hide files
49763 + and processes when loadable module support and /dev/[k]mem have been
49764 + locked down.
49765 +
49766 +config GRKERNSEC_ACL_HIDEKERN
49767 + bool "Hide kernel processes"
49768 + help
49769 + If you say Y here, all kernel threads will be hidden to all
49770 + processes but those whose subject has the "view hidden processes"
49771 + flag.
49772 +
49773 +config GRKERNSEC_ACL_MAXTRIES
49774 + int "Maximum tries before password lockout"
49775 + default 3
49776 + help
49777 + This option enforces the maximum number of times a user can attempt
49778 + to authorize themselves with the grsecurity RBAC system before being
49779 + denied the ability to attempt authorization again for a specified time.
49780 + The lower the number, the harder it will be to brute-force a password.
49781 +
49782 +config GRKERNSEC_ACL_TIMEOUT
49783 + int "Time to wait after max password tries, in seconds"
49784 + default 30
49785 + help
49786 + This option specifies the time the user must wait after attempting to
49787 + authorize to the RBAC system with the maximum number of invalid
49788 + passwords. The higher the number, the harder it will be to brute-force
49789 + a password.
49790 +
49791 +endmenu
49792 +menu "Filesystem Protections"
49793 +depends on GRKERNSEC
49794 +
49795 +config GRKERNSEC_PROC
49796 + bool "Proc restrictions"
49797 + help
49798 + If you say Y here, the permissions of the /proc filesystem
49799 + will be altered to enhance system security and privacy. You MUST
49800 + choose either a user only restriction or a user and group restriction.
49801 + Depending upon the option you choose, you can either restrict users to
49802 + seeing only the processes they themselves run, or choose a special group
49803 + whose members can view all processes and files normally restricted to
49804 + root. NOTE: If you're running identd or
49805 + ntpd as a non-root user, you will have to run it as the group you
49806 + specify here.
49807 +
49808 +config GRKERNSEC_PROC_USER
49809 + bool "Restrict /proc to user only"
49810 + depends on GRKERNSEC_PROC
49811 + help
49812 + If you say Y here, non-root users will only be able to view their own
49813 + processes, and will be restricted from viewing network-related information
49814 + and kernel symbol and module information.
49815 +
49816 +config GRKERNSEC_PROC_USERGROUP
49817 + bool "Allow special group"
49818 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49819 + help
49820 + If you say Y here, you will be able to select a group that will be
49821 + able to view all processes and network-related information. If you've
49822 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49823 + remain hidden. This option is useful if you want to run identd as
49824 + a non-root user.
49825 +
49826 +config GRKERNSEC_PROC_GID
49827 + int "GID for special group"
49828 + depends on GRKERNSEC_PROC_USERGROUP
49829 + default 1001
49830 +
49831 +config GRKERNSEC_PROC_ADD
49832 + bool "Additional restrictions"
49833 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49834 + help
49835 + If you say Y here, additional restrictions will be placed on
49836 + /proc that keep normal users from viewing device information and
49837 + slabinfo information that could be useful for exploits.
49838 +
49839 +config GRKERNSEC_LINK
49840 + bool "Linking restrictions"
49841 + help
49842 + If you say Y here, /tmp race exploits will be prevented, since users
49843 + will no longer be able to follow symlinks owned by other users in
49844 + world-writable +t directories (e.g. /tmp), unless the owner of the
49845 + symlink is the owner of the directory. Users will also not be
49846 + able to hardlink to files they do not own. If the sysctl option is
49847 + enabled, a sysctl option with name "linking_restrictions" is created.
49848 +
49849 +config GRKERNSEC_FIFO
49850 + bool "FIFO restrictions"
49851 + help
49852 + If you say Y here, users will not be able to write to FIFOs they don't
49853 + own in world-writable +t directories (e.g. /tmp), unless the owner of
49854 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
49855 + option is enabled, a sysctl option with name "fifo_restrictions" is
49856 + created.
49857 +
49858 +config GRKERNSEC_SYSFS_RESTRICT
49859 + bool "Sysfs/debugfs restriction"
49860 + depends on SYSFS
49861 + help
49862 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49863 + any filesystem normally mounted under it (e.g. debugfs) will be
49864 + mostly accessible only by root. These filesystems generally provide access
49865 + to hardware and debug information that isn't appropriate for unprivileged
49866 + users of the system. Sysfs and debugfs have also become a large source
49867 + of new vulnerabilities, ranging from infoleaks to local compromise.
49868 + There has been very little oversight with an eye toward security involved
49869 + in adding new exporters of information to these filesystems, so their
49870 + use is discouraged.
49871 + For reasons of compatibility, a few directories have been whitelisted
49872 + for access by non-root users:
49873 + /sys/fs/selinux
49874 + /sys/fs/fuse
49875 + /sys/devices/system/cpu
49876 +
49877 +config GRKERNSEC_ROFS
49878 + bool "Runtime read-only mount protection"
49879 + help
49880 + If you say Y here, a sysctl option with name "romount_protect" will
49881 + be created. By setting this option to 1 at runtime, filesystems
49882 + will be protected in the following ways:
49883 + * No new writable mounts will be allowed
49884 + * Existing read-only mounts won't be able to be remounted read/write
49885 + * Write operations will be denied on all block devices
49886 + This option acts independently of grsec_lock: once it is set to 1,
49887 + it cannot be turned off. Therefore, please be mindful of the resulting
49888 + behavior if this option is enabled in an init script on a read-only
49889 + filesystem. This feature is mainly intended for secure embedded systems.
49890 +
49891 +config GRKERNSEC_CHROOT
49892 + bool "Chroot jail restrictions"
49893 + help
49894 + If you say Y here, you will be able to choose several options that will
49895 + make breaking out of a chrooted jail much more difficult. If you
49896 + encounter no software incompatibilities with the following options, it
49897 + is recommended that you enable each one.
49898 +
49899 +config GRKERNSEC_CHROOT_MOUNT
49900 + bool "Deny mounts"
49901 + depends on GRKERNSEC_CHROOT
49902 + help
49903 + If you say Y here, processes inside a chroot will not be able to
49904 + mount or remount filesystems. If the sysctl option is enabled, a
49905 + sysctl option with name "chroot_deny_mount" is created.
49906 +
49907 +config GRKERNSEC_CHROOT_DOUBLE
49908 + bool "Deny double-chroots"
49909 + depends on GRKERNSEC_CHROOT
49910 + help
49911 + If you say Y here, processes inside a chroot will not be able to chroot
49912 + again outside the chroot. This is a widely used method of breaking
49913 + out of a chroot jail and should not be allowed. If the sysctl
49914 + option is enabled, a sysctl option with name
49915 + "chroot_deny_chroot" is created.
49916 +
49917 +config GRKERNSEC_CHROOT_PIVOT
49918 + bool "Deny pivot_root in chroot"
49919 + depends on GRKERNSEC_CHROOT
49920 + help
49921 + If you say Y here, processes inside a chroot will not be able to use
49922 + a function called pivot_root() that was introduced in Linux 2.3.41. It
49923 + works similarly to chroot in that it changes the root filesystem. This
49924 + function could be misused in a chrooted process to attempt to break out
49925 + of the chroot, and therefore should not be allowed. If the sysctl
49926 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
49927 + created.
49928 +
49929 +config GRKERNSEC_CHROOT_CHDIR
49930 + bool "Enforce chdir(\"/\") on all chroots"
49931 + depends on GRKERNSEC_CHROOT
49932 + help
49933 + If you say Y here, the current working directory of all newly-chrooted
49934 + applications will be set to the root directory of the chroot.
49935 + The man page on chroot(2) states:
49936 + Note that this call does not change the current working
49937 + directory, so that `.' can be outside the tree rooted at
49938 + `/'. In particular, the super-user can escape from a
49939 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
49940 +
49941 + It is recommended that you say Y here, since it's not known to break
49942 + any software. If the sysctl option is enabled, a sysctl option with
49943 + name "chroot_enforce_chdir" is created.
49944 +
49945 +config GRKERNSEC_CHROOT_CHMOD
49946 + bool "Deny (f)chmod +s"
49947 + depends on GRKERNSEC_CHROOT
49948 + help
49949 + If you say Y here, processes inside a chroot will not be able to chmod
49950 + or fchmod files to make them have suid or sgid bits. This protects
49951 + against another published method of breaking a chroot. If the sysctl
49952 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
49953 + created.
49954 +
49955 +config GRKERNSEC_CHROOT_FCHDIR
49956 + bool "Deny fchdir out of chroot"
49957 + depends on GRKERNSEC_CHROOT
49958 + help
49959 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
49960 + to a file descriptor of the chrooting process that points to a directory
49961 + outside the filesystem will be stopped. If the sysctl option
49962 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
49963 +
49964 +config GRKERNSEC_CHROOT_MKNOD
49965 + bool "Deny mknod"
49966 + depends on GRKERNSEC_CHROOT
49967 + help
49968 + If you say Y here, processes inside a chroot will not be allowed to
49969 + mknod. The problem with using mknod inside a chroot is that it
49970 + would allow an attacker to create a device entry that is the same
49971 + as one on the physical root of your system, which could be
49972 + anything from the console device to a device for your hard drive (which
49973 + they could then use to wipe the drive or steal data). It is recommended
49974 + that you say Y here, unless you run into software incompatibilities.
49975 + If the sysctl option is enabled, a sysctl option with name
49976 + "chroot_deny_mknod" is created.
49977 +
49978 +config GRKERNSEC_CHROOT_SHMAT
49979 + bool "Deny shmat() out of chroot"
49980 + depends on GRKERNSEC_CHROOT
49981 + help
49982 + If you say Y here, processes inside a chroot will not be able to attach
49983 + to shared memory segments that were created outside of the chroot jail.
49984 + It is recommended that you say Y here. If the sysctl option is enabled,
49985 + a sysctl option with name "chroot_deny_shmat" is created.
49986 +
49987 +config GRKERNSEC_CHROOT_UNIX
49988 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
49989 + depends on GRKERNSEC_CHROOT
49990 + help
49991 + If you say Y here, processes inside a chroot will not be able to
49992 + connect to abstract (meaning not belonging to a filesystem) Unix
49993 + domain sockets that were bound outside of a chroot. It is recommended
49994 + that you say Y here. If the sysctl option is enabled, a sysctl option
49995 + with name "chroot_deny_unix" is created.
49996 +
49997 +config GRKERNSEC_CHROOT_FINDTASK
49998 + bool "Protect outside processes"
49999 + depends on GRKERNSEC_CHROOT
50000 + help
50001 + If you say Y here, processes inside a chroot will not be able to
50002 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50003 + getsid, or view any process outside of the chroot. If the sysctl
50004 + option is enabled, a sysctl option with name "chroot_findtask" is
50005 + created.
50006 +
50007 +config GRKERNSEC_CHROOT_NICE
50008 + bool "Restrict priority changes"
50009 + depends on GRKERNSEC_CHROOT
50010 + help
50011 + If you say Y here, processes inside a chroot will not be able to raise
50012 + the priority of processes in the chroot, or alter the priority of
50013 + processes outside the chroot. This provides more security than simply
50014 + removing CAP_SYS_NICE from the process' capability set. If the
50015 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50016 + is created.
50017 +
50018 +config GRKERNSEC_CHROOT_SYSCTL
50019 + bool "Deny sysctl writes"
50020 + depends on GRKERNSEC_CHROOT
50021 + help
50022 + If you say Y here, an attacker in a chroot will not be able to
50023 + write to sysctl entries, either by sysctl(2) or through a /proc
50024 + interface. It is strongly recommended that you say Y here. If the
50025 + sysctl option is enabled, a sysctl option with name
50026 + "chroot_deny_sysctl" is created.
50027 +
50028 +config GRKERNSEC_CHROOT_CAPS
50029 + bool "Capability restrictions"
50030 + depends on GRKERNSEC_CHROOT
50031 + help
50032 + If you say Y here, the capabilities on all processes within a
50033 + chroot jail will be lowered to stop module insertion, raw i/o,
50034 + system and net admin tasks, rebooting the system, modifying immutable
50035 + files, modifying IPC owned by another, and changing the system time.
50036 + This is left an option because it can break some apps. Disable this
50037 + if your chrooted apps are having problems performing those kinds of
50038 + tasks. If the sysctl option is enabled, a sysctl option with
50039 + name "chroot_caps" is created.
50040 +
50041 +endmenu
50042 +menu "Kernel Auditing"
50043 +depends on GRKERNSEC
50044 +
50045 +config GRKERNSEC_AUDIT_GROUP
50046 + bool "Single group for auditing"
50047 + help
50048 + If you say Y here, the exec, chdir, and (un)mount logging features
50049 + will only operate on a group you specify. This option is recommended
50050 + if you only want to watch certain users instead of having a large
50051 + amount of logs from the entire system. If the sysctl option is enabled,
50052 + a sysctl option with name "audit_group" is created.
50053 +
50054 +config GRKERNSEC_AUDIT_GID
50055 + int "GID for auditing"
50056 + depends on GRKERNSEC_AUDIT_GROUP
50057 + default 1007
50058 +
50059 +config GRKERNSEC_EXECLOG
50060 + bool "Exec logging"
50061 + help
50062 + If you say Y here, all execve() calls will be logged (since the
50063 + other exec*() calls are frontends to execve(), all execution
50064 + will be logged). Useful for shell-servers that like to keep track
50065 + of their users. If the sysctl option is enabled, a sysctl option with
50066 + name "exec_logging" is created.
50067 + WARNING: This option, when enabled, will produce a LOT of logs, especially
50068 + on an active system.
50069 +
50070 +config GRKERNSEC_RESLOG
50071 + bool "Resource logging"
50072 + help
50073 + If you say Y here, all attempts to overstep resource limits will
50074 + be logged with the resource name, the requested size, and the current
50075 + limit. It is highly recommended that you say Y here. If the sysctl
50076 + option is enabled, a sysctl option with name "resource_logging" is
50077 + created. If the RBAC system is enabled, the sysctl value is ignored.
50078 +
50079 +config GRKERNSEC_CHROOT_EXECLOG
50080 + bool "Log execs within chroot"
50081 + help
50082 + If you say Y here, all executions inside a chroot jail will be logged
50083 + to syslog. This can cause a large amount of logs if certain
50084 + applications (e.g. djb's daemontools) are installed on the system, and
50085 + is therefore left as an option. If the sysctl option is enabled, a
50086 + sysctl option with name "chroot_execlog" is created.
50087 +
50088 +config GRKERNSEC_AUDIT_PTRACE
50089 + bool "Ptrace logging"
50090 + help
50091 + If you say Y here, all attempts to attach to a process via ptrace
50092 + will be logged. If the sysctl option is enabled, a sysctl option
50093 + with name "audit_ptrace" is created.
50094 +
50095 +config GRKERNSEC_AUDIT_CHDIR
50096 + bool "Chdir logging"
50097 + help
50098 + If you say Y here, all chdir() calls will be logged. If the sysctl
50099 + option is enabled, a sysctl option with name "audit_chdir" is created.
50100 +
50101 +config GRKERNSEC_AUDIT_MOUNT
50102 + bool "(Un)Mount logging"
50103 + help
50104 + If you say Y here, all mounts and unmounts will be logged. If the
50105 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50106 + created.
50107 +
50108 +config GRKERNSEC_SIGNAL
50109 + bool "Signal logging"
50110 + help
50111 + If you say Y here, certain important signals will be logged, such as
50112 + SIGSEGV, which will as a result inform you when an error in a program
50113 + has occurred, which in some cases could mean a possible exploit attempt.
50114 + If the sysctl option is enabled, a sysctl option with name
50115 + "signal_logging" is created.
50116 +
50117 +config GRKERNSEC_FORKFAIL
50118 + bool "Fork failure logging"
50119 + help
50120 + If you say Y here, all failed fork() attempts will be logged.
50121 + This could suggest a fork bomb, or someone attempting to overstep
50122 + their process limit. If the sysctl option is enabled, a sysctl option
50123 + with name "forkfail_logging" is created.
50124 +
50125 +config GRKERNSEC_TIME
50126 + bool "Time change logging"
50127 + help
50128 + If you say Y here, any changes of the system clock will be logged.
50129 + If the sysctl option is enabled, a sysctl option with name
50130 + "timechange_logging" is created.
50131 +
50132 +config GRKERNSEC_PROC_IPADDR
50133 + bool "/proc/<pid>/ipaddr support"
50134 + help
50135 + If you say Y here, a new entry will be added to each /proc/<pid>
50136 + directory that contains the IP address of the person using the task.
50137 + The IP is carried across local TCP and AF_UNIX stream sockets.
50138 + This information can be useful for IDS/IPSes to perform remote response
50139 + to a local attack. The entry is readable by only the owner of the
50140 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50141 + the RBAC system), and thus does not create privacy concerns.
50142 +
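(Editorial illustration, not part of the patch.) A minimal userspace sketch that reads the /proc/<pid>/ipaddr entry described above; it assumes GRKERNSEC_PROC_IPADDR is enabled and that the caller owns the target process.

#include <stdio.h>

/* Editorial sketch: print the saved IP address for a task, defaulting to
 * the current process ("self") when no PID argument is given. */
int main(int argc, char **argv)
{
	char path[64];
	char addr[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/ipaddr",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(addr, sizeof(addr), f))
		printf("%s", addr);
	fclose(f);
	return 0;
}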
50143 +config GRKERNSEC_RWXMAP_LOG
50144 + bool 'Denied RWX mmap/mprotect logging'
50145 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50146 + help
50147 + If you say Y here, calls to mmap() and mprotect() with explicit
50148 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50149 + denied by the PAX_MPROTECT feature. If the sysctl option is
50150 + enabled, a sysctl option with name "rwxmap_logging" is created.
50151 +
50152 +config GRKERNSEC_AUDIT_TEXTREL
50153 + bool 'ELF text relocations logging (READ HELP)'
50154 + depends on PAX_MPROTECT
50155 + help
50156 + If you say Y here, text relocations will be logged with the filename
50157 + of the offending library or binary. The purpose of the feature is
50158 + to help Linux distribution developers get rid of libraries and
50159 + binaries that need text relocations which hinder the future progress
50160 + of PaX. Only Linux distribution developers should say Y here, and
50161 + never on a production machine, as this option creates an information
50162 + leak that could aid an attacker in defeating the randomization of
50163 + a single memory region. If the sysctl option is enabled, a sysctl
50164 + option with name "audit_textrel" is created.
50165 +
50166 +endmenu
50167 +
50168 +menu "Executable Protections"
50169 +depends on GRKERNSEC
50170 +
50171 +config GRKERNSEC_DMESG
50172 + bool "Dmesg(8) restriction"
50173 + help
50174 + If you say Y here, non-root users will not be able to use dmesg(8)
50175 + to view up to the last 4kb of messages in the kernel's log buffer.
50176 + The kernel's log buffer often contains kernel addresses and other
50177 + identifying information useful to an attacker in fingerprinting a
50178 + system for a targeted exploit.
50179 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50180 + created.
50181 +
50182 +config GRKERNSEC_HARDEN_PTRACE
50183 + bool "Deter ptrace-based process snooping"
50184 + help
50185 + If you say Y here, TTY sniffers and other malicious monitoring
50186 + programs implemented through ptrace will be defeated. If you
50187 + have been using the RBAC system, this option has already been
50188 + enabled for several years for all users, with the ability to make
50189 + fine-grained exceptions.
50190 +
50191 + This option only affects the ability of non-root users to ptrace
50192 + processes that are not a descendant of the ptracing process.
50193 + This means that strace ./binary and gdb ./binary will still work,
50194 + but attaching to arbitrary processes will not. If the sysctl
50195 + option is enabled, a sysctl option with name "harden_ptrace" is
50196 + created.
50197 +
50198 +config GRKERNSEC_PTRACE_READEXEC
50199 + bool "Require read access to ptrace sensitive binaries"
50200 + help
50201 + If you say Y here, unprivileged users will not be able to ptrace unreadable
50202 + binaries. This option is useful in environments that
50203 + remove the read bits (e.g. file mode 4711) from suid binaries to
50204 + prevent infoleaking of their contents. This option adds
50205 + consistency to the use of that file mode, as the binary could normally
50206 + be read out when run without privileges while ptracing.
50207 +
50208 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
50209 + is created.
50210 +
50211 +config GRKERNSEC_SETXID
50212 + bool "Enforce consistent multithreaded privileges"
50213 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
50214 + help
50215 + If you say Y here, a change from a root uid to a non-root uid
50216 + in a multithreaded application will cause the resulting uids,
50217 + gids, supplementary groups, and capabilities in that thread
50218 + to be propagated to the other threads of the process. In most
50219 + cases this is unnecessary, as glibc will emulate this behavior
50220 + on behalf of the application. Other libcs do not act in the
50221 + same way, allowing the other threads of the process to continue
50222 + running with root privileges. If the sysctl option is enabled,
50223 + a sysctl option with name "consistent_setxid" is created.
50224 +
50225 +config GRKERNSEC_TPE
50226 + bool "Trusted Path Execution (TPE)"
50227 + help
50228 + If you say Y here, you will be able to choose a gid to add to the
50229 + supplementary groups of users you want to mark as "untrusted."
50230 + These users will not be able to execute any files that are not in
50231 + root-owned directories writable only by root. If the sysctl option
50232 + is enabled, a sysctl option with name "tpe" is created.
50233 +
50234 +config GRKERNSEC_TPE_ALL
50235 + bool "Partially restrict all non-root users"
50236 + depends on GRKERNSEC_TPE
50237 + help
50238 + If you say Y here, all non-root users will be covered under
50239 + a weaker TPE restriction. This is separate from, and in addition to,
50240 + the main TPE options that you have selected elsewhere. Thus, if a
50241 + "trusted" GID is chosen, this restriction applies to even that GID.
50242 + Under this restriction, all non-root users will only be allowed to
50243 + execute files in directories they own that are not group or
50244 + world-writable, or in directories owned by root and writable only by
50245 + root. If the sysctl option is enabled, a sysctl option with name
50246 + "tpe_restrict_all" is created.
50247 +
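(Editorial illustration, not part of the patch.) A simplified userspace approximation of the directory rule described above: a binary passes the basic TPE check only if its containing directory is owned by root and writable only by root. The real kernel check also covers the per-user ownership case for GRKERNSEC_TPE_ALL and the group-membership test, which are not reproduced here.

#include <stdio.h>
#include <string.h>
#include <libgen.h>
#include <sys/stat.h>

/* Editorial sketch of the documented directory test: root-owned directory
 * that is neither group- nor world-writable. */
static int tpe_dir_ok(const char *path)
{
	char buf[4096];
	struct stat st;

	strncpy(buf, path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	if (stat(dirname(buf), &st))
		return 0;
	return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
}

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <path-to-binary>\n", argv[0]);
		return 2;
	}
	printf("%s: %s\n", argv[1],
	       tpe_dir_ok(argv[1]) ? "would pass the basic TPE check"
				   : "would be denied for untrusted users");
	return 0;
}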
50248 +config GRKERNSEC_TPE_INVERT
50249 + bool "Invert GID option"
50250 + depends on GRKERNSEC_TPE
50251 + help
50252 + If you say Y here, the group you specify in the TPE configuration will
50253 + decide what group TPE restrictions will be *disabled* for. This
50254 + option is useful if you want TPE restrictions to be applied to most
50255 + users on the system. If the sysctl option is enabled, a sysctl option
50256 + with name "tpe_invert" is created. Unlike other sysctl options, this
50257 + entry will default to on for backward-compatibility.
50258 +
50259 +config GRKERNSEC_TPE_GID
50260 + int "GID for untrusted users"
50261 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50262 + default 1005
50263 + help
50264 + Setting this GID determines what group TPE restrictions will be
50265 + *enabled* for. If the sysctl option is enabled, a sysctl option
50266 + with name "tpe_gid" is created.
50267 +
50268 +config GRKERNSEC_TPE_GID
50269 + int "GID for trusted users"
50270 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50271 + default 1005
50272 + help
50273 + Setting this GID determines what group TPE restrictions will be
50274 + *disabled* for. If the sysctl option is enabled, a sysctl option
50275 + with name "tpe_gid" is created.
50276 +
50277 +endmenu
50278 +menu "Network Protections"
50279 +depends on GRKERNSEC
50280 +
50281 +config GRKERNSEC_RANDNET
50282 + bool "Larger entropy pools"
50283 + help
50284 + If you say Y here, the entropy pools used for many features of Linux
50285 + and grsecurity will be doubled in size. Since several grsecurity
50286 + features use additional randomness, it is recommended that you say Y
50287 + here. Saying Y here has a similar effect as modifying
50288 + /proc/sys/kernel/random/poolsize.
50289 +
50290 +config GRKERNSEC_BLACKHOLE
50291 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50292 + depends on NET
50293 + help
50294 + If you say Y here, neither TCP resets nor ICMP
50295 + destination-unreachable packets will be sent in response to packets
50296 + sent to ports for which no associated listening process exists.
50297 + This feature supports both IPv4 and IPv6 and exempts the
50298 + loopback interface from blackholing. Enabling this feature
50299 + makes a host more resilient to DoS attacks and reduces network
50300 + visibility against scanners.
50301 +
50302 + The blackhole feature as-implemented is equivalent to the FreeBSD
50303 + blackhole feature, as it prevents RST responses to all packets, not
50304 + just SYNs. Under most application behavior this causes no
50305 + problems, but applications (like haproxy) may not close certain
50306 + connections in a way that cleanly terminates them on the remote
50307 + end, leaving the remote host in LAST_ACK state. Because of this
50308 + side-effect and to prevent intentional LAST_ACK DoSes, this
50309 + feature also adds automatic mitigation against such attacks.
50310 + The mitigation drastically reduces the amount of time a socket
50311 + can spend in LAST_ACK state. If you're using haproxy and not
50312 + all servers it connects to have this option enabled, consider
50313 + disabling this feature on the haproxy host.
50314 +
50315 + If the sysctl option is enabled, two sysctl options with names
50316 + "ip_blackhole" and "lastack_retries" will be created.
50317 + While "ip_blackhole" takes the standard zero/non-zero on/off
50318 + toggle, "lastack_retries" uses the same kinds of values as
50319 + "tcp_retries1" and "tcp_retries2". The default value of 4
50320 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50321 + state.
50322 +
50323 +config GRKERNSEC_SOCKET
50324 + bool "Socket restrictions"
50325 + depends on NET
50326 + help
50327 + If you say Y here, you will be able to choose from several options.
50328 + If you assign a GID on your system and add it to the supplementary
50329 + groups of users you want to restrict socket access to, this patch
50330 + will perform up to three things, based on the option(s) you choose.
50331 +
50332 +config GRKERNSEC_SOCKET_ALL
50333 + bool "Deny any sockets to group"
50334 + depends on GRKERNSEC_SOCKET
50335 + help
50336 + If you say Y here, you will be able to choose a GID whose users will
50337 + be unable to connect to other hosts from your machine or run server
50338 + applications from your machine. If the sysctl option is enabled, a
50339 + sysctl option with name "socket_all" is created.
50340 +
50341 +config GRKERNSEC_SOCKET_ALL_GID
50342 + int "GID to deny all sockets for"
50343 + depends on GRKERNSEC_SOCKET_ALL
50344 + default 1004
50345 + help
50346 + Here you can choose the GID to disable socket access for. Remember to
50347 + add the users you want socket access disabled for to the GID
50348 + specified here. If the sysctl option is enabled, a sysctl option
50349 + with name "socket_all_gid" is created.
50350 +
50351 +config GRKERNSEC_SOCKET_CLIENT
50352 + bool "Deny client sockets to group"
50353 + depends on GRKERNSEC_SOCKET
50354 + help
50355 + If you say Y here, you will be able to choose a GID whose users will
50356 + be unable to connect to other hosts from your machine, but will be
50357 + able to run servers. If this option is enabled, all users in the group
50358 + you specify will have to use passive mode when initiating ftp transfers
50359 + from the shell on your machine. If the sysctl option is enabled, a
50360 + sysctl option with name "socket_client" is created.
50361 +
50362 +config GRKERNSEC_SOCKET_CLIENT_GID
50363 + int "GID to deny client sockets for"
50364 + depends on GRKERNSEC_SOCKET_CLIENT
50365 + default 1003
50366 + help
50367 + Here you can choose the GID to disable client socket access for.
50368 + Remember to add the users you want client socket access disabled for to
50369 + the GID specified here. If the sysctl option is enabled, a sysctl
50370 + option with name "socket_client_gid" is created.
50371 +
50372 +config GRKERNSEC_SOCKET_SERVER
50373 + bool "Deny server sockets to group"
50374 + depends on GRKERNSEC_SOCKET
50375 + help
50376 + If you say Y here, you will be able to choose a GID whose users will
50377 + be unable to run server applications from your machine. If the sysctl
50378 + option is enabled, a sysctl option with name "socket_server" is created.
50379 +
50380 +config GRKERNSEC_SOCKET_SERVER_GID
50381 + int "GID to deny server sockets for"
50382 + depends on GRKERNSEC_SOCKET_SERVER
50383 + default 1002
50384 + help
50385 + Here you can choose the GID to disable server socket access for.
50386 + Remember to add the users you want server socket access disabled for to
50387 + the GID specified here. If the sysctl option is enabled, a sysctl
50388 + option with name "socket_server_gid" is created.
50389 +
50390 +endmenu
50391 +menu "Sysctl support"
50392 +depends on GRKERNSEC && SYSCTL
50393 +
50394 +config GRKERNSEC_SYSCTL
50395 + bool "Sysctl support"
50396 + help
50397 + If you say Y here, you will be able to change the options that
50398 + grsecurity runs with at bootup, without having to recompile your
50399 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50400 + to enable (1) or disable (0) various features. All the sysctl entries
50401 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50402 + All features enabled in the kernel configuration are disabled at boot
50403 + if you do not say Y to the "Turn on features by default" option.
50404 + All options should be set at startup, and the grsec_lock entry should
50405 + be set to a non-zero value after all the options are set.
50406 + *THIS IS EXTREMELY IMPORTANT*
50407 +
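(Editorial illustration, not part of the patch.) A minimal sketch of the boot-time sequence recommended in the help text above: write the desired toggles under /proc/sys/kernel/grsecurity/ and set grsec_lock last. The "exec_logging" name used below is just one example taken from the help texts in this file; substitute whichever entries your kernel configuration enables.

#include <stdio.h>

/* Editorial sketch: write a value to one entry under
 * /proc/sys/kernel/grsecurity/.  Returns 0 on success. */
static int grsec_set(const char *name, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Example toggle; adjust to the features built into the kernel. */
	grsec_set("exec_logging", "1");
	/* grsec_lock must be set last, after all other options. */
	return grsec_set("grsec_lock", "1") ? 1 : 0;
}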
50408 +config GRKERNSEC_SYSCTL_DISTRO
50409 + bool "Extra sysctl support for distro makers (READ HELP)"
50410 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50411 + help
50412 + If you say Y here, additional sysctl options will be created
50413 + for features that affect processes running as root. Therefore,
50414 + it is critical when using this option that the grsec_lock entry be
50415 + enabled after boot. Only distros that ship prebuilt kernel packages
50416 + with this option enabled and that can ensure grsec_lock is enabled
50417 + after boot should use this option.
50418 + *Failure to set grsec_lock after boot makes all grsec features
50419 + this option covers useless*
50420 +
50421 + Currently this option creates the following sysctl entries:
50422 + "Disable Privileged I/O": "disable_priv_io"
50423 +
50424 +config GRKERNSEC_SYSCTL_ON
50425 + bool "Turn on features by default"
50426 + depends on GRKERNSEC_SYSCTL
50427 + help
50428 + If you say Y here, instead of having all features enabled in the
50429 + kernel configuration disabled at boot time, the features will be
50430 + enabled at boot time. It is recommended you say Y here unless
50431 + there is some reason you would want all sysctl-tunable features to
50432 + be disabled by default. As mentioned elsewhere, it is important
50433 + to enable the grsec_lock entry once you have finished modifying
50434 + the sysctl entries.
50435 +
50436 +endmenu
50437 +menu "Logging Options"
50438 +depends on GRKERNSEC
50439 +
50440 +config GRKERNSEC_FLOODTIME
50441 + int "Seconds in between log messages (minimum)"
50442 + default 10
50443 + help
50444 + This option allows you to enforce the number of seconds between
50445 + grsecurity log messages. The default should be suitable for most
50446 + people; however, if you choose to change it, choose a value small enough
50447 + to allow informative logs to be produced, but large enough to
50448 + prevent flooding.
50449 +
50450 +config GRKERNSEC_FLOODBURST
50451 + int "Number of messages in a burst (maximum)"
50452 + default 6
50453 + help
50454 + This option allows you to choose the maximum number of messages allowed
50455 + within the flood time interval you chose in a separate option. The
50456 + default should be suitable for most people; however, if you find that
50457 + many of your logs are being interpreted as flooding, you may want to
50458 + raise this value.
50459 +
50460 +endmenu
50461 +
50462 +endmenu
50463 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50464 new file mode 100644
50465 index 0000000..1b9afa9
50466 --- /dev/null
50467 +++ b/grsecurity/Makefile
50468 @@ -0,0 +1,38 @@
50469 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50470 +# during 2001-2009 it was completely redesigned by Brad Spengler
50471 +# into an RBAC system
50472 +#
50473 +# All code in this directory and various hooks inserted throughout the kernel
50474 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50475 +# under the GPL v2 or higher
50476 +
50477 +KBUILD_CFLAGS += -Werror
50478 +
50479 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50480 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50481 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50482 +
50483 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50484 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50485 + gracl_learn.o grsec_log.o
50486 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50487 +
50488 +ifdef CONFIG_NET
50489 +obj-y += grsec_sock.o
50490 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50491 +endif
50492 +
50493 +ifndef CONFIG_GRKERNSEC
50494 +obj-y += grsec_disabled.o
50495 +endif
50496 +
50497 +ifdef CONFIG_GRKERNSEC_HIDESYM
50498 +extra-y := grsec_hidesym.o
50499 +$(obj)/grsec_hidesym.o:
50500 + @-chmod -f 500 /boot
50501 + @-chmod -f 500 /lib/modules
50502 + @-chmod -f 500 /lib64/modules
50503 + @-chmod -f 500 /lib32/modules
50504 + @-chmod -f 700 .
50505 + @echo ' grsec: protected kernel image paths'
50506 +endif
50507 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50508 new file mode 100644
50509 index 0000000..a6d83f0
50510 --- /dev/null
50511 +++ b/grsecurity/gracl.c
50512 @@ -0,0 +1,4193 @@
50513 +#include <linux/kernel.h>
50514 +#include <linux/module.h>
50515 +#include <linux/sched.h>
50516 +#include <linux/mm.h>
50517 +#include <linux/file.h>
50518 +#include <linux/fs.h>
50519 +#include <linux/namei.h>
50520 +#include <linux/mount.h>
50521 +#include <linux/tty.h>
50522 +#include <linux/proc_fs.h>
50523 +#include <linux/lglock.h>
50524 +#include <linux/slab.h>
50525 +#include <linux/vmalloc.h>
50526 +#include <linux/types.h>
50527 +#include <linux/sysctl.h>
50528 +#include <linux/netdevice.h>
50529 +#include <linux/ptrace.h>
50530 +#include <linux/gracl.h>
50531 +#include <linux/gralloc.h>
50532 +#include <linux/security.h>
50533 +#include <linux/grinternal.h>
50534 +#include <linux/pid_namespace.h>
50535 +#include <linux/fdtable.h>
50536 +#include <linux/percpu.h>
50537 +#include "../fs/mount.h"
50538 +
50539 +#include <asm/uaccess.h>
50540 +#include <asm/errno.h>
50541 +#include <asm/mman.h>
50542 +
50543 +static struct acl_role_db acl_role_set;
50544 +static struct name_db name_set;
50545 +static struct inodev_db inodev_set;
50546 +
50547 +/* for keeping track of userspace pointers used for subjects, so we
50548 + can share references in the kernel as well
50549 +*/
50550 +
50551 +static struct path real_root;
50552 +
50553 +static struct acl_subj_map_db subj_map_set;
50554 +
50555 +static struct acl_role_label *default_role;
50556 +
50557 +static struct acl_role_label *role_list;
50558 +
50559 +static u16 acl_sp_role_value;
50560 +
50561 +extern char *gr_shared_page[4];
50562 +static DEFINE_MUTEX(gr_dev_mutex);
50563 +DEFINE_RWLOCK(gr_inode_lock);
50564 +
50565 +struct gr_arg *gr_usermode;
50566 +
50567 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
50568 +
50569 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50570 +extern void gr_clear_learn_entries(void);
50571 +
50572 +#ifdef CONFIG_GRKERNSEC_RESLOG
50573 +extern void gr_log_resource(const struct task_struct *task,
50574 + const int res, const unsigned long wanted, const int gt);
50575 +#endif
50576 +
50577 +unsigned char *gr_system_salt;
50578 +unsigned char *gr_system_sum;
50579 +
50580 +static struct sprole_pw **acl_special_roles = NULL;
50581 +static __u16 num_sprole_pws = 0;
50582 +
50583 +static struct acl_role_label *kernel_role = NULL;
50584 +
50585 +static unsigned int gr_auth_attempts = 0;
50586 +static unsigned long gr_auth_expires = 0UL;
50587 +
50588 +#ifdef CONFIG_NET
50589 +extern struct vfsmount *sock_mnt;
50590 +#endif
50591 +
50592 +extern struct vfsmount *pipe_mnt;
50593 +extern struct vfsmount *shm_mnt;
50594 +#ifdef CONFIG_HUGETLBFS
50595 +extern struct vfsmount *hugetlbfs_vfsmount;
50596 +#endif
50597 +
50598 +static struct acl_object_label *fakefs_obj_rw;
50599 +static struct acl_object_label *fakefs_obj_rwx;
50600 +
50601 +extern int gr_init_uidset(void);
50602 +extern void gr_free_uidset(void);
50603 +extern void gr_remove_uid(uid_t uid);
50604 +extern int gr_find_uid(uid_t uid);
50605 +
50606 +DECLARE_BRLOCK(vfsmount_lock);
50607 +
50608 +__inline__ int
50609 +gr_acl_is_enabled(void)
50610 +{
50611 + return (gr_status & GR_READY);
50612 +}
50613 +
50614 +#ifdef CONFIG_BTRFS_FS
50615 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50616 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50617 +#endif
50618 +
50619 +static inline dev_t __get_dev(const struct dentry *dentry)
50620 +{
50621 +#ifdef CONFIG_BTRFS_FS
50622 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50623 + return get_btrfs_dev_from_inode(dentry->d_inode);
50624 + else
50625 +#endif
50626 + return dentry->d_inode->i_sb->s_dev;
50627 +}
50628 +
50629 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50630 +{
50631 + return __get_dev(dentry);
50632 +}
50633 +
50634 +static char gr_task_roletype_to_char(struct task_struct *task)
50635 +{
50636 + switch (task->role->roletype &
50637 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50638 + GR_ROLE_SPECIAL)) {
50639 + case GR_ROLE_DEFAULT:
50640 + return 'D';
50641 + case GR_ROLE_USER:
50642 + return 'U';
50643 + case GR_ROLE_GROUP:
50644 + return 'G';
50645 + case GR_ROLE_SPECIAL:
50646 + return 'S';
50647 + }
50648 +
50649 + return 'X';
50650 +}
50651 +
50652 +char gr_roletype_to_char(void)
50653 +{
50654 + return gr_task_roletype_to_char(current);
50655 +}
50656 +
50657 +__inline__ int
50658 +gr_acl_tpe_check(void)
50659 +{
50660 + if (unlikely(!(gr_status & GR_READY)))
50661 + return 0;
50662 + if (current->role->roletype & GR_ROLE_TPE)
50663 + return 1;
50664 + else
50665 + return 0;
50666 +}
50667 +
50668 +int
50669 +gr_handle_rawio(const struct inode *inode)
50670 +{
50671 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50672 + if (inode && S_ISBLK(inode->i_mode) &&
50673 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50674 + !capable(CAP_SYS_RAWIO))
50675 + return 1;
50676 +#endif
50677 + return 0;
50678 +}
50679 +
50680 +static int
50681 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50682 +{
50683 + if (likely(lena != lenb))
50684 + return 0;
50685 +
50686 + return !memcmp(a, b, lena);
50687 +}
50688 +
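/* Editorial note, not part of the patch: prepend(), prepend_name() and
 * prepend_path() below appear to mirror the path-building helpers in
 * fs/dcache.c, duplicated here so the RBAC code can build names against
 * its own saved root (real_root) while holding vfsmount_lock and
 * rename_lock, as noted further down. */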
50689 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50690 +{
50691 + *buflen -= namelen;
50692 + if (*buflen < 0)
50693 + return -ENAMETOOLONG;
50694 + *buffer -= namelen;
50695 + memcpy(*buffer, str, namelen);
50696 + return 0;
50697 +}
50698 +
50699 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50700 +{
50701 + return prepend(buffer, buflen, name->name, name->len);
50702 +}
50703 +
50704 +static int prepend_path(const struct path *path, struct path *root,
50705 + char **buffer, int *buflen)
50706 +{
50707 + struct dentry *dentry = path->dentry;
50708 + struct vfsmount *vfsmnt = path->mnt;
50709 + struct mount *mnt = real_mount(vfsmnt);
50710 + bool slash = false;
50711 + int error = 0;
50712 +
50713 + while (dentry != root->dentry || vfsmnt != root->mnt) {
50714 + struct dentry * parent;
50715 +
50716 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50717 + /* Global root? */
50718 + if (!mnt_has_parent(mnt)) {
50719 + goto out;
50720 + }
50721 + dentry = mnt->mnt_mountpoint;
50722 + mnt = mnt->mnt_parent;
50723 + vfsmnt = &mnt->mnt;
50724 + continue;
50725 + }
50726 + parent = dentry->d_parent;
50727 + prefetch(parent);
50728 + spin_lock(&dentry->d_lock);
50729 + error = prepend_name(buffer, buflen, &dentry->d_name);
50730 + spin_unlock(&dentry->d_lock);
50731 + if (!error)
50732 + error = prepend(buffer, buflen, "/", 1);
50733 + if (error)
50734 + break;
50735 +
50736 + slash = true;
50737 + dentry = parent;
50738 + }
50739 +
50740 +out:
50741 + if (!error && !slash)
50742 + error = prepend(buffer, buflen, "/", 1);
50743 +
50744 + return error;
50745 +}
50746 +
50747 +/* this must be called with vfsmount_lock and rename_lock held */
50748 +
50749 +static char *__our_d_path(const struct path *path, struct path *root,
50750 + char *buf, int buflen)
50751 +{
50752 + char *res = buf + buflen;
50753 + int error;
50754 +
50755 + prepend(&res, &buflen, "\0", 1);
50756 + error = prepend_path(path, root, &res, &buflen);
50757 + if (error)
50758 + return ERR_PTR(error);
50759 +
50760 + return res;
50761 +}
50762 +
50763 +static char *
50764 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50765 +{
50766 + char *retval;
50767 +
50768 + retval = __our_d_path(path, root, buf, buflen);
50769 + if (unlikely(IS_ERR(retval)))
50770 + retval = strcpy(buf, "<path too long>");
50771 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50772 + retval[1] = '\0';
50773 +
50774 + return retval;
50775 +}
50776 +
50777 +static char *
50778 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50779 + char *buf, int buflen)
50780 +{
50781 + struct path path;
50782 + char *res;
50783 +
50784 + path.dentry = (struct dentry *)dentry;
50785 + path.mnt = (struct vfsmount *)vfsmnt;
50786 +
50787 + /* we can use real_root.dentry, real_root.mnt, because this is only called
50788 + by the RBAC system */
50789 + res = gen_full_path(&path, &real_root, buf, buflen);
50790 +
50791 + return res;
50792 +}
50793 +
50794 +static char *
50795 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50796 + char *buf, int buflen)
50797 +{
50798 + char *res;
50799 + struct path path;
50800 + struct path root;
50801 + struct task_struct *reaper = &init_task;
50802 +
50803 + path.dentry = (struct dentry *)dentry;
50804 + path.mnt = (struct vfsmount *)vfsmnt;
50805 +
50806 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50807 + get_fs_root(reaper->fs, &root);
50808 +
50809 + write_seqlock(&rename_lock);
50810 + br_read_lock(vfsmount_lock);
50811 + res = gen_full_path(&path, &root, buf, buflen);
50812 + br_read_unlock(vfsmount_lock);
50813 + write_sequnlock(&rename_lock);
50814 +
50815 + path_put(&root);
50816 + return res;
50817 +}
50818 +
50819 +static char *
50820 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50821 +{
50822 + char *ret;
50823 + write_seqlock(&rename_lock);
50824 + br_read_lock(vfsmount_lock);
50825 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50826 + PAGE_SIZE);
50827 + br_read_unlock(vfsmount_lock);
50828 + write_sequnlock(&rename_lock);
50829 + return ret;
50830 +}
50831 +
50832 +static char *
50833 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50834 +{
50835 + char *ret;
50836 + char *buf;
50837 + int buflen;
50838 +
50839 + write_seqlock(&rename_lock);
50840 + br_read_lock(vfsmount_lock);
50841 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50842 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50843 + buflen = (int)(ret - buf);
50844 + if (buflen >= 5)
50845 + prepend(&ret, &buflen, "/proc", 5);
50846 + else
50847 + ret = strcpy(buf, "<path too long>");
50848 + br_read_unlock(vfsmount_lock);
50849 + write_sequnlock(&rename_lock);
50850 + return ret;
50851 +}
50852 +
50853 +char *
50854 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50855 +{
50856 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50857 + PAGE_SIZE);
50858 +}
50859 +
50860 +char *
50861 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50862 +{
50863 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50864 + PAGE_SIZE);
50865 +}
50866 +
50867 +char *
50868 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50869 +{
50870 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50871 + PAGE_SIZE);
50872 +}
50873 +
50874 +char *
50875 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50876 +{
50877 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50878 + PAGE_SIZE);
50879 +}
50880 +
50881 +char *
50882 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50883 +{
50884 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50885 + PAGE_SIZE);
50886 +}
50887 +
50888 +__inline__ __u32
50889 +to_gr_audit(const __u32 reqmode)
50890 +{
50891 + /* masks off auditable permission flags, then shifts them to create
50892 + auditing flags, and adds the special case of append auditing if
50893 + we're requesting write */
50894 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
50895 +}
50896 +
50897 +struct acl_subject_label *
50898 +lookup_subject_map(const struct acl_subject_label *userp)
50899 +{
50900 + unsigned int index = shash(userp, subj_map_set.s_size);
50901 + struct subject_map *match;
50902 +
50903 + match = subj_map_set.s_hash[index];
50904 +
50905 + while (match && match->user != userp)
50906 + match = match->next;
50907 +
50908 + if (match != NULL)
50909 + return match->kernel;
50910 + else
50911 + return NULL;
50912 +}
50913 +
50914 +static void
50915 +insert_subj_map_entry(struct subject_map *subjmap)
50916 +{
50917 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
50918 + struct subject_map **curr;
50919 +
50920 + subjmap->prev = NULL;
50921 +
50922 + curr = &subj_map_set.s_hash[index];
50923 + if (*curr != NULL)
50924 + (*curr)->prev = subjmap;
50925 +
50926 + subjmap->next = *curr;
50927 + *curr = subjmap;
50928 +
50929 + return;
50930 +}
50931 +
50932 +static struct acl_role_label *
50933 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50934 + const gid_t gid)
50935 +{
50936 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50937 + struct acl_role_label *match;
50938 + struct role_allowed_ip *ipp;
50939 + unsigned int x;
50940 + u32 curr_ip = task->signal->curr_ip;
50941 +
50942 + task->signal->saved_ip = curr_ip;
50943 +
50944 + match = acl_role_set.r_hash[index];
50945 +
50946 + while (match) {
50947 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50948 + for (x = 0; x < match->domain_child_num; x++) {
50949 + if (match->domain_children[x] == uid)
50950 + goto found;
50951 + }
50952 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
50953 + break;
50954 + match = match->next;
50955 + }
50956 +found:
50957 + if (match == NULL) {
50958 + try_group:
50959 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
50960 + match = acl_role_set.r_hash[index];
50961 +
50962 + while (match) {
50963 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
50964 + for (x = 0; x < match->domain_child_num; x++) {
50965 + if (match->domain_children[x] == gid)
50966 + goto found2;
50967 + }
50968 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
50969 + break;
50970 + match = match->next;
50971 + }
50972 +found2:
50973 + if (match == NULL)
50974 + match = default_role;
50975 + if (match->allowed_ips == NULL)
50976 + return match;
50977 + else {
50978 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50979 + if (likely
50980 + ((ntohl(curr_ip) & ipp->netmask) ==
50981 + (ntohl(ipp->addr) & ipp->netmask)))
50982 + return match;
50983 + }
50984 + match = default_role;
50985 + }
50986 + } else if (match->allowed_ips == NULL) {
50987 + return match;
50988 + } else {
50989 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50990 + if (likely
50991 + ((ntohl(curr_ip) & ipp->netmask) ==
50992 + (ntohl(ipp->addr) & ipp->netmask)))
50993 + return match;
50994 + }
50995 + goto try_group;
50996 + }
50997 +
50998 + return match;
50999 +}
51000 +
51001 +struct acl_subject_label *
51002 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51003 + const struct acl_role_label *role)
51004 +{
51005 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51006 + struct acl_subject_label *match;
51007 +
51008 + match = role->subj_hash[index];
51009 +
51010 + while (match && (match->inode != ino || match->device != dev ||
51011 + (match->mode & GR_DELETED))) {
51012 + match = match->next;
51013 + }
51014 +
51015 + if (match && !(match->mode & GR_DELETED))
51016 + return match;
51017 + else
51018 + return NULL;
51019 +}
51020 +
51021 +struct acl_subject_label *
51022 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51023 + const struct acl_role_label *role)
51024 +{
51025 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51026 + struct acl_subject_label *match;
51027 +
51028 + match = role->subj_hash[index];
51029 +
51030 + while (match && (match->inode != ino || match->device != dev ||
51031 + !(match->mode & GR_DELETED))) {
51032 + match = match->next;
51033 + }
51034 +
51035 + if (match && (match->mode & GR_DELETED))
51036 + return match;
51037 + else
51038 + return NULL;
51039 +}
51040 +
51041 +static struct acl_object_label *
51042 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51043 + const struct acl_subject_label *subj)
51044 +{
51045 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51046 + struct acl_object_label *match;
51047 +
51048 + match = subj->obj_hash[index];
51049 +
51050 + while (match && (match->inode != ino || match->device != dev ||
51051 + (match->mode & GR_DELETED))) {
51052 + match = match->next;
51053 + }
51054 +
51055 + if (match && !(match->mode & GR_DELETED))
51056 + return match;
51057 + else
51058 + return NULL;
51059 +}
51060 +
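/* Editorial note, not part of the patch: this "create" variant appears to
 * prefer a matching object label that was marked GR_DELETED (so the label
 * of a previously deleted path can be reused when that path is recreated),
 * and only falls back to a live, non-deleted match if no such entry
 * exists. */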
51061 +static struct acl_object_label *
51062 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51063 + const struct acl_subject_label *subj)
51064 +{
51065 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51066 + struct acl_object_label *match;
51067 +
51068 + match = subj->obj_hash[index];
51069 +
51070 + while (match && (match->inode != ino || match->device != dev ||
51071 + !(match->mode & GR_DELETED))) {
51072 + match = match->next;
51073 + }
51074 +
51075 + if (match && (match->mode & GR_DELETED))
51076 + return match;
51077 +
51078 + match = subj->obj_hash[index];
51079 +
51080 + while (match && (match->inode != ino || match->device != dev ||
51081 + (match->mode & GR_DELETED))) {
51082 + match = match->next;
51083 + }
51084 +
51085 + if (match && !(match->mode & GR_DELETED))
51086 + return match;
51087 + else
51088 + return NULL;
51089 +}
51090 +
51091 +static struct name_entry *
51092 +lookup_name_entry(const char *name)
51093 +{
51094 + unsigned int len = strlen(name);
51095 + unsigned int key = full_name_hash(name, len);
51096 + unsigned int index = key % name_set.n_size;
51097 + struct name_entry *match;
51098 +
51099 + match = name_set.n_hash[index];
51100 +
51101 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51102 + match = match->next;
51103 +
51104 + return match;
51105 +}
51106 +
51107 +static struct name_entry *
51108 +lookup_name_entry_create(const char *name)
51109 +{
51110 + unsigned int len = strlen(name);
51111 + unsigned int key = full_name_hash(name, len);
51112 + unsigned int index = key % name_set.n_size;
51113 + struct name_entry *match;
51114 +
51115 + match = name_set.n_hash[index];
51116 +
51117 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51118 + !match->deleted))
51119 + match = match->next;
51120 +
51121 + if (match && match->deleted)
51122 + return match;
51123 +
51124 + match = name_set.n_hash[index];
51125 +
51126 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51127 + match->deleted))
51128 + match = match->next;
51129 +
51130 + if (match && !match->deleted)
51131 + return match;
51132 + else
51133 + return NULL;
51134 +}
51135 +
51136 +static struct inodev_entry *
51137 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
51138 +{
51139 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
51140 + struct inodev_entry *match;
51141 +
51142 + match = inodev_set.i_hash[index];
51143 +
51144 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51145 + match = match->next;
51146 +
51147 + return match;
51148 +}
51149 +
51150 +static void
51151 +insert_inodev_entry(struct inodev_entry *entry)
51152 +{
51153 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51154 + inodev_set.i_size);
51155 + struct inodev_entry **curr;
51156 +
51157 + entry->prev = NULL;
51158 +
51159 + curr = &inodev_set.i_hash[index];
51160 + if (*curr != NULL)
51161 + (*curr)->prev = entry;
51162 +
51163 + entry->next = *curr;
51164 + *curr = entry;
51165 +
51166 + return;
51167 +}
51168 +
51169 +static void
51170 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51171 +{
51172 + unsigned int index =
51173 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51174 + struct acl_role_label **curr;
51175 + struct acl_role_label *tmp, *tmp2;
51176 +
51177 + curr = &acl_role_set.r_hash[index];
51178 +
51179 + /* simple case, slot is empty, just set it to our role */
51180 + if (*curr == NULL) {
51181 + *curr = role;
51182 + } else {
51183 + /* example:
51184 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
51185 + 2 -> 3
51186 + */
51187 + /* first check to see if we can already be reached via this slot */
51188 + tmp = *curr;
51189 + while (tmp && tmp != role)
51190 + tmp = tmp->next;
51191 + if (tmp == role) {
51192 + /* we don't need to add ourselves to this slot's chain */
51193 + return;
51194 + }
51195 + /* we need to add ourselves to this chain, two cases */
51196 + if (role->next == NULL) {
51197 + /* simple case, append the current chain to our role */
51198 + role->next = *curr;
51199 + *curr = role;
51200 + } else {
51201 + /* 1 -> 2 -> 3 -> 4
51202 + 2 -> 3 -> 4
51203 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
51204 + */
51205 + /* trickier case: walk our role's chain until we find
51206 + the role for the start of the current slot's chain */
51207 + tmp = role;
51208 + tmp2 = *curr;
51209 + while (tmp->next && tmp->next != tmp2)
51210 + tmp = tmp->next;
51211 + if (tmp->next == tmp2) {
51212 + /* from example above, we found 3, so just
51213 + replace this slot's chain with ours */
51214 + *curr = role;
51215 + } else {
51216 + /* we didn't find a subset of our role's chain
51217 + in the current slot's chain, so append their
51218 + chain to ours, and set us as the first role in
51219 + the slot's chain
51220 +
51221 + we could fold this case with the case above,
51222 + but making it explicit for clarity
51223 + */
51224 + tmp->next = tmp2;
51225 + *curr = role;
51226 + }
51227 + }
51228 + }
51229 +
51230 + return;
51231 +}
51232 +
51233 +static void
51234 +insert_acl_role_label(struct acl_role_label *role)
51235 +{
51236 + int i;
51237 +
51238 + if (role_list == NULL) {
51239 + role_list = role;
51240 + role->prev = NULL;
51241 + } else {
51242 + role->prev = role_list;
51243 + role_list = role;
51244 + }
51245 +
51246 + /* used for hash chains */
51247 + role->next = NULL;
51248 +
51249 + if (role->roletype & GR_ROLE_DOMAIN) {
51250 + for (i = 0; i < role->domain_child_num; i++)
51251 + __insert_acl_role_label(role, role->domain_children[i]);
51252 + } else
51253 + __insert_acl_role_label(role, role->uidgid);
51254 +}
51255 +
51256 +static int
51257 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51258 +{
51259 + struct name_entry **curr, *nentry;
51260 + struct inodev_entry *ientry;
51261 + unsigned int len = strlen(name);
51262 + unsigned int key = full_name_hash(name, len);
51263 + unsigned int index = key % name_set.n_size;
51264 +
51265 + curr = &name_set.n_hash[index];
51266 +
51267 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51268 + curr = &((*curr)->next);
51269 +
51270 + if (*curr != NULL)
51271 + return 1;
51272 +
51273 + nentry = acl_alloc(sizeof (struct name_entry));
51274 + if (nentry == NULL)
51275 + return 0;
51276 + ientry = acl_alloc(sizeof (struct inodev_entry));
51277 + if (ientry == NULL)
51278 + return 0;
51279 + ientry->nentry = nentry;
51280 +
51281 + nentry->key = key;
51282 + nentry->name = name;
51283 + nentry->inode = inode;
51284 + nentry->device = device;
51285 + nentry->len = len;
51286 + nentry->deleted = deleted;
51287 +
51288 + nentry->prev = NULL;
51289 + curr = &name_set.n_hash[index];
51290 + if (*curr != NULL)
51291 + (*curr)->prev = nentry;
51292 + nentry->next = *curr;
51293 + *curr = nentry;
51294 +
51295 + /* insert us into the table searchable by inode/dev */
51296 + insert_inodev_entry(ientry);
51297 +
51298 + return 1;
51299 +}
51300 +
51301 +static void
51302 +insert_acl_obj_label(struct acl_object_label *obj,
51303 + struct acl_subject_label *subj)
51304 +{
51305 + unsigned int index =
51306 + fhash(obj->inode, obj->device, subj->obj_hash_size);
51307 + struct acl_object_label **curr;
51308 +
51309 +
51310 + obj->prev = NULL;
51311 +
51312 + curr = &subj->obj_hash[index];
51313 + if (*curr != NULL)
51314 + (*curr)->prev = obj;
51315 +
51316 + obj->next = *curr;
51317 + *curr = obj;
51318 +
51319 + return;
51320 +}
51321 +
51322 +static void
51323 +insert_acl_subj_label(struct acl_subject_label *obj,
51324 + struct acl_role_label *role)
51325 +{
51326 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51327 + struct acl_subject_label **curr;
51328 +
51329 + obj->prev = NULL;
51330 +
51331 + curr = &role->subj_hash[index];
51332 + if (*curr != NULL)
51333 + (*curr)->prev = obj;
51334 +
51335 + obj->next = *curr;
51336 + *curr = obj;
51337 +
51338 + return;
51339 +}
51340 +
51341 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
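/* Editorial note, not part of the patch: create_table() below rounds the
 * requested element count up to the next prime in table_sizes[], so the
 * expected chain length (load factor lambda = entries / buckets) stays
 * close to 1; e.g. a request for roughly 1000 entries lands in the
 * 1021-bucket table. */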
51342 +
51343 +static void *
51344 +create_table(__u32 * len, int elementsize)
51345 +{
51346 + unsigned int table_sizes[] = {
51347 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51348 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51349 + 4194301, 8388593, 16777213, 33554393, 67108859
51350 + };
51351 + void *newtable = NULL;
51352 + unsigned int pwr = 0;
51353 +
51354 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51355 + table_sizes[pwr] <= *len)
51356 + pwr++;
51357 +
51358 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51359 + return newtable;
51360 +
51361 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51362 + newtable =
51363 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51364 + else
51365 + newtable = vmalloc(table_sizes[pwr] * elementsize);
51366 +
51367 + *len = table_sizes[pwr];
51368 +
51369 + return newtable;
51370 +}
51371 +
51372 +static int
51373 +init_variables(const struct gr_arg *arg)
51374 +{
51375 + struct task_struct *reaper = &init_task;
51376 + unsigned int stacksize;
51377 +
51378 + subj_map_set.s_size = arg->role_db.num_subjects;
51379 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51380 + name_set.n_size = arg->role_db.num_objects;
51381 + inodev_set.i_size = arg->role_db.num_objects;
51382 +
51383 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
51384 + !name_set.n_size || !inodev_set.i_size)
51385 + return 1;
51386 +
51387 + if (!gr_init_uidset())
51388 + return 1;
51389 +
51390 + /* set up the stack that holds allocation info */
51391 +
51392 + stacksize = arg->role_db.num_pointers + 5;
51393 +
51394 + if (!acl_alloc_stack_init(stacksize))
51395 + return 1;
51396 +
51397 + /* grab reference for the real root dentry and vfsmount */
51398 + get_fs_root(reaper->fs, &real_root);
51399 +
51400 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51401 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51402 +#endif
51403 +
51404 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51405 + if (fakefs_obj_rw == NULL)
51406 + return 1;
51407 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51408 +
51409 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51410 + if (fakefs_obj_rwx == NULL)
51411 + return 1;
51412 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51413 +
51414 + subj_map_set.s_hash =
51415 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51416 + acl_role_set.r_hash =
51417 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51418 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51419 + inodev_set.i_hash =
51420 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51421 +
51422 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51423 + !name_set.n_hash || !inodev_set.i_hash)
51424 + return 1;
51425 +
51426 + memset(subj_map_set.s_hash, 0,
51427 + sizeof(struct subject_map *) * subj_map_set.s_size);
51428 + memset(acl_role_set.r_hash, 0,
51429 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
51430 + memset(name_set.n_hash, 0,
51431 + sizeof (struct name_entry *) * name_set.n_size);
51432 + memset(inodev_set.i_hash, 0,
51433 + sizeof (struct inodev_entry *) * inodev_set.i_size);
51434 +
51435 + return 0;
51436 +}
51437 +
51438 + /* free information that is not needed after startup;
51439 + currently this contains the user->kernel pointer mappings for subjects
51440 +*/
51441 +
51442 +static void
51443 +free_init_variables(void)
51444 +{
51445 + __u32 i;
51446 +
51447 + if (subj_map_set.s_hash) {
51448 + for (i = 0; i < subj_map_set.s_size; i++) {
51449 + if (subj_map_set.s_hash[i]) {
51450 + kfree(subj_map_set.s_hash[i]);
51451 + subj_map_set.s_hash[i] = NULL;
51452 + }
51453 + }
51454 +
51455 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51456 + PAGE_SIZE)
51457 + kfree(subj_map_set.s_hash);
51458 + else
51459 + vfree(subj_map_set.s_hash);
51460 + }
51461 +
51462 + return;
51463 +}
51464 +
51465 +static void
51466 +free_variables(void)
51467 +{
51468 + struct acl_subject_label *s;
51469 + struct acl_role_label *r;
51470 + struct task_struct *task, *task2;
51471 + unsigned int x;
51472 +
51473 + gr_clear_learn_entries();
51474 +
51475 + read_lock(&tasklist_lock);
51476 + do_each_thread(task2, task) {
51477 + task->acl_sp_role = 0;
51478 + task->acl_role_id = 0;
51479 + task->acl = NULL;
51480 + task->role = NULL;
51481 + } while_each_thread(task2, task);
51482 + read_unlock(&tasklist_lock);
51483 +
51484 + /* release the reference to the real root dentry and vfsmount */
51485 + path_put(&real_root);
51486 + memset(&real_root, 0, sizeof(real_root));
51487 +
51488 + /* free all object hash tables */
51489 +
51490 + FOR_EACH_ROLE_START(r)
51491 + if (r->subj_hash == NULL)
51492 + goto next_role;
51493 + FOR_EACH_SUBJECT_START(r, s, x)
51494 + if (s->obj_hash == NULL)
51495 + break;
51496 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51497 + kfree(s->obj_hash);
51498 + else
51499 + vfree(s->obj_hash);
51500 + FOR_EACH_SUBJECT_END(s, x)
51501 + FOR_EACH_NESTED_SUBJECT_START(r, s)
51502 + if (s->obj_hash == NULL)
51503 + break;
51504 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51505 + kfree(s->obj_hash);
51506 + else
51507 + vfree(s->obj_hash);
51508 + FOR_EACH_NESTED_SUBJECT_END(s)
51509 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51510 + kfree(r->subj_hash);
51511 + else
51512 + vfree(r->subj_hash);
51513 + r->subj_hash = NULL;
51514 +next_role:
51515 + FOR_EACH_ROLE_END(r)
51516 +
51517 + acl_free_all();
51518 +
51519 + if (acl_role_set.r_hash) {
51520 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51521 + PAGE_SIZE)
51522 + kfree(acl_role_set.r_hash);
51523 + else
51524 + vfree(acl_role_set.r_hash);
51525 + }
51526 + if (name_set.n_hash) {
51527 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
51528 + PAGE_SIZE)
51529 + kfree(name_set.n_hash);
51530 + else
51531 + vfree(name_set.n_hash);
51532 + }
51533 +
51534 + if (inodev_set.i_hash) {
51535 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51536 + PAGE_SIZE)
51537 + kfree(inodev_set.i_hash);
51538 + else
51539 + vfree(inodev_set.i_hash);
51540 + }
51541 +
51542 + gr_free_uidset();
51543 +
51544 + memset(&name_set, 0, sizeof (struct name_db));
51545 + memset(&inodev_set, 0, sizeof (struct inodev_db));
51546 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51547 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51548 +
51549 + default_role = NULL;
51550 + kernel_role = NULL;
51551 + role_list = NULL;
51552 +
51553 + return;
51554 +}
51555 +
51556 +static __u32
51557 +count_user_objs(struct acl_object_label *userp)
51558 +{
51559 + struct acl_object_label o_tmp;
51560 + __u32 num = 0;
51561 +
51562 + while (userp) {
51563 + if (copy_from_user(&o_tmp, userp,
51564 + sizeof (struct acl_object_label)))
51565 + break;
51566 +
51567 + userp = o_tmp.prev;
51568 + num++;
51569 + }
51570 +
51571 + return num;
51572 +}
51573 +
51574 +static struct acl_subject_label *
51575 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51576 +
51577 +static int
51578 +copy_user_glob(struct acl_object_label *obj)
51579 +{
51580 + struct acl_object_label *g_tmp, **guser;
51581 + unsigned int len;
51582 + char *tmp;
51583 +
51584 + if (obj->globbed == NULL)
51585 + return 0;
51586 +
51587 + guser = &obj->globbed;
51588 + while (*guser) {
51589 + g_tmp = (struct acl_object_label *)
51590 + acl_alloc(sizeof (struct acl_object_label));
51591 + if (g_tmp == NULL)
51592 + return -ENOMEM;
51593 +
51594 + if (copy_from_user(g_tmp, *guser,
51595 + sizeof (struct acl_object_label)))
51596 + return -EFAULT;
51597 +
51598 + len = strnlen_user(g_tmp->filename, PATH_MAX);
51599 +
51600 + if (!len || len >= PATH_MAX)
51601 + return -EINVAL;
51602 +
51603 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51604 + return -ENOMEM;
51605 +
51606 + if (copy_from_user(tmp, g_tmp->filename, len))
51607 + return -EFAULT;
51608 + tmp[len-1] = '\0';
51609 + g_tmp->filename = tmp;
51610 +
51611 + *guser = g_tmp;
51612 + guser = &(g_tmp->next);
51613 + }
51614 +
51615 + return 0;
51616 +}
51617 +
51618 +static int
51619 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51620 + struct acl_role_label *role)
51621 +{
51622 + struct acl_object_label *o_tmp;
51623 + unsigned int len;
51624 + int ret;
51625 + char *tmp;
51626 +
51627 + while (userp) {
51628 + if ((o_tmp = (struct acl_object_label *)
51629 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
51630 + return -ENOMEM;
51631 +
51632 + if (copy_from_user(o_tmp, userp,
51633 + sizeof (struct acl_object_label)))
51634 + return -EFAULT;
51635 +
51636 + userp = o_tmp->prev;
51637 +
51638 + len = strnlen_user(o_tmp->filename, PATH_MAX);
51639 +
51640 + if (!len || len >= PATH_MAX)
51641 + return -EINVAL;
51642 +
51643 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51644 + return -ENOMEM;
51645 +
51646 + if (copy_from_user(tmp, o_tmp->filename, len))
51647 + return -EFAULT;
51648 + tmp[len-1] = '\0';
51649 + o_tmp->filename = tmp;
51650 +
51651 + insert_acl_obj_label(o_tmp, subj);
51652 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51653 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51654 + return -ENOMEM;
51655 +
51656 + ret = copy_user_glob(o_tmp);
51657 + if (ret)
51658 + return ret;
51659 +
51660 + if (o_tmp->nested) {
51661 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51662 + if (IS_ERR(o_tmp->nested))
51663 + return PTR_ERR(o_tmp->nested);
51664 +
51665 + /* insert into nested subject list */
51666 + o_tmp->nested->next = role->hash->first;
51667 + role->hash->first = o_tmp->nested;
51668 + }
51669 + }
51670 +
51671 + return 0;
51672 +}
51673 +
51674 +static __u32
51675 +count_user_subjs(struct acl_subject_label *userp)
51676 +{
51677 + struct acl_subject_label s_tmp;
51678 + __u32 num = 0;
51679 +
51680 + while (userp) {
51681 + if (copy_from_user(&s_tmp, userp,
51682 + sizeof (struct acl_subject_label)))
51683 + break;
51684 +
51685 + userp = s_tmp.prev;
51686 + /* do not count nested subjects here, since
51687 + they are not inserted into the hash table but are
51688 + attached to objects. They have already been counted
51689 + in userspace when sizing the allocation
51690 + stack
51691 + */
51692 + if (!(s_tmp.mode & GR_NESTED))
51693 + num++;
51694 + }
51695 +
51696 + return num;
51697 +}
51698 +
51699 +static int
51700 +copy_user_allowedips(struct acl_role_label *rolep)
51701 +{
51702 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51703 +
51704 + ruserip = rolep->allowed_ips;
51705 +
51706 + while (ruserip) {
51707 + rlast = rtmp;
51708 +
51709 + if ((rtmp = (struct role_allowed_ip *)
51710 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51711 + return -ENOMEM;
51712 +
51713 + if (copy_from_user(rtmp, ruserip,
51714 + sizeof (struct role_allowed_ip)))
51715 + return -EFAULT;
51716 +
51717 + ruserip = rtmp->prev;
51718 +
51719 + if (!rlast) {
51720 + rtmp->prev = NULL;
51721 + rolep->allowed_ips = rtmp;
51722 + } else {
51723 + rlast->next = rtmp;
51724 + rtmp->prev = rlast;
51725 + }
51726 +
51727 + if (!ruserip)
51728 + rtmp->next = NULL;
51729 + }
51730 +
51731 + return 0;
51732 +}
51733 +
51734 +static int
51735 +copy_user_transitions(struct acl_role_label *rolep)
51736 +{
51737 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
51738 +
51739 + unsigned int len;
51740 + char *tmp;
51741 +
51742 + rusertp = rolep->transitions;
51743 +
51744 + while (rusertp) {
51745 + rlast = rtmp;
51746 +
51747 + if ((rtmp = (struct role_transition *)
51748 + acl_alloc(sizeof (struct role_transition))) == NULL)
51749 + return -ENOMEM;
51750 +
51751 + if (copy_from_user(rtmp, rusertp,
51752 + sizeof (struct role_transition)))
51753 + return -EFAULT;
51754 +
51755 + rusertp = rtmp->prev;
51756 +
51757 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51758 +
51759 + if (!len || len >= GR_SPROLE_LEN)
51760 + return -EINVAL;
51761 +
51762 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51763 + return -ENOMEM;
51764 +
51765 + if (copy_from_user(tmp, rtmp->rolename, len))
51766 + return -EFAULT;
51767 + tmp[len-1] = '\0';
51768 + rtmp->rolename = tmp;
51769 +
51770 + if (!rlast) {
51771 + rtmp->prev = NULL;
51772 + rolep->transitions = rtmp;
51773 + } else {
51774 + rlast->next = rtmp;
51775 + rtmp->prev = rlast;
51776 + }
51777 +
51778 + if (!rusertp)
51779 + rtmp->next = NULL;
51780 + }
51781 +
51782 + return 0;
51783 +}
51784 +
51785 +static struct acl_subject_label *
51786 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51787 +{
51788 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51789 + unsigned int len;
51790 + char *tmp;
51791 + __u32 num_objs;
51792 + struct acl_ip_label **i_tmp, *i_utmp2;
51793 + struct gr_hash_struct ghash;
51794 + struct subject_map *subjmap;
51795 + unsigned int i_num;
51796 + int err;
51797 +
51798 + s_tmp = lookup_subject_map(userp);
51799 +
51800 + /* we've already copied this subject into the kernel, just return
51801 + the reference to it, and don't copy it over again
51802 + */
51803 + if (s_tmp)
51804 + return(s_tmp);
51805 +
51806 + if ((s_tmp = (struct acl_subject_label *)
51807 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51808 + return ERR_PTR(-ENOMEM);
51809 +
51810 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51811 + if (subjmap == NULL)
51812 + return ERR_PTR(-ENOMEM);
51813 +
51814 + subjmap->user = userp;
51815 + subjmap->kernel = s_tmp;
51816 + insert_subj_map_entry(subjmap);
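+ /* note: the user->kernel mapping is registered before the subject is
+    filled in, so the recursive copies below (parent_subject, nested
+    subjects) that reference the same userspace subject resolve to this
+    object instead of being copied again */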
51817 +
51818 + if (copy_from_user(s_tmp, userp,
51819 + sizeof (struct acl_subject_label)))
51820 + return ERR_PTR(-EFAULT);
51821 +
51822 + len = strnlen_user(s_tmp->filename, PATH_MAX);
51823 +
51824 + if (!len || len >= PATH_MAX)
51825 + return ERR_PTR(-EINVAL);
51826 +
51827 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51828 + return ERR_PTR(-ENOMEM);
51829 +
51830 + if (copy_from_user(tmp, s_tmp->filename, len))
51831 + return ERR_PTR(-EFAULT);
51832 + tmp[len-1] = '\0';
51833 + s_tmp->filename = tmp;
51834 +
51835 + if (!strcmp(s_tmp->filename, "/"))
51836 + role->root_label = s_tmp;
51837 +
51838 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51839 + return ERR_PTR(-EFAULT);
51840 +
51841 + /* copy user and group transition tables */
51842 +
51843 + if (s_tmp->user_trans_num) {
51844 + uid_t *uidlist;
51845 +
51846 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51847 + if (uidlist == NULL)
51848 + return ERR_PTR(-ENOMEM);
51849 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51850 + return ERR_PTR(-EFAULT);
51851 +
51852 + s_tmp->user_transitions = uidlist;
51853 + }
51854 +
51855 + if (s_tmp->group_trans_num) {
51856 + gid_t *gidlist;
51857 +
51858 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51859 + if (gidlist == NULL)
51860 + return ERR_PTR(-ENOMEM);
51861 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51862 + return ERR_PTR(-EFAULT);
51863 +
51864 + s_tmp->group_transitions = gidlist;
51865 + }
51866 +
51867 + /* set up object hash table */
51868 + num_objs = count_user_objs(ghash.first);
51869 +
51870 + s_tmp->obj_hash_size = num_objs;
51871 + s_tmp->obj_hash =
51872 + (struct acl_object_label **)
51873 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51874 +
51875 + if (!s_tmp->obj_hash)
51876 + return ERR_PTR(-ENOMEM);
51877 +
51878 + memset(s_tmp->obj_hash, 0,
51879 + s_tmp->obj_hash_size *
51880 + sizeof (struct acl_object_label *));
51881 +
51882 + /* add in objects */
51883 + err = copy_user_objs(ghash.first, s_tmp, role);
51884 +
51885 + if (err)
51886 + return ERR_PTR(err);
51887 +
51888 + /* set pointer for parent subject */
51889 + if (s_tmp->parent_subject) {
51890 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51891 +
51892 + if (IS_ERR(s_tmp2))
51893 + return s_tmp2;
51894 +
51895 + s_tmp->parent_subject = s_tmp2;
51896 + }
51897 +
51898 + /* add in ip acls */
51899 +
51900 + if (!s_tmp->ip_num) {
51901 + s_tmp->ips = NULL;
51902 + goto insert;
51903 + }
51904 +
51905 + i_tmp =
51906 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51907 + sizeof (struct acl_ip_label *));
51908 +
51909 + if (!i_tmp)
51910 + return ERR_PTR(-ENOMEM);
51911 +
51912 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
51913 + *(i_tmp + i_num) =
51914 + (struct acl_ip_label *)
51915 + acl_alloc(sizeof (struct acl_ip_label));
51916 + if (!*(i_tmp + i_num))
51917 + return ERR_PTR(-ENOMEM);
51918 +
51919 + if (copy_from_user
51920 + (&i_utmp2, s_tmp->ips + i_num,
51921 + sizeof (struct acl_ip_label *)))
51922 + return ERR_PTR(-EFAULT);
51923 +
51924 + if (copy_from_user
51925 + (*(i_tmp + i_num), i_utmp2,
51926 + sizeof (struct acl_ip_label)))
51927 + return ERR_PTR(-EFAULT);
51928 +
51929 + if ((*(i_tmp + i_num))->iface == NULL)
51930 + continue;
51931 +
51932 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51933 + if (!len || len >= IFNAMSIZ)
51934 + return ERR_PTR(-EINVAL);
51935 + tmp = acl_alloc(len);
51936 + if (tmp == NULL)
51937 + return ERR_PTR(-ENOMEM);
51938 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51939 + return ERR_PTR(-EFAULT);
51940 + (*(i_tmp + i_num))->iface = tmp;
51941 + }
51942 +
51943 + s_tmp->ips = i_tmp;
51944 +
51945 +insert:
51946 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51947 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51948 + return ERR_PTR(-ENOMEM);
51949 +
51950 + return s_tmp;
51951 +}
51952 +
51953 +static int
51954 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51955 +{
51956 + struct acl_subject_label s_pre;
51957 + struct acl_subject_label * ret;
51958 + int err;
51959 +
51960 + while (userp) {
51961 + if (copy_from_user(&s_pre, userp,
51962 + sizeof (struct acl_subject_label)))
51963 + return -EFAULT;
51964 +
51965 + /* do not add nested subjects here, add
51966 + while parsing objects
51967 + */
51968 +
51969 + if (s_pre.mode & GR_NESTED) {
51970 + userp = s_pre.prev;
51971 + continue;
51972 + }
51973 +
51974 + ret = do_copy_user_subj(userp, role);
51975 +
51976 + err = PTR_ERR(ret);
51977 + if (IS_ERR(ret))
51978 + return err;
51979 +
51980 + insert_acl_subj_label(ret, role);
51981 +
51982 + userp = s_pre.prev;
51983 + }
51984 +
51985 + return 0;
51986 +}
51987 +
51988 +static int
51989 +copy_user_acl(struct gr_arg *arg)
51990 +{
51991 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51992 + struct sprole_pw *sptmp;
51993 + struct gr_hash_struct *ghash;
51994 + uid_t *domainlist;
51995 + unsigned int r_num;
51996 + unsigned int len;
51997 + char *tmp;
51998 + int err = 0;
51999 + __u16 i;
52000 + __u32 num_subjs;
52001 +
52002 + /* we need a default and kernel role */
52003 + if (arg->role_db.num_roles < 2)
52004 + return -EINVAL;
52005 +
52006 + /* copy special role authentication info from userspace */
52007 +
52008 + num_sprole_pws = arg->num_sprole_pws;
52009 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52010 +
52011 + if (!acl_special_roles && num_sprole_pws)
52012 + return -ENOMEM;
52013 +
52014 + for (i = 0; i < num_sprole_pws; i++) {
52015 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52016 + if (!sptmp)
52017 + return -ENOMEM;
52018 + if (copy_from_user(sptmp, arg->sprole_pws + i,
52019 + sizeof (struct sprole_pw)))
52020 + return -EFAULT;
52021 +
52022 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52023 +
52024 + if (!len || len >= GR_SPROLE_LEN)
52025 + return -EINVAL;
52026 +
52027 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52028 + return -ENOMEM;
52029 +
52030 + if (copy_from_user(tmp, sptmp->rolename, len))
52031 + return -EFAULT;
52032 +
52033 + tmp[len-1] = '\0';
52034 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52035 + printk(KERN_ALERT "Copying special role %s\n", tmp);
52036 +#endif
52037 + sptmp->rolename = tmp;
52038 + acl_special_roles[i] = sptmp;
52039 + }
52040 +
52041 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52042 +
52043 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52044 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
52045 +
52046 + if (!r_tmp)
52047 + return -ENOMEM;
52048 +
52049 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
52050 + sizeof (struct acl_role_label *)))
52051 + return -EFAULT;
52052 +
52053 + if (copy_from_user(r_tmp, r_utmp2,
52054 + sizeof (struct acl_role_label)))
52055 + return -EFAULT;
52056 +
52057 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52058 +
52059 + if (!len || len >= PATH_MAX)
52060 + return -EINVAL;
52061 +
52062 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52063 + return -ENOMEM;
52064 +
52065 + if (copy_from_user(tmp, r_tmp->rolename, len))
52066 + return -EFAULT;
52067 +
52068 + tmp[len-1] = '\0';
52069 + r_tmp->rolename = tmp;
52070 +
52071 + if (!strcmp(r_tmp->rolename, "default")
52072 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52073 + default_role = r_tmp;
52074 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52075 + kernel_role = r_tmp;
52076 + }
52077 +
52078 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
52079 + return -ENOMEM;
52080 +
52081 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
52082 + return -EFAULT;
52083 +
52084 + r_tmp->hash = ghash;
52085 +
52086 + num_subjs = count_user_subjs(r_tmp->hash->first);
52087 +
52088 + r_tmp->subj_hash_size = num_subjs;
52089 + r_tmp->subj_hash =
52090 + (struct acl_subject_label **)
52091 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52092 +
52093 + if (!r_tmp->subj_hash)
52094 + return -ENOMEM;
52095 +
52096 + err = copy_user_allowedips(r_tmp);
52097 + if (err)
52098 + return err;
52099 +
52100 + /* copy domain info */
52101 + if (r_tmp->domain_children != NULL) {
52102 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52103 + if (domainlist == NULL)
52104 + return -ENOMEM;
52105 +
52106 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
52107 + return -EFAULT;
52108 +
52109 + r_tmp->domain_children = domainlist;
52110 + }
52111 +
52112 + err = copy_user_transitions(r_tmp);
52113 + if (err)
52114 + return err;
52115 +
52116 + memset(r_tmp->subj_hash, 0,
52117 + r_tmp->subj_hash_size *
52118 + sizeof (struct acl_subject_label *));
52119 +
52120 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52121 +
52122 + if (err)
52123 + return err;
52124 +
52125 + /* set nested subject list to null */
52126 + r_tmp->hash->first = NULL;
52127 +
52128 + insert_acl_role_label(r_tmp);
52129 + }
52130 +
52131 + if (default_role == NULL || kernel_role == NULL)
52132 + return -EINVAL;
52133 +
52134 + return err;
52135 +}
52136 +
52137 +static int
52138 +gracl_init(struct gr_arg *args)
52139 +{
52140 + int error = 0;
52141 +
52142 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52143 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52144 +
52145 + if (init_variables(args)) {
52146 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52147 + error = -ENOMEM;
52148 + free_variables();
52149 + goto out;
52150 + }
52151 +
52152 + error = copy_user_acl(args);
52153 + free_init_variables();
52154 + if (error) {
52155 + free_variables();
52156 + goto out;
52157 + }
52158 +
52159 + if ((error = gr_set_acls(0))) {
52160 + free_variables();
52161 + goto out;
52162 + }
52163 +
52164 + pax_open_kernel();
52165 + gr_status |= GR_READY;
52166 + pax_close_kernel();
52167 +
52168 + out:
52169 + return error;
52170 +}
52171 +
52172 + /* derived from glibc fnmatch(); 0: match, 1: no match */
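+ // a few illustrative cases for the matcher below:
+ //   glob_match("/home/*", "/home/user")                -> 0 (match)
+ //   glob_match("/home/*/bin", "/home/user/bin")        -> 0 ('*' followed by '/' stops at the next '/')
+ //   glob_match("/home/?", "/home/ab")                  -> 1 ('?' matches exactly one non-'/' character)
+ //   glob_match("/lib/libc.so.[0-9]", "/lib/libc.so.6") -> 0 (character class)
+ //   a trailing '*' consumes the rest of the name, '/' included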
52173 +
52174 +static int
52175 +glob_match(const char *p, const char *n)
52176 +{
52177 + char c;
52178 +
52179 + while ((c = *p++) != '\0') {
52180 + switch (c) {
52181 + case '?':
52182 + if (*n == '\0')
52183 + return 1;
52184 + else if (*n == '/')
52185 + return 1;
52186 + break;
52187 + case '\\':
52188 + if (*n != c)
52189 + return 1;
52190 + break;
52191 + case '*':
52192 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
52193 + if (*n == '/')
52194 + return 1;
52195 + else if (c == '?') {
52196 + if (*n == '\0')
52197 + return 1;
52198 + else
52199 + ++n;
52200 + }
52201 + }
52202 + if (c == '\0') {
52203 + return 0;
52204 + } else {
52205 + const char *endp;
52206 +
52207 + if ((endp = strchr(n, '/')) == NULL)
52208 + endp = n + strlen(n);
52209 +
52210 + if (c == '[') {
52211 + for (--p; n < endp; ++n)
52212 + if (!glob_match(p, n))
52213 + return 0;
52214 + } else if (c == '/') {
52215 + while (*n != '\0' && *n != '/')
52216 + ++n;
52217 + if (*n == '/' && !glob_match(p, n + 1))
52218 + return 0;
52219 + } else {
52220 + for (--p; n < endp; ++n)
52221 + if (*n == c && !glob_match(p, n))
52222 + return 0;
52223 + }
52224 +
52225 + return 1;
52226 + }
52227 + case '[':
52228 + {
52229 + int not;
52230 + char cold;
52231 +
52232 + if (*n == '\0' || *n == '/')
52233 + return 1;
52234 +
52235 + not = (*p == '!' || *p == '^');
52236 + if (not)
52237 + ++p;
52238 +
52239 + c = *p++;
52240 + for (;;) {
52241 + unsigned char fn = (unsigned char)*n;
52242 +
52243 + if (c == '\0')
52244 + return 1;
52245 + else {
52246 + if (c == fn)
52247 + goto matched;
52248 + cold = c;
52249 + c = *p++;
52250 +
52251 + if (c == '-' && *p != ']') {
52252 + unsigned char cend = *p++;
52253 +
52254 + if (cend == '\0')
52255 + return 1;
52256 +
52257 + if (cold <= fn && fn <= cend)
52258 + goto matched;
52259 +
52260 + c = *p++;
52261 + }
52262 + }
52263 +
52264 + if (c == ']')
52265 + break;
52266 + }
52267 + if (!not)
52268 + return 1;
52269 + break;
52270 + matched:
52271 + while (c != ']') {
52272 + if (c == '\0')
52273 + return 1;
52274 +
52275 + c = *p++;
52276 + }
52277 + if (not)
52278 + return 1;
52279 + }
52280 + break;
52281 + default:
52282 + if (c != *n)
52283 + return 1;
52284 + }
52285 +
52286 + ++n;
52287 + }
52288 +
52289 + if (*n == '\0')
52290 + return 0;
52291 +
52292 + if (*n == '/')
52293 + return 0;
52294 +
52295 + return 1;
52296 +}
52297 +
52298 +static struct acl_object_label *
52299 +chk_glob_label(struct acl_object_label *globbed,
52300 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
52301 +{
52302 + struct acl_object_label *tmp;
52303 +
52304 + if (*path == NULL)
52305 + *path = gr_to_filename_nolock(dentry, mnt);
52306 +
52307 + tmp = globbed;
52308 +
52309 + while (tmp) {
52310 + if (!glob_match(tmp->filename, *path))
52311 + return tmp;
52312 + tmp = tmp->next;
52313 + }
52314 +
52315 + return NULL;
52316 +}
52317 +
52318 +static struct acl_object_label *
52319 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52320 + const ino_t curr_ino, const dev_t curr_dev,
52321 + const struct acl_subject_label *subj, char **path, const int checkglob)
52322 +{
52323 + struct acl_subject_label *tmpsubj;
52324 + struct acl_object_label *retval;
52325 + struct acl_object_label *retval2;
52326 +
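+ /* check this subject first and then walk its parent_subject chain,
+    returning the first object label that matches the inode/device pair;
+    when glob checking is requested and the match carries glob entries,
+    the pathname is tested against them and a matching glob object
+    takes precedence */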
52327 + tmpsubj = (struct acl_subject_label *) subj;
52328 + read_lock(&gr_inode_lock);
52329 + do {
52330 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52331 + if (retval) {
52332 + if (checkglob && retval->globbed) {
52333 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
52334 + if (retval2)
52335 + retval = retval2;
52336 + }
52337 + break;
52338 + }
52339 + } while ((tmpsubj = tmpsubj->parent_subject));
52340 + read_unlock(&gr_inode_lock);
52341 +
52342 + return retval;
52343 +}
52344 +
52345 +static __inline__ struct acl_object_label *
52346 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52347 + struct dentry *curr_dentry,
52348 + const struct acl_subject_label *subj, char **path, const int checkglob)
52349 +{
52350 + int newglob = checkglob;
52351 + ino_t inode;
52352 + dev_t device;
52353 +
52354 + /* if we aren't yet checking a subdirectory of the original path, skip glob checking,
52355 + as we don't want a / * rule to match instead of the / object itself.
52356 + create lookups that call this function are the exception, since they look up
52357 + on the parent and thus need globbing checks on all paths
52358 + */
52359 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52360 + newglob = GR_NO_GLOB;
52361 +
52362 + spin_lock(&curr_dentry->d_lock);
52363 + inode = curr_dentry->d_inode->i_ino;
52364 + device = __get_dev(curr_dentry);
52365 + spin_unlock(&curr_dentry->d_lock);
52366 +
52367 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52368 +}
52369 +
52370 +static struct acl_object_label *
52371 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52372 + const struct acl_subject_label *subj, char *path, const int checkglob)
52373 +{
52374 + struct dentry *dentry = (struct dentry *) l_dentry;
52375 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52376 + struct mount *real_mnt = real_mount(mnt);
52377 + struct acl_object_label *retval;
52378 + struct dentry *parent;
52379 +
52380 + write_seqlock(&rename_lock);
52381 + br_read_lock(vfsmount_lock);
52382 +
52383 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52384 +#ifdef CONFIG_NET
52385 + mnt == sock_mnt ||
52386 +#endif
52387 +#ifdef CONFIG_HUGETLBFS
52388 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52389 +#endif
52390 + /* ignore Eric Biederman */
52391 + IS_PRIVATE(l_dentry->d_inode))) {
52392 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52393 + goto out;
52394 + }
52395 +
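+ /* walk from the target dentry up toward the pinned real root, crossing
+    mount boundaries via the parent mount's mountpoint, and use the first
+    (most specific) object label found along the way */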
52396 + for (;;) {
52397 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52398 + break;
52399 +
52400 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52401 + if (!mnt_has_parent(real_mnt))
52402 + break;
52403 +
52404 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52405 + if (retval != NULL)
52406 + goto out;
52407 +
52408 + dentry = real_mnt->mnt_mountpoint;
52409 + real_mnt = real_mnt->mnt_parent;
52410 + mnt = &real_mnt->mnt;
52411 + continue;
52412 + }
52413 +
52414 + parent = dentry->d_parent;
52415 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52416 + if (retval != NULL)
52417 + goto out;
52418 +
52419 + dentry = parent;
52420 + }
52421 +
52422 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52423 +
52424 + /* real_root is pinned so we don't have to hold a reference */
52425 + if (retval == NULL)
52426 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52427 +out:
52428 + br_read_unlock(vfsmount_lock);
52429 + write_sequnlock(&rename_lock);
52430 +
52431 + BUG_ON(retval == NULL);
52432 +
52433 + return retval;
52434 +}
52435 +
52436 +static __inline__ struct acl_object_label *
52437 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52438 + const struct acl_subject_label *subj)
52439 +{
52440 + char *path = NULL;
52441 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52442 +}
52443 +
52444 +static __inline__ struct acl_object_label *
52445 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52446 + const struct acl_subject_label *subj)
52447 +{
52448 + char *path = NULL;
52449 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52450 +}
52451 +
52452 +static __inline__ struct acl_object_label *
52453 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52454 + const struct acl_subject_label *subj, char *path)
52455 +{
52456 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52457 +}
52458 +
52459 +static struct acl_subject_label *
52460 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52461 + const struct acl_role_label *role)
52462 +{
52463 + struct dentry *dentry = (struct dentry *) l_dentry;
52464 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52465 + struct mount *real_mnt = real_mount(mnt);
52466 + struct acl_subject_label *retval;
52467 + struct dentry *parent;
52468 +
52469 + write_seqlock(&rename_lock);
52470 + br_read_lock(vfsmount_lock);
52471 +
52472 + for (;;) {
52473 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52474 + break;
52475 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52476 + if (!mnt_has_parent(real_mnt))
52477 + break;
52478 +
52479 + spin_lock(&dentry->d_lock);
52480 + read_lock(&gr_inode_lock);
52481 + retval =
52482 + lookup_acl_subj_label(dentry->d_inode->i_ino,
52483 + __get_dev(dentry), role);
52484 + read_unlock(&gr_inode_lock);
52485 + spin_unlock(&dentry->d_lock);
52486 + if (retval != NULL)
52487 + goto out;
52488 +
52489 + dentry = real_mnt->mnt_mountpoint;
52490 + real_mnt = real_mnt->mnt_parent;
52491 + mnt = &real_mnt->mnt;
52492 + continue;
52493 + }
52494 +
52495 + spin_lock(&dentry->d_lock);
52496 + read_lock(&gr_inode_lock);
52497 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52498 + __get_dev(dentry), role);
52499 + read_unlock(&gr_inode_lock);
52500 + parent = dentry->d_parent;
52501 + spin_unlock(&dentry->d_lock);
52502 +
52503 + if (retval != NULL)
52504 + goto out;
52505 +
52506 + dentry = parent;
52507 + }
52508 +
52509 + spin_lock(&dentry->d_lock);
52510 + read_lock(&gr_inode_lock);
52511 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52512 + __get_dev(dentry), role);
52513 + read_unlock(&gr_inode_lock);
52514 + spin_unlock(&dentry->d_lock);
52515 +
52516 + if (unlikely(retval == NULL)) {
52517 + /* real_root is pinned, we don't need to hold a reference */
52518 + read_lock(&gr_inode_lock);
52519 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52520 + __get_dev(real_root.dentry), role);
52521 + read_unlock(&gr_inode_lock);
52522 + }
52523 +out:
52524 + br_read_unlock(vfsmount_lock);
52525 + write_sequnlock(&rename_lock);
52526 +
52527 + BUG_ON(retval == NULL);
52528 +
52529 + return retval;
52530 +}
52531 +
52532 +static void
52533 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52534 +{
52535 + struct task_struct *task = current;
52536 + const struct cred *cred = current_cred();
52537 +
52538 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52539 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52540 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52541 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52542 +
52543 + return;
52544 +}
52545 +
52546 +static void
52547 +gr_log_learn_sysctl(const char *path, const __u32 mode)
52548 +{
52549 + struct task_struct *task = current;
52550 + const struct cred *cred = current_cred();
52551 +
52552 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52553 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52554 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52555 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
52556 +
52557 + return;
52558 +}
52559 +
52560 +static void
52561 +gr_log_learn_id_change(const char type, const unsigned int real,
52562 + const unsigned int effective, const unsigned int fs)
52563 +{
52564 + struct task_struct *task = current;
52565 + const struct cred *cred = current_cred();
52566 +
52567 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52568 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52569 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52570 + type, real, effective, fs, &task->signal->saved_ip);
52571 +
52572 + return;
52573 +}
52574 +
52575 +__u32
52576 +gr_search_file(const struct dentry * dentry, const __u32 mode,
52577 + const struct vfsmount * mnt)
52578 +{
52579 + __u32 retval = mode;
52580 + struct acl_subject_label *curracl;
52581 + struct acl_object_label *currobj;
52582 +
52583 + if (unlikely(!(gr_status & GR_READY)))
52584 + return (mode & ~GR_AUDITS);
52585 +
52586 + curracl = current->acl;
52587 +
52588 + currobj = chk_obj_label(dentry, mnt, curracl);
52589 + retval = currobj->mode & mode;
52590 +
52591 + /* if we're opening a specified transfer file for writing
52592 + (e.g. /dev/initctl), then transfer our role to init
52593 + */
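+ /* (concretely: when a process whose role is marked GR_ROLE_PERSIST gains
+     write access to an object flagged GR_INIT_TRANSFER, the child reaper
+     below is switched over to the caller's role and subject) */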
52594 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52595 + current->role->roletype & GR_ROLE_PERSIST)) {
52596 + struct task_struct *task = init_pid_ns.child_reaper;
52597 +
52598 + if (task->role != current->role) {
52599 + task->acl_sp_role = 0;
52600 + task->acl_role_id = current->acl_role_id;
52601 + task->role = current->role;
52602 + rcu_read_lock();
52603 + read_lock(&grsec_exec_file_lock);
52604 + gr_apply_subject_to_task(task);
52605 + read_unlock(&grsec_exec_file_lock);
52606 + rcu_read_unlock();
52607 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52608 + }
52609 + }
52610 +
52611 + if (unlikely
52612 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52613 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52614 + __u32 new_mode = mode;
52615 +
52616 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52617 +
52618 + retval = new_mode;
52619 +
52620 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52621 + new_mode |= GR_INHERIT;
52622 +
52623 + if (!(mode & GR_NOLEARN))
52624 + gr_log_learn(dentry, mnt, new_mode);
52625 + }
52626 +
52627 + return retval;
52628 +}
52629 +
52630 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52631 + const struct dentry *parent,
52632 + const struct vfsmount *mnt)
52633 +{
52634 + struct name_entry *match;
52635 + struct acl_object_label *matchpo;
52636 + struct acl_subject_label *curracl;
52637 + char *path;
52638 +
52639 + if (unlikely(!(gr_status & GR_READY)))
52640 + return NULL;
52641 +
52642 + preempt_disable();
52643 + path = gr_to_filename_rbac(new_dentry, mnt);
52644 + match = lookup_name_entry_create(path);
52645 +
52646 + curracl = current->acl;
52647 +
52648 + if (match) {
52649 + read_lock(&gr_inode_lock);
52650 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52651 + read_unlock(&gr_inode_lock);
52652 +
52653 + if (matchpo) {
52654 + preempt_enable();
52655 + return matchpo;
52656 + }
52657 + }
52658 +
52659 + // lookup parent
52660 +
52661 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52662 +
52663 + preempt_enable();
52664 + return matchpo;
52665 +}
52666 +
52667 +__u32
52668 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52669 + const struct vfsmount * mnt, const __u32 mode)
52670 +{
52671 + struct acl_object_label *matchpo;
52672 + __u32 retval;
52673 +
52674 + if (unlikely(!(gr_status & GR_READY)))
52675 + return (mode & ~GR_AUDITS);
52676 +
52677 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
52678 +
52679 + retval = matchpo->mode & mode;
52680 +
52681 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52682 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52683 + __u32 new_mode = mode;
52684 +
52685 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52686 +
52687 + gr_log_learn(new_dentry, mnt, new_mode);
52688 + return new_mode;
52689 + }
52690 +
52691 + return retval;
52692 +}
52693 +
52694 +__u32
52695 +gr_check_link(const struct dentry * new_dentry,
52696 + const struct dentry * parent_dentry,
52697 + const struct vfsmount * parent_mnt,
52698 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52699 +{
52700 + struct acl_object_label *obj;
52701 + __u32 oldmode, newmode;
52702 + __u32 needmode;
52703 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52704 + GR_DELETE | GR_INHERIT;
52705 +
52706 + if (unlikely(!(gr_status & GR_READY)))
52707 + return (GR_CREATE | GR_LINK);
52708 +
52709 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52710 + oldmode = obj->mode;
52711 +
52712 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52713 + newmode = obj->mode;
52714 +
52715 + needmode = newmode & checkmodes;
52716 +
52717 + // old name for hardlink must have at least the permissions of the new name
52718 + if ((oldmode & needmode) != needmode)
52719 + goto bad;
52720 +
52721 + // if old name had restrictions/auditing, make sure the new name does as well
52722 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52723 +
52724 + // don't allow hardlinking of suid/sgid files without permission
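+ // (e.g. hardlinking a setuid binary requires the object for the new name
+ //  to grant GR_SETID in addition to GR_CREATE and GR_LINK)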
52725 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52726 + needmode |= GR_SETID;
52727 +
52728 + if ((newmode & needmode) != needmode)
52729 + goto bad;
52730 +
52731 + // enforce minimum permissions
52732 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52733 + return newmode;
52734 +bad:
52735 + needmode = oldmode;
52736 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52737 + needmode |= GR_SETID;
52738 +
52739 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52740 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52741 + return (GR_CREATE | GR_LINK);
52742 + } else if (newmode & GR_SUPPRESS)
52743 + return GR_SUPPRESS;
52744 + else
52745 + return 0;
52746 +}
52747 +
52748 +int
52749 +gr_check_hidden_task(const struct task_struct *task)
52750 +{
52751 + if (unlikely(!(gr_status & GR_READY)))
52752 + return 0;
52753 +
52754 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52755 + return 1;
52756 +
52757 + return 0;
52758 +}
52759 +
52760 +int
52761 +gr_check_protected_task(const struct task_struct *task)
52762 +{
52763 + if (unlikely(!(gr_status & GR_READY) || !task))
52764 + return 0;
52765 +
52766 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52767 + task->acl != current->acl)
52768 + return 1;
52769 +
52770 + return 0;
52771 +}
52772 +
52773 +int
52774 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52775 +{
52776 + struct task_struct *p;
52777 + int ret = 0;
52778 +
52779 + if (unlikely(!(gr_status & GR_READY) || !pid))
52780 + return ret;
52781 +
52782 + read_lock(&tasklist_lock);
52783 + do_each_pid_task(pid, type, p) {
52784 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52785 + p->acl != current->acl) {
52786 + ret = 1;
52787 + goto out;
52788 + }
52789 + } while_each_pid_task(pid, type, p);
52790 +out:
52791 + read_unlock(&tasklist_lock);
52792 +
52793 + return ret;
52794 +}
52795 +
52796 +void
52797 +gr_copy_label(struct task_struct *tsk)
52798 +{
52799 + /* plain copying of fields is already done by dup_task_struct */
52800 + tsk->signal->used_accept = 0;
52801 + tsk->acl_sp_role = 0;
52802 + //tsk->acl_role_id = current->acl_role_id;
52803 + //tsk->acl = current->acl;
52804 + //tsk->role = current->role;
52805 + tsk->signal->curr_ip = current->signal->curr_ip;
52806 + tsk->signal->saved_ip = current->signal->saved_ip;
52807 + if (current->exec_file)
52808 + get_file(current->exec_file);
52809 + //tsk->exec_file = current->exec_file;
52810 + //tsk->is_writable = current->is_writable;
52811 + if (unlikely(current->signal->used_accept)) {
52812 + current->signal->curr_ip = 0;
52813 + current->signal->saved_ip = 0;
52814 + }
52815 +
52816 + return;
52817 +}
52818 +
52819 +static void
52820 +gr_set_proc_res(struct task_struct *task)
52821 +{
52822 + struct acl_subject_label *proc;
52823 + unsigned short i;
52824 +
52825 + proc = task->acl;
52826 +
52827 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52828 + return;
52829 +
52830 + for (i = 0; i < RLIM_NLIMITS; i++) {
52831 + if (!(proc->resmask & (1 << i)))
52832 + continue;
52833 +
52834 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52835 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52836 + }
52837 +
52838 + return;
52839 +}
52840 +
52841 +extern int __gr_process_user_ban(struct user_struct *user);
52842 +
52843 +int
52844 +gr_check_user_change(int real, int effective, int fs)
52845 +{
52846 + unsigned int i;
52847 + __u16 num;
52848 + uid_t *uidlist;
52849 + int curuid;
52850 + int realok = 0;
52851 + int effectiveok = 0;
52852 + int fsok = 0;
52853 +
52854 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52855 + struct user_struct *user;
52856 +
52857 + if (real == -1)
52858 + goto skipit;
52859 +
52860 + user = find_user(real);
52861 + if (user == NULL)
52862 + goto skipit;
52863 +
52864 + if (__gr_process_user_ban(user)) {
52865 + /* for find_user */
52866 + free_uid(user);
52867 + return 1;
52868 + }
52869 +
52870 + /* for find_user */
52871 + free_uid(user);
52872 +
52873 +skipit:
52874 +#endif
52875 +
52876 + if (unlikely(!(gr_status & GR_READY)))
52877 + return 0;
52878 +
52879 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52880 + gr_log_learn_id_change('u', real, effective, fs);
52881 +
52882 + num = current->acl->user_trans_num;
52883 + uidlist = current->acl->user_transitions;
52884 +
52885 + if (uidlist == NULL)
52886 + return 0;
52887 +
52888 + if (real == -1)
52889 + realok = 1;
52890 + if (effective == -1)
52891 + effectiveok = 1;
52892 + if (fs == -1)
52893 + fsok = 1;
52894 +
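+ /* e.g. with GR_ID_ALLOW and a transition list of {33, 48}, a setuid()
+    to any uid outside that list leaves one of the *ok flags clear and is
+    denied and logged below; with GR_ID_DENY, matching any listed uid
+    causes the denial instead */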
52895 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
52896 + for (i = 0; i < num; i++) {
52897 + curuid = (int)uidlist[i];
52898 + if (real == curuid)
52899 + realok = 1;
52900 + if (effective == curuid)
52901 + effectiveok = 1;
52902 + if (fs == curuid)
52903 + fsok = 1;
52904 + }
52905 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
52906 + for (i = 0; i < num; i++) {
52907 + curuid = (int)uidlist[i];
52908 + if (real == curuid)
52909 + break;
52910 + if (effective == curuid)
52911 + break;
52912 + if (fs == curuid)
52913 + break;
52914 + }
52915 + /* not in deny list */
52916 + if (i == num) {
52917 + realok = 1;
52918 + effectiveok = 1;
52919 + fsok = 1;
52920 + }
52921 + }
52922 +
52923 + if (realok && effectiveok && fsok)
52924 + return 0;
52925 + else {
52926 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52927 + return 1;
52928 + }
52929 +}
52930 +
52931 +int
52932 +gr_check_group_change(int real, int effective, int fs)
52933 +{
52934 + unsigned int i;
52935 + __u16 num;
52936 + gid_t *gidlist;
52937 + int curgid;
52938 + int realok = 0;
52939 + int effectiveok = 0;
52940 + int fsok = 0;
52941 +
52942 + if (unlikely(!(gr_status & GR_READY)))
52943 + return 0;
52944 +
52945 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52946 + gr_log_learn_id_change('g', real, effective, fs);
52947 +
52948 + num = current->acl->group_trans_num;
52949 + gidlist = current->acl->group_transitions;
52950 +
52951 + if (gidlist == NULL)
52952 + return 0;
52953 +
52954 + if (real == -1)
52955 + realok = 1;
52956 + if (effective == -1)
52957 + effectiveok = 1;
52958 + if (fs == -1)
52959 + fsok = 1;
52960 +
52961 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
52962 + for (i = 0; i < num; i++) {
52963 + curgid = (int)gidlist[i];
52964 + if (real == curgid)
52965 + realok = 1;
52966 + if (effective == curgid)
52967 + effectiveok = 1;
52968 + if (fs == curgid)
52969 + fsok = 1;
52970 + }
52971 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
52972 + for (i = 0; i < num; i++) {
52973 + curgid = (int)gidlist[i];
52974 + if (real == curgid)
52975 + break;
52976 + if (effective == curgid)
52977 + break;
52978 + if (fs == curgid)
52979 + break;
52980 + }
52981 + /* not in deny list */
52982 + if (i == num) {
52983 + realok = 1;
52984 + effectiveok = 1;
52985 + fsok = 1;
52986 + }
52987 + }
52988 +
52989 + if (realok && effectiveok && fsok)
52990 + return 0;
52991 + else {
52992 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52993 + return 1;
52994 + }
52995 +}
52996 +
52997 +extern int gr_acl_is_capable(const int cap);
52998 +
52999 +void
53000 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53001 +{
53002 + struct acl_role_label *role = task->role;
53003 + struct acl_subject_label *subj = NULL;
53004 + struct acl_object_label *obj;
53005 + struct file *filp;
53006 +
53007 + if (unlikely(!(gr_status & GR_READY)))
53008 + return;
53009 +
53010 + filp = task->exec_file;
53011 +
53012 + /* kernel process, we'll give them the kernel role */
53013 + if (unlikely(!filp)) {
53014 + task->role = kernel_role;
53015 + task->acl = kernel_role->root_label;
53016 + return;
53017 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53018 + role = lookup_acl_role_label(task, uid, gid);
53019 +
53020 + /* don't change the role if we're not a privileged process */
53021 + if (role && task->role != role &&
53022 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
53023 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
53024 + return;
53025 +
53026 + /* perform the subject lookup in the possibly new role;
53027 + we can use this result below in the case where role == task->role
53028 + */
53029 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53030 +
53031 + /* if we changed uid/gid but ended up in the same role
53032 + and are using inheritance, don't lose the inherited subject:
53033 + if the current subject differs from what a normal lookup
53034 + would return, we arrived here via inheritance and should
53035 + keep that subject
53036 + */
53037 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53038 + (subj == task->acl)))
53039 + task->acl = subj;
53040 +
53041 + task->role = role;
53042 +
53043 + task->is_writable = 0;
53044 +
53045 + /* ignore additional mmap checks for processes that are writable
53046 + by the default ACL */
53047 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53048 + if (unlikely(obj->mode & GR_WRITE))
53049 + task->is_writable = 1;
53050 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53051 + if (unlikely(obj->mode & GR_WRITE))
53052 + task->is_writable = 1;
53053 +
53054 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53055 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53056 +#endif
53057 +
53058 + gr_set_proc_res(task);
53059 +
53060 + return;
53061 +}
53062 +
53063 +int
53064 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53065 + const int unsafe_flags)
53066 +{
53067 + struct task_struct *task = current;
53068 + struct acl_subject_label *newacl;
53069 + struct acl_object_label *obj;
53070 + __u32 retmode;
53071 +
53072 + if (unlikely(!(gr_status & GR_READY)))
53073 + return 0;
53074 +
53075 + newacl = chk_subj_label(dentry, mnt, task->role);
53076 +
53077 + /* special handling for the case where an strace -f -p <pid> was done from an admin role,
53078 + and the traced pid then did an exec
53079 + */
53080 + rcu_read_lock();
53081 + read_lock(&tasklist_lock);
53082 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
53083 + (task->parent->acl->mode & GR_POVERRIDE))) {
53084 + read_unlock(&tasklist_lock);
53085 + rcu_read_unlock();
53086 + goto skip_check;
53087 + }
53088 + read_unlock(&tasklist_lock);
53089 + rcu_read_unlock();
53090 +
53091 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53092 + !(task->role->roletype & GR_ROLE_GOD) &&
53093 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53094 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53095 + if (unsafe_flags & LSM_UNSAFE_SHARE)
53096 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53097 + else
53098 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53099 + return -EACCES;
53100 + }
53101 +
53102 +skip_check:
53103 +
53104 + obj = chk_obj_label(dentry, mnt, task->acl);
53105 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53106 +
53107 + if (!(task->acl->mode & GR_INHERITLEARN) &&
53108 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53109 + if (obj->nested)
53110 + task->acl = obj->nested;
53111 + else
53112 + task->acl = newacl;
53113 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53114 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53115 +
53116 + task->is_writable = 0;
53117 +
53118 + /* ignore additional mmap checks for processes that are writable
53119 + by the default ACL */
53120 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
53121 + if (unlikely(obj->mode & GR_WRITE))
53122 + task->is_writable = 1;
53123 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
53124 + if (unlikely(obj->mode & GR_WRITE))
53125 + task->is_writable = 1;
53126 +
53127 + gr_set_proc_res(task);
53128 +
53129 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53130 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53131 +#endif
53132 + return 0;
53133 +}
53134 +
53135 +/* always called with valid inodev ptr */
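+/* marks every object and subject label that still refers to this inode/dev
+   pair as GR_DELETED, so a later create of the same pathname can rebind the
+   label to the new inode (see the update_* helpers further down) */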
53136 +static void
53137 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53138 +{
53139 + struct acl_object_label *matchpo;
53140 + struct acl_subject_label *matchps;
53141 + struct acl_subject_label *subj;
53142 + struct acl_role_label *role;
53143 + unsigned int x;
53144 +
53145 + FOR_EACH_ROLE_START(role)
53146 + FOR_EACH_SUBJECT_START(role, subj, x)
53147 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53148 + matchpo->mode |= GR_DELETED;
53149 + FOR_EACH_SUBJECT_END(subj,x)
53150 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53151 + if (subj->inode == ino && subj->device == dev)
53152 + subj->mode |= GR_DELETED;
53153 + FOR_EACH_NESTED_SUBJECT_END(subj)
53154 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53155 + matchps->mode |= GR_DELETED;
53156 + FOR_EACH_ROLE_END(role)
53157 +
53158 + inodev->nentry->deleted = 1;
53159 +
53160 + return;
53161 +}
53162 +
53163 +void
53164 +gr_handle_delete(const ino_t ino, const dev_t dev)
53165 +{
53166 + struct inodev_entry *inodev;
53167 +
53168 + if (unlikely(!(gr_status & GR_READY)))
53169 + return;
53170 +
53171 + write_lock(&gr_inode_lock);
53172 + inodev = lookup_inodev_entry(ino, dev);
53173 + if (inodev != NULL)
53174 + do_handle_delete(inodev, ino, dev);
53175 + write_unlock(&gr_inode_lock);
53176 +
53177 + return;
53178 +}
53179 +
53180 +static void
53181 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53182 + const ino_t newinode, const dev_t newdevice,
53183 + struct acl_subject_label *subj)
53184 +{
53185 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53186 + struct acl_object_label *match;
53187 +
53188 + match = subj->obj_hash[index];
53189 +
53190 + while (match && (match->inode != oldinode ||
53191 + match->device != olddevice ||
53192 + !(match->mode & GR_DELETED)))
53193 + match = match->next;
53194 +
53195 + if (match && (match->inode == oldinode)
53196 + && (match->device == olddevice)
53197 + && (match->mode & GR_DELETED)) {
53198 + if (match->prev == NULL) {
53199 + subj->obj_hash[index] = match->next;
53200 + if (match->next != NULL)
53201 + match->next->prev = NULL;
53202 + } else {
53203 + match->prev->next = match->next;
53204 + if (match->next != NULL)
53205 + match->next->prev = match->prev;
53206 + }
53207 + match->prev = NULL;
53208 + match->next = NULL;
53209 + match->inode = newinode;
53210 + match->device = newdevice;
53211 + match->mode &= ~GR_DELETED;
53212 +
53213 + insert_acl_obj_label(match, subj);
53214 + }
53215 +
53216 + return;
53217 +}
53218 +
53219 +static void
53220 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53221 + const ino_t newinode, const dev_t newdevice,
53222 + struct acl_role_label *role)
53223 +{
53224 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53225 + struct acl_subject_label *match;
53226 +
53227 + match = role->subj_hash[index];
53228 +
53229 + while (match && (match->inode != oldinode ||
53230 + match->device != olddevice ||
53231 + !(match->mode & GR_DELETED)))
53232 + match = match->next;
53233 +
53234 + if (match && (match->inode == oldinode)
53235 + && (match->device == olddevice)
53236 + && (match->mode & GR_DELETED)) {
53237 + if (match->prev == NULL) {
53238 + role->subj_hash[index] = match->next;
53239 + if (match->next != NULL)
53240 + match->next->prev = NULL;
53241 + } else {
53242 + match->prev->next = match->next;
53243 + if (match->next != NULL)
53244 + match->next->prev = match->prev;
53245 + }
53246 + match->prev = NULL;
53247 + match->next = NULL;
53248 + match->inode = newinode;
53249 + match->device = newdevice;
53250 + match->mode &= ~GR_DELETED;
53251 +
53252 + insert_acl_subj_label(match, role);
53253 + }
53254 +
53255 + return;
53256 +}
53257 +
53258 +static void
53259 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53260 + const ino_t newinode, const dev_t newdevice)
53261 +{
53262 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53263 + struct inodev_entry *match;
53264 +
53265 + match = inodev_set.i_hash[index];
53266 +
53267 + while (match && (match->nentry->inode != oldinode ||
53268 + match->nentry->device != olddevice || !match->nentry->deleted))
53269 + match = match->next;
53270 +
53271 + if (match && (match->nentry->inode == oldinode)
53272 + && (match->nentry->device == olddevice) &&
53273 + match->nentry->deleted) {
53274 + if (match->prev == NULL) {
53275 + inodev_set.i_hash[index] = match->next;
53276 + if (match->next != NULL)
53277 + match->next->prev = NULL;
53278 + } else {
53279 + match->prev->next = match->next;
53280 + if (match->next != NULL)
53281 + match->next->prev = match->prev;
53282 + }
53283 + match->prev = NULL;
53284 + match->next = NULL;
53285 + match->nentry->inode = newinode;
53286 + match->nentry->device = newdevice;
53287 + match->nentry->deleted = 0;
53288 +
53289 + insert_inodev_entry(match);
53290 + }
53291 +
53292 + return;
53293 +}
53294 +
53295 +static void
53296 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53297 +{
53298 + struct acl_subject_label *subj;
53299 + struct acl_role_label *role;
53300 + unsigned int x;
53301 +
53302 + FOR_EACH_ROLE_START(role)
53303 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53304 +
53305 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53306 + if ((subj->inode == ino) && (subj->device == dev)) {
53307 + subj->inode = ino;
53308 + subj->device = dev;
53309 + }
53310 + FOR_EACH_NESTED_SUBJECT_END(subj)
53311 + FOR_EACH_SUBJECT_START(role, subj, x)
53312 + update_acl_obj_label(matchn->inode, matchn->device,
53313 + ino, dev, subj);
53314 + FOR_EACH_SUBJECT_END(subj,x)
53315 + FOR_EACH_ROLE_END(role)
53316 +
53317 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53318 +
53319 + return;
53320 +}
53321 +
53322 +static void
53323 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53324 + const struct vfsmount *mnt)
53325 +{
53326 + ino_t ino = dentry->d_inode->i_ino;
53327 + dev_t dev = __get_dev(dentry);
53328 +
53329 + __do_handle_create(matchn, ino, dev);
53330 +
53331 + return;
53332 +}
53333 +
53334 +void
53335 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53336 +{
53337 + struct name_entry *matchn;
53338 +
53339 + if (unlikely(!(gr_status & GR_READY)))
53340 + return;
53341 +
53342 + preempt_disable();
53343 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53344 +
53345 + if (unlikely((unsigned long)matchn)) {
53346 + write_lock(&gr_inode_lock);
53347 + do_handle_create(matchn, dentry, mnt);
53348 + write_unlock(&gr_inode_lock);
53349 + }
53350 + preempt_enable();
53351 +
53352 + return;
53353 +}
53354 +
53355 +void
53356 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53357 +{
53358 + struct name_entry *matchn;
53359 +
53360 + if (unlikely(!(gr_status & GR_READY)))
53361 + return;
53362 +
53363 + preempt_disable();
53364 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53365 +
53366 + if (unlikely((unsigned long)matchn)) {
53367 + write_lock(&gr_inode_lock);
53368 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53369 + write_unlock(&gr_inode_lock);
53370 + }
53371 + preempt_enable();
53372 +
53373 + return;
53374 +}
53375 +
53376 +void
53377 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53378 + struct dentry *old_dentry,
53379 + struct dentry *new_dentry,
53380 + struct vfsmount *mnt, const __u8 replace)
53381 +{
53382 + struct name_entry *matchn;
53383 + struct inodev_entry *inodev;
53384 + struct inode *inode = new_dentry->d_inode;
53385 + ino_t old_ino = old_dentry->d_inode->i_ino;
53386 + dev_t old_dev = __get_dev(old_dentry);
53387 +
53388 + /* vfs_rename swaps the name and parent link for old_dentry and
53389 + new_dentry.
53390 + At this point, old_dentry has the new name, parent link, and inode
53391 + for the renamed file.
53392 + If a file is being replaced by a rename, new_dentry has the inode
53393 + and name for the replaced file.
53394 + */
53395 +
53396 + if (unlikely(!(gr_status & GR_READY)))
53397 + return;
53398 +
53399 + preempt_disable();
53400 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53401 +
53402 + /* we wouldn't have to check d_inode if it weren't for
53403 + NFS silly-renaming
53404 + */
53405 +
53406 + write_lock(&gr_inode_lock);
53407 + if (unlikely(replace && inode)) {
53408 + ino_t new_ino = inode->i_ino;
53409 + dev_t new_dev = __get_dev(new_dentry);
53410 +
53411 + inodev = lookup_inodev_entry(new_ino, new_dev);
53412 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53413 + do_handle_delete(inodev, new_ino, new_dev);
53414 + }
53415 +
53416 + inodev = lookup_inodev_entry(old_ino, old_dev);
53417 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53418 + do_handle_delete(inodev, old_ino, old_dev);
53419 +
53420 + if (unlikely((unsigned long)matchn))
53421 + do_handle_create(matchn, old_dentry, mnt);
53422 +
53423 + write_unlock(&gr_inode_lock);
53424 + preempt_enable();
53425 +
53426 + return;
53427 +}
53428 +
53429 +static int
53430 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53431 + unsigned char **sum)
53432 +{
53433 + struct acl_role_label *r;
53434 + struct role_allowed_ip *ipp;
53435 + struct role_transition *trans;
53436 + unsigned int i;
53437 + int found = 0;
53438 + u32 curr_ip = current->signal->curr_ip;
53439 +
53440 + current->signal->saved_ip = curr_ip;
53441 +
53442 + /* check transition table */
53443 +
53444 + for (trans = current->role->transitions; trans; trans = trans->next) {
53445 + if (!strcmp(rolename, trans->rolename)) {
53446 + found = 1;
53447 + break;
53448 + }
53449 + }
53450 +
53451 + if (!found)
53452 + return 0;
53453 +
53454 + /* handle special roles that do not require authentication
53455 + and check ip */
53456 +
53457 + FOR_EACH_ROLE_START(r)
53458 + if (!strcmp(rolename, r->rolename) &&
53459 + (r->roletype & GR_ROLE_SPECIAL)) {
53460 + found = 0;
53461 + if (r->allowed_ips != NULL) {
53462 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53463 + if ((ntohl(curr_ip) & ipp->netmask) ==
53464 + (ntohl(ipp->addr) & ipp->netmask))
53465 + found = 1;
53466 + }
53467 + } else
53468 + found = 2;
53469 + if (!found)
53470 + return 0;
53471 +
53472 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53473 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53474 + *salt = NULL;
53475 + *sum = NULL;
53476 + return 1;
53477 + }
53478 + }
53479 + FOR_EACH_ROLE_END(r)
53480 +
53481 + for (i = 0; i < num_sprole_pws; i++) {
53482 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53483 + *salt = acl_special_roles[i]->salt;
53484 + *sum = acl_special_roles[i]->sum;
53485 + return 1;
53486 + }
53487 + }
53488 +
53489 + return 0;
53490 +}
53491 +
53492 +static void
53493 +assign_special_role(char *rolename)
53494 +{
53495 + struct acl_object_label *obj;
53496 + struct acl_role_label *r;
53497 + struct acl_role_label *assigned = NULL;
53498 + struct task_struct *tsk;
53499 + struct file *filp;
53500 +
53501 + FOR_EACH_ROLE_START(r)
53502 + if (!strcmp(rolename, r->rolename) &&
53503 + (r->roletype & GR_ROLE_SPECIAL)) {
53504 + assigned = r;
53505 + break;
53506 + }
53507 + FOR_EACH_ROLE_END(r)
53508 +
53509 + if (!assigned)
53510 + return;
53511 +
53512 + read_lock(&tasklist_lock);
53513 + read_lock(&grsec_exec_file_lock);
53514 +
53515 + tsk = current->real_parent;
53516 + if (tsk == NULL)
53517 + goto out_unlock;
53518 +
53519 + filp = tsk->exec_file;
53520 + if (filp == NULL)
53521 + goto out_unlock;
53522 +
53523 + tsk->is_writable = 0;
53524 +
53525 + tsk->acl_sp_role = 1;
53526 + tsk->acl_role_id = ++acl_sp_role_value;
53527 + tsk->role = assigned;
53528 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53529 +
53530 + /* ignore additional mmap checks for processes that are writable
53531 + by the default ACL */
53532 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53533 + if (unlikely(obj->mode & GR_WRITE))
53534 + tsk->is_writable = 1;
53535 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53536 + if (unlikely(obj->mode & GR_WRITE))
53537 + tsk->is_writable = 1;
53538 +
53539 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53540 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53541 +#endif
53542 +
53543 +out_unlock:
53544 + read_unlock(&grsec_exec_file_lock);
53545 + read_unlock(&tasklist_lock);
53546 + return;
53547 +}
53548 +
53549 +int gr_check_secure_terminal(struct task_struct *task)
53550 +{
53551 + struct task_struct *p, *p2, *p3;
53552 + struct files_struct *files;
53553 + struct fdtable *fdt;
53554 + struct file *our_file = NULL, *file;
53555 + int i;
53556 +
53557 + if (task->signal->tty == NULL)
53558 + return 1;
53559 +
53560 + files = get_files_struct(task);
53561 + if (files != NULL) {
53562 + rcu_read_lock();
53563 + fdt = files_fdtable(files);
53564 + for (i=0; i < fdt->max_fds; i++) {
53565 + file = fcheck_files(files, i);
53566 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53567 + get_file(file);
53568 + our_file = file;
53569 + }
53570 + }
53571 + rcu_read_unlock();
53572 + put_files_struct(files);
53573 + }
53574 +
53575 + if (our_file == NULL)
53576 + return 1;
53577 +
53578 + read_lock(&tasklist_lock);
53579 + do_each_thread(p2, p) {
53580 + files = get_files_struct(p);
53581 + if (files == NULL ||
53582 + (p->signal && p->signal->tty == task->signal->tty)) {
53583 + if (files != NULL)
53584 + put_files_struct(files);
53585 + continue;
53586 + }
53587 + rcu_read_lock();
53588 + fdt = files_fdtable(files);
53589 + for (i=0; i < fdt->max_fds; i++) {
53590 + file = fcheck_files(files, i);
53591 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53592 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53593 + p3 = task;
53594 + while (p3->pid > 0) {
53595 + if (p3 == p)
53596 + break;
53597 + p3 = p3->real_parent;
53598 + }
53599 + if (p3 == p)
53600 + break;
53601 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53602 + gr_handle_alertkill(p);
53603 + rcu_read_unlock();
53604 + put_files_struct(files);
53605 + read_unlock(&tasklist_lock);
53606 + fput(our_file);
53607 + return 0;
53608 + }
53609 + }
53610 + rcu_read_unlock();
53611 + put_files_struct(files);
53612 + } while_each_thread(p2, p);
53613 + read_unlock(&tasklist_lock);
53614 +
53615 + fput(our_file);
53616 + return 1;
53617 +}
53618 +
53619 +ssize_t
53620 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53621 +{
53622 + struct gr_arg_wrapper uwrap;
53623 + unsigned char *sprole_salt = NULL;
53624 + unsigned char *sprole_sum = NULL;
53625 + int error = sizeof (struct gr_arg_wrapper);
53626 + int error2 = 0;
53627 +
53628 + mutex_lock(&gr_dev_mutex);
53629 +
53630 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53631 + error = -EPERM;
53632 + goto out;
53633 + }
53634 +
53635 + if (count != sizeof (struct gr_arg_wrapper)) {
53636 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53637 + error = -EINVAL;
53638 + goto out;
53639 + }
53640 +
53641 +
53642 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53643 + gr_auth_expires = 0;
53644 + gr_auth_attempts = 0;
53645 + }
53646 +
53647 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53648 + error = -EFAULT;
53649 + goto out;
53650 + }
53651 +
53652 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53653 + error = -EINVAL;
53654 + goto out;
53655 + }
53656 +
53657 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53658 + error = -EFAULT;
53659 + goto out;
53660 + }
53661 +
53662 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53663 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53664 + time_after(gr_auth_expires, get_seconds())) {
53665 + error = -EBUSY;
53666 + goto out;
53667 + }
53668 +
53669 + /* if a non-root user is trying to do anything other than use a
53670 + special role, do not attempt authentication and do not count it
53671 + towards authentication locking
53672 + */
53673 +
53674 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53675 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53676 + current_uid()) {
53677 + error = -EPERM;
53678 + goto out;
53679 + }
53680 +
53681 + /* ensure pw and special role name are null terminated */
53682 +
53683 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53684 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53685 +
53686 + /* Okay.
53687 + * We have enough of the argument structure (we have yet
53688 + * to copy_from_user the tables themselves). Copy the tables
53689 + * only if we need them, i.e. for loading operations. */
53690 +
53691 + switch (gr_usermode->mode) {
53692 + case GR_STATUS:
53693 + if (gr_status & GR_READY) {
53694 + error = 1;
53695 + if (!gr_check_secure_terminal(current))
53696 + error = 3;
53697 + } else
53698 + error = 2;
53699 + goto out;
53700 + case GR_SHUTDOWN:
53701 + if ((gr_status & GR_READY)
53702 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53703 + pax_open_kernel();
53704 + gr_status &= ~GR_READY;
53705 + pax_close_kernel();
53706 +
53707 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53708 + free_variables();
53709 + memset(gr_usermode, 0, sizeof (struct gr_arg));
53710 + memset(gr_system_salt, 0, GR_SALT_LEN);
53711 + memset(gr_system_sum, 0, GR_SHA_LEN);
53712 + } else if (gr_status & GR_READY) {
53713 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53714 + error = -EPERM;
53715 + } else {
53716 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53717 + error = -EAGAIN;
53718 + }
53719 + break;
53720 + case GR_ENABLE:
53721 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53722 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53723 + else {
53724 + if (gr_status & GR_READY)
53725 + error = -EAGAIN;
53726 + else
53727 + error = error2;
53728 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53729 + }
53730 + break;
53731 + case GR_RELOAD:
53732 + if (!(gr_status & GR_READY)) {
53733 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53734 + error = -EAGAIN;
53735 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53736 + preempt_disable();
53737 +
53738 + pax_open_kernel();
53739 + gr_status &= ~GR_READY;
53740 + pax_close_kernel();
53741 +
53742 + free_variables();
53743 + if (!(error2 = gracl_init(gr_usermode))) {
53744 + preempt_enable();
53745 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53746 + } else {
53747 + preempt_enable();
53748 + error = error2;
53749 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53750 + }
53751 + } else {
53752 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53753 + error = -EPERM;
53754 + }
53755 + break;
53756 + case GR_SEGVMOD:
53757 + if (unlikely(!(gr_status & GR_READY))) {
53758 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53759 + error = -EAGAIN;
53760 + break;
53761 + }
53762 +
53763 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53764 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53765 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53766 + struct acl_subject_label *segvacl;
53767 + segvacl =
53768 + lookup_acl_subj_label(gr_usermode->segv_inode,
53769 + gr_usermode->segv_device,
53770 + current->role);
53771 + if (segvacl) {
53772 + segvacl->crashes = 0;
53773 + segvacl->expires = 0;
53774 + }
53775 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53776 + gr_remove_uid(gr_usermode->segv_uid);
53777 + }
53778 + } else {
53779 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53780 + error = -EPERM;
53781 + }
53782 + break;
53783 + case GR_SPROLE:
53784 + case GR_SPROLEPAM:
53785 + if (unlikely(!(gr_status & GR_READY))) {
53786 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53787 + error = -EAGAIN;
53788 + break;
53789 + }
53790 +
53791 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53792 + current->role->expires = 0;
53793 + current->role->auth_attempts = 0;
53794 + }
53795 +
53796 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53797 + time_after(current->role->expires, get_seconds())) {
53798 + error = -EBUSY;
53799 + goto out;
53800 + }
53801 +
53802 + if (lookup_special_role_auth
53803 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53804 + && ((!sprole_salt && !sprole_sum)
53805 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53806 + char *p = "";
53807 + assign_special_role(gr_usermode->sp_role);
53808 + read_lock(&tasklist_lock);
53809 + if (current->real_parent)
53810 + p = current->real_parent->role->rolename;
53811 + read_unlock(&tasklist_lock);
53812 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53813 + p, acl_sp_role_value);
53814 + } else {
53815 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53816 + error = -EPERM;
53817 + if(!(current->role->auth_attempts++))
53818 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53819 +
53820 + goto out;
53821 + }
53822 + break;
53823 + case GR_UNSPROLE:
53824 + if (unlikely(!(gr_status & GR_READY))) {
53825 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53826 + error = -EAGAIN;
53827 + break;
53828 + }
53829 +
53830 + if (current->role->roletype & GR_ROLE_SPECIAL) {
53831 + char *p = "";
53832 + int i = 0;
53833 +
53834 + read_lock(&tasklist_lock);
53835 + if (current->real_parent) {
53836 + p = current->real_parent->role->rolename;
53837 + i = current->real_parent->acl_role_id;
53838 + }
53839 + read_unlock(&tasklist_lock);
53840 +
53841 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53842 + gr_set_acls(1);
53843 + } else {
53844 + error = -EPERM;
53845 + goto out;
53846 + }
53847 + break;
53848 + default:
53849 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53850 + error = -EINVAL;
53851 + break;
53852 + }
53853 +
53854 + if (error != -EPERM)
53855 + goto out;
53856 +
53857 + if(!(gr_auth_attempts++))
53858 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53859 +
53860 + out:
53861 + mutex_unlock(&gr_dev_mutex);
53862 + return error;
53863 +}
53864 +
53865 +/* must be called with
53866 + rcu_read_lock();
53867 + read_lock(&tasklist_lock);
53868 + read_lock(&grsec_exec_file_lock);
53869 +*/
53870 +int gr_apply_subject_to_task(struct task_struct *task)
53871 +{
53872 + struct acl_object_label *obj;
53873 + char *tmpname;
53874 + struct acl_subject_label *tmpsubj;
53875 + struct file *filp;
53876 + struct name_entry *nmatch;
53877 +
53878 + filp = task->exec_file;
53879 + if (filp == NULL)
53880 + return 0;
53881 +
53882 + /* the following is to apply the correct subject
53883 + on binaries running when the RBAC system
53884 + is enabled, when the binaries have been
53885 + replaced or deleted since their execution
53886 + -----
53887 + when the RBAC system starts, the inode/dev
53888 + from exec_file will be one the RBAC system
53889 + is unaware of. It only knows the inode/dev
53890 + of the present file on disk, or the absence
53891 + of it.
53892 + */
53893 + preempt_disable();
53894 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53895 +
53896 + nmatch = lookup_name_entry(tmpname);
53897 + preempt_enable();
53898 + tmpsubj = NULL;
53899 + if (nmatch) {
53900 + if (nmatch->deleted)
53901 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53902 + else
53903 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53904 + if (tmpsubj != NULL)
53905 + task->acl = tmpsubj;
53906 + }
53907 + if (tmpsubj == NULL)
53908 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53909 + task->role);
53910 + if (task->acl) {
53911 + task->is_writable = 0;
53912 + /* ignore additional mmap checks for processes that are writable
53913 + by the default ACL */
53914 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53915 + if (unlikely(obj->mode & GR_WRITE))
53916 + task->is_writable = 1;
53917 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53918 + if (unlikely(obj->mode & GR_WRITE))
53919 + task->is_writable = 1;
53920 +
53921 + gr_set_proc_res(task);
53922 +
53923 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53924 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53925 +#endif
53926 + } else {
53927 + return 1;
53928 + }
53929 +
53930 + return 0;
53931 +}
53932 +
53933 +int
53934 +gr_set_acls(const int type)
53935 +{
53936 + struct task_struct *task, *task2;
53937 + struct acl_role_label *role = current->role;
53938 + __u16 acl_role_id = current->acl_role_id;
53939 + const struct cred *cred;
53940 + int ret;
53941 +
53942 + rcu_read_lock();
53943 + read_lock(&tasklist_lock);
53944 + read_lock(&grsec_exec_file_lock);
53945 + do_each_thread(task2, task) {
53946 + /* check to see if we're called from the exit handler,
53947 + if so, only replace ACLs that have inherited the admin
53948 + ACL */
53949 +
53950 + if (type && (task->role != role ||
53951 + task->acl_role_id != acl_role_id))
53952 + continue;
53953 +
53954 + task->acl_role_id = 0;
53955 + task->acl_sp_role = 0;
53956 +
53957 + if (task->exec_file) {
53958 + cred = __task_cred(task);
53959 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53960 + ret = gr_apply_subject_to_task(task);
53961 + if (ret) {
53962 + read_unlock(&grsec_exec_file_lock);
53963 + read_unlock(&tasklist_lock);
53964 + rcu_read_unlock();
53965 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53966 + return ret;
53967 + }
53968 + } else {
53969 + // it's a kernel process
53970 + task->role = kernel_role;
53971 + task->acl = kernel_role->root_label;
53972 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53973 + task->acl->mode &= ~GR_PROCFIND;
53974 +#endif
53975 + }
53976 + } while_each_thread(task2, task);
53977 + read_unlock(&grsec_exec_file_lock);
53978 + read_unlock(&tasklist_lock);
53979 + rcu_read_unlock();
53980 +
53981 + return 0;
53982 +}
53983 +
53984 +void
53985 +gr_learn_resource(const struct task_struct *task,
53986 + const int res, const unsigned long wanted, const int gt)
53987 +{
53988 + struct acl_subject_label *acl;
53989 + const struct cred *cred;
53990 +
53991 + if (unlikely((gr_status & GR_READY) &&
53992 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
53993 + goto skip_reslog;
53994 +
53995 +#ifdef CONFIG_GRKERNSEC_RESLOG
53996 + gr_log_resource(task, res, wanted, gt);
53997 +#endif
53998 + skip_reslog:
53999 +
54000 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54001 + return;
54002 +
54003 + acl = task->acl;
54004 +
54005 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54006 + !(acl->resmask & (1 << (unsigned short) res))))
54007 + return;
54008 +
54009 + if (wanted >= acl->res[res].rlim_cur) {
54010 + unsigned long res_add;
54011 +
54012 + res_add = wanted;
54013 + switch (res) {
54014 + case RLIMIT_CPU:
54015 + res_add += GR_RLIM_CPU_BUMP;
54016 + break;
54017 + case RLIMIT_FSIZE:
54018 + res_add += GR_RLIM_FSIZE_BUMP;
54019 + break;
54020 + case RLIMIT_DATA:
54021 + res_add += GR_RLIM_DATA_BUMP;
54022 + break;
54023 + case RLIMIT_STACK:
54024 + res_add += GR_RLIM_STACK_BUMP;
54025 + break;
54026 + case RLIMIT_CORE:
54027 + res_add += GR_RLIM_CORE_BUMP;
54028 + break;
54029 + case RLIMIT_RSS:
54030 + res_add += GR_RLIM_RSS_BUMP;
54031 + break;
54032 + case RLIMIT_NPROC:
54033 + res_add += GR_RLIM_NPROC_BUMP;
54034 + break;
54035 + case RLIMIT_NOFILE:
54036 + res_add += GR_RLIM_NOFILE_BUMP;
54037 + break;
54038 + case RLIMIT_MEMLOCK:
54039 + res_add += GR_RLIM_MEMLOCK_BUMP;
54040 + break;
54041 + case RLIMIT_AS:
54042 + res_add += GR_RLIM_AS_BUMP;
54043 + break;
54044 + case RLIMIT_LOCKS:
54045 + res_add += GR_RLIM_LOCKS_BUMP;
54046 + break;
54047 + case RLIMIT_SIGPENDING:
54048 + res_add += GR_RLIM_SIGPENDING_BUMP;
54049 + break;
54050 + case RLIMIT_MSGQUEUE:
54051 + res_add += GR_RLIM_MSGQUEUE_BUMP;
54052 + break;
54053 + case RLIMIT_NICE:
54054 + res_add += GR_RLIM_NICE_BUMP;
54055 + break;
54056 + case RLIMIT_RTPRIO:
54057 + res_add += GR_RLIM_RTPRIO_BUMP;
54058 + break;
54059 + case RLIMIT_RTTIME:
54060 + res_add += GR_RLIM_RTTIME_BUMP;
54061 + break;
54062 + }
54063 +
54064 + acl->res[res].rlim_cur = res_add;
54065 +
54066 + if (wanted > acl->res[res].rlim_max)
54067 + acl->res[res].rlim_max = res_add;
54068 +
54069 + /* only log the subject filename, since resource logging is supported for
54070 + single-subject learning only */
54071 + rcu_read_lock();
54072 + cred = __task_cred(task);
54073 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54074 + task->role->roletype, cred->uid, cred->gid, acl->filename,
54075 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54076 + "", (unsigned long) res, &task->signal->saved_ip);
54077 + rcu_read_unlock();
54078 + }
54079 +
54080 + return;
54081 +}
54082 +
54083 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54084 +void
54085 +pax_set_initial_flags(struct linux_binprm *bprm)
54086 +{
54087 + struct task_struct *task = current;
54088 + struct acl_subject_label *proc;
54089 + unsigned long flags;
54090 +
54091 + if (unlikely(!(gr_status & GR_READY)))
54092 + return;
54093 +
54094 + flags = pax_get_flags(task);
54095 +
54096 + proc = task->acl;
54097 +
54098 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54099 + flags &= ~MF_PAX_PAGEEXEC;
54100 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54101 + flags &= ~MF_PAX_SEGMEXEC;
54102 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54103 + flags &= ~MF_PAX_RANDMMAP;
54104 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54105 + flags &= ~MF_PAX_EMUTRAMP;
54106 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54107 + flags &= ~MF_PAX_MPROTECT;
54108 +
54109 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54110 + flags |= MF_PAX_PAGEEXEC;
54111 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54112 + flags |= MF_PAX_SEGMEXEC;
54113 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54114 + flags |= MF_PAX_RANDMMAP;
54115 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54116 + flags |= MF_PAX_EMUTRAMP;
54117 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54118 + flags |= MF_PAX_MPROTECT;
54119 +
54120 + pax_set_flags(task, flags);
54121 +
54122 + return;
54123 +}
54124 +#endif
54125 +
54126 +#ifdef CONFIG_SYSCTL
54127 +/* Eric Biederman likes breaking userland ABI and every inode-based security
54128 + system to save 35kb of memory */
54129 +
54130 +/* we modify the passed in filename, but adjust it back before returning */
54131 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
54132 +{
54133 + struct name_entry *nmatch;
54134 + char *p, *lastp = NULL;
54135 + struct acl_object_label *obj = NULL, *tmp;
54136 + struct acl_subject_label *tmpsubj;
54137 + char c = '\0';
54138 +
54139 + read_lock(&gr_inode_lock);
54140 +
54141 + p = name + len - 1;
54142 + do {
54143 + nmatch = lookup_name_entry(name);
54144 + if (lastp != NULL)
54145 + *lastp = c;
54146 +
54147 + if (nmatch == NULL)
54148 + goto next_component;
54149 + tmpsubj = current->acl;
54150 + do {
54151 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
54152 + if (obj != NULL) {
54153 + tmp = obj->globbed;
54154 + while (tmp) {
54155 + if (!glob_match(tmp->filename, name)) {
54156 + obj = tmp;
54157 + goto found_obj;
54158 + }
54159 + tmp = tmp->next;
54160 + }
54161 + goto found_obj;
54162 + }
54163 + } while ((tmpsubj = tmpsubj->parent_subject));
54164 +next_component:
54165 + /* end case */
54166 + if (p == name)
54167 + break;
54168 +
54169 + while (*p != '/')
54170 + p--;
54171 + if (p == name)
54172 + lastp = p + 1;
54173 + else {
54174 + lastp = p;
54175 + p--;
54176 + }
54177 + c = *lastp;
54178 + *lastp = '\0';
54179 + } while (1);
54180 +found_obj:
54181 + read_unlock(&gr_inode_lock);
54182 + /* obj returned will always be non-null */
54183 + return obj;
54184 +}
54185 +
54186 +/* returns 0 when allowing, non-zero on error
54187 + op of 0 is used for readdir, so we don't log the names of hidden files
54188 +*/
54189 +__u32
54190 +gr_handle_sysctl(const struct ctl_table *table, const int op)
54191 +{
54192 + struct ctl_table *tmp;
54193 + const char *proc_sys = "/proc/sys";
54194 + char *path;
54195 + struct acl_object_label *obj;
54196 + unsigned short len = 0, pos = 0, depth = 0, i;
54197 + __u32 err = 0;
54198 + __u32 mode = 0;
54199 +
54200 + if (unlikely(!(gr_status & GR_READY)))
54201 + return 0;
54202 +
54203 + /* for now, ignore operations on non-sysctl entries if it's not a
54204 + readdir */
54205 + if (table->child != NULL && op != 0)
54206 + return 0;
54207 +
54208 + mode |= GR_FIND;
54209 + /* it's only a read if it's an entry, read on dirs is for readdir */
54210 + if (op & MAY_READ)
54211 + mode |= GR_READ;
54212 + if (op & MAY_WRITE)
54213 + mode |= GR_WRITE;
54214 +
54215 + preempt_disable();
54216 +
54217 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
54218 +
54219 + /* it's only a read/write if it's an actual entry, not a dir
54220 + (which are opened for readdir)
54221 + */
54222 +
54223 + /* convert the requested sysctl entry into a pathname */
54224 +
54225 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54226 + len += strlen(tmp->procname);
54227 + len++;
54228 + depth++;
54229 + }
54230 +
54231 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
54232 + /* deny */
54233 + goto out;
54234 + }
54235 +
54236 + memset(path, 0, PAGE_SIZE);
54237 +
54238 + memcpy(path, proc_sys, strlen(proc_sys));
54239 +
54240 + pos += strlen(proc_sys);
54241 +
54242 + for (; depth > 0; depth--) {
54243 + path[pos] = '/';
54244 + pos++;
54245 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54246 + if (depth == i) {
54247 + memcpy(path + pos, tmp->procname,
54248 + strlen(tmp->procname));
54249 + pos += strlen(tmp->procname);
54250 + }
54251 + i++;
54252 + }
54253 + }
54254 +
54255 + obj = gr_lookup_by_name(path, pos);
54256 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
54257 +
54258 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
54259 + ((err & mode) != mode))) {
54260 + __u32 new_mode = mode;
54261 +
54262 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
54263 +
54264 + err = 0;
54265 + gr_log_learn_sysctl(path, new_mode);
54266 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
54267 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
54268 + err = -ENOENT;
54269 + } else if (!(err & GR_FIND)) {
54270 + err = -ENOENT;
54271 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
54272 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
54273 + path, (mode & GR_READ) ? " reading" : "",
54274 + (mode & GR_WRITE) ? " writing" : "");
54275 + err = -EACCES;
54276 + } else if ((err & mode) != mode) {
54277 + err = -EACCES;
54278 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
54279 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
54280 + path, (mode & GR_READ) ? " reading" : "",
54281 + (mode & GR_WRITE) ? " writing" : "");
54282 + err = 0;
54283 + } else
54284 + err = 0;
54285 +
54286 + out:
54287 + preempt_enable();
54288 +
54289 + return err;
54290 +}
54291 +#endif
54292 +
54293 +int
54294 +gr_handle_proc_ptrace(struct task_struct *task)
54295 +{
54296 + struct file *filp;
54297 + struct task_struct *tmp = task;
54298 + struct task_struct *curtemp = current;
54299 + __u32 retmode;
54300 +
54301 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54302 + if (unlikely(!(gr_status & GR_READY)))
54303 + return 0;
54304 +#endif
54305 +
54306 + read_lock(&tasklist_lock);
54307 + read_lock(&grsec_exec_file_lock);
54308 + filp = task->exec_file;
54309 +
54310 + while (tmp->pid > 0) {
54311 + if (tmp == curtemp)
54312 + break;
54313 + tmp = tmp->real_parent;
54314 + }
54315 +
54316 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54317 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54318 + read_unlock(&grsec_exec_file_lock);
54319 + read_unlock(&tasklist_lock);
54320 + return 1;
54321 + }
54322 +
54323 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54324 + if (!(gr_status & GR_READY)) {
54325 + read_unlock(&grsec_exec_file_lock);
54326 + read_unlock(&tasklist_lock);
54327 + return 0;
54328 + }
54329 +#endif
54330 +
54331 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54332 + read_unlock(&grsec_exec_file_lock);
54333 + read_unlock(&tasklist_lock);
54334 +
54335 + if (retmode & GR_NOPTRACE)
54336 + return 1;
54337 +
54338 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54339 + && (current->acl != task->acl || (current->acl != current->role->root_label
54340 + && current->pid != task->pid)))
54341 + return 1;
54342 +
54343 + return 0;
54344 +}
54345 +
54346 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54347 +{
54348 + if (unlikely(!(gr_status & GR_READY)))
54349 + return;
54350 +
54351 + if (!(current->role->roletype & GR_ROLE_GOD))
54352 + return;
54353 +
54354 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54355 + p->role->rolename, gr_task_roletype_to_char(p),
54356 + p->acl->filename);
54357 +}
54358 +
54359 +int
54360 +gr_handle_ptrace(struct task_struct *task, const long request)
54361 +{
54362 + struct task_struct *tmp = task;
54363 + struct task_struct *curtemp = current;
54364 + __u32 retmode;
54365 +
54366 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54367 + if (unlikely(!(gr_status & GR_READY)))
54368 + return 0;
54369 +#endif
54370 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
54371 + read_lock(&tasklist_lock);
54372 + while (tmp->pid > 0) {
54373 + if (tmp == curtemp)
54374 + break;
54375 + tmp = tmp->real_parent;
54376 + }
54377 +
54378 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54379 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54380 + read_unlock(&tasklist_lock);
54381 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54382 + return 1;
54383 + }
54384 + read_unlock(&tasklist_lock);
54385 + }
54386 +
54387 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54388 + if (!(gr_status & GR_READY))
54389 + return 0;
54390 +#endif
54391 +
54392 + read_lock(&grsec_exec_file_lock);
54393 + if (unlikely(!task->exec_file)) {
54394 + read_unlock(&grsec_exec_file_lock);
54395 + return 0;
54396 + }
54397 +
54398 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54399 + read_unlock(&grsec_exec_file_lock);
54400 +
54401 + if (retmode & GR_NOPTRACE) {
54402 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54403 + return 1;
54404 + }
54405 +
54406 + if (retmode & GR_PTRACERD) {
54407 + switch (request) {
54408 + case PTRACE_SEIZE:
54409 + case PTRACE_POKETEXT:
54410 + case PTRACE_POKEDATA:
54411 + case PTRACE_POKEUSR:
54412 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54413 + case PTRACE_SETREGS:
54414 + case PTRACE_SETFPREGS:
54415 +#endif
54416 +#ifdef CONFIG_X86
54417 + case PTRACE_SETFPXREGS:
54418 +#endif
54419 +#ifdef CONFIG_ALTIVEC
54420 + case PTRACE_SETVRREGS:
54421 +#endif
54422 + return 1;
54423 + default:
54424 + return 0;
54425 + }
54426 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
54427 + !(current->role->roletype & GR_ROLE_GOD) &&
54428 + (current->acl != task->acl)) {
54429 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54430 + return 1;
54431 + }
54432 +
54433 + return 0;
54434 +}
54435 +
54436 +static int is_writable_mmap(const struct file *filp)
54437 +{
54438 + struct task_struct *task = current;
54439 + struct acl_object_label *obj, *obj2;
54440 +
54441 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
54442 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
54443 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54444 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54445 + task->role->root_label);
54446 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54447 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54448 + return 1;
54449 + }
54450 + }
54451 + return 0;
54452 +}
54453 +
54454 +int
54455 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54456 +{
54457 + __u32 mode;
54458 +
54459 + if (unlikely(!file || !(prot & PROT_EXEC)))
54460 + return 1;
54461 +
54462 + if (is_writable_mmap(file))
54463 + return 0;
54464 +
54465 + mode =
54466 + gr_search_file(file->f_path.dentry,
54467 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54468 + file->f_path.mnt);
54469 +
54470 + if (!gr_tpe_allow(file))
54471 + return 0;
54472 +
54473 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54474 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54475 + return 0;
54476 + } else if (unlikely(!(mode & GR_EXEC))) {
54477 + return 0;
54478 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54479 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54480 + return 1;
54481 + }
54482 +
54483 + return 1;
54484 +}
54485 +
54486 +int
54487 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54488 +{
54489 + __u32 mode;
54490 +
54491 + if (unlikely(!file || !(prot & PROT_EXEC)))
54492 + return 1;
54493 +
54494 + if (is_writable_mmap(file))
54495 + return 0;
54496 +
54497 + mode =
54498 + gr_search_file(file->f_path.dentry,
54499 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54500 + file->f_path.mnt);
54501 +
54502 + if (!gr_tpe_allow(file))
54503 + return 0;
54504 +
54505 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54506 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54507 + return 0;
54508 + } else if (unlikely(!(mode & GR_EXEC))) {
54509 + return 0;
54510 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54511 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54512 + return 1;
54513 + }
54514 +
54515 + return 1;
54516 +}
54517 +
54518 +void
54519 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54520 +{
54521 + unsigned long runtime;
54522 + unsigned long cputime;
54523 + unsigned int wday, cday;
54524 + __u8 whr, chr;
54525 + __u8 wmin, cmin;
54526 + __u8 wsec, csec;
54527 + struct timespec timeval;
54528 +
54529 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54530 + !(task->acl->mode & GR_PROCACCT)))
54531 + return;
54532 +
54533 + do_posix_clock_monotonic_gettime(&timeval);
54534 + runtime = timeval.tv_sec - task->start_time.tv_sec;
54535 + wday = runtime / (3600 * 24);
54536 + runtime -= wday * (3600 * 24);
54537 + whr = runtime / 3600;
54538 + runtime -= whr * 3600;
54539 + wmin = runtime / 60;
54540 + runtime -= wmin * 60;
54541 + wsec = runtime;
54542 +
54543 + cputime = (task->utime + task->stime) / HZ;
54544 + cday = cputime / (3600 * 24);
54545 + cputime -= cday * (3600 * 24);
54546 + chr = cputime / 3600;
54547 + cputime -= chr * 3600;
54548 + cmin = cputime / 60;
54549 + cputime -= cmin * 60;
54550 + csec = cputime;
54551 +
54552 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54553 +
54554 + return;
54555 +}
54556 +
54557 +void gr_set_kernel_label(struct task_struct *task)
54558 +{
54559 + if (gr_status & GR_READY) {
54560 + task->role = kernel_role;
54561 + task->acl = kernel_role->root_label;
54562 + }
54563 + return;
54564 +}
54565 +
54566 +#ifdef CONFIG_TASKSTATS
54567 +int gr_is_taskstats_denied(int pid)
54568 +{
54569 + struct task_struct *task;
54570 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54571 + const struct cred *cred;
54572 +#endif
54573 + int ret = 0;
54574 +
54575 + /* restrict taskstats viewing to un-chrooted root users
54576 + who have the 'view' subject flag if the RBAC system is enabled
54577 + */
54578 +
54579 + rcu_read_lock();
54580 + read_lock(&tasklist_lock);
54581 + task = find_task_by_vpid(pid);
54582 + if (task) {
54583 +#ifdef CONFIG_GRKERNSEC_CHROOT
54584 + if (proc_is_chrooted(task))
54585 + ret = -EACCES;
54586 +#endif
54587 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54588 + cred = __task_cred(task);
54589 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54590 + if (cred->uid != 0)
54591 + ret = -EACCES;
54592 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54593 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54594 + ret = -EACCES;
54595 +#endif
54596 +#endif
54597 + if (gr_status & GR_READY) {
54598 + if (!(task->acl->mode & GR_VIEW))
54599 + ret = -EACCES;
54600 + }
54601 + } else
54602 + ret = -ENOENT;
54603 +
54604 + read_unlock(&tasklist_lock);
54605 + rcu_read_unlock();
54606 +
54607 + return ret;
54608 +}
54609 +#endif
54610 +
54611 +/* AUXV entries are filled via a descendant of search_binary_handler
54612 + after we've already applied the subject for the target
54613 +*/
54614 +int gr_acl_enable_at_secure(void)
54615 +{
54616 + if (unlikely(!(gr_status & GR_READY)))
54617 + return 0;
54618 +
54619 + if (current->acl->mode & GR_ATSECURE)
54620 + return 1;
54621 +
54622 + return 0;
54623 +}
54624 +
54625 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54626 +{
54627 + struct task_struct *task = current;
54628 + struct dentry *dentry = file->f_path.dentry;
54629 + struct vfsmount *mnt = file->f_path.mnt;
54630 + struct acl_object_label *obj, *tmp;
54631 + struct acl_subject_label *subj;
54632 + unsigned int bufsize;
54633 + int is_not_root;
54634 + char *path;
54635 + dev_t dev = __get_dev(dentry);
54636 +
54637 + if (unlikely(!(gr_status & GR_READY)))
54638 + return 1;
54639 +
54640 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54641 + return 1;
54642 +
54643 + /* ignore Eric Biederman */
54644 + if (IS_PRIVATE(dentry->d_inode))
54645 + return 1;
54646 +
54647 + subj = task->acl;
54648 + do {
54649 + obj = lookup_acl_obj_label(ino, dev, subj);
54650 + if (obj != NULL)
54651 + return (obj->mode & GR_FIND) ? 1 : 0;
54652 + } while ((subj = subj->parent_subject));
54653 +
54654 + /* this is purely an optimization since we're looking for an object
54655 + for the directory we're doing a readdir on
54656 + if it's possible for any globbed object to match the entry we're
54657 + filling into the directory, then the object we find here will be
54658 + an anchor point with attached globbed objects
54659 + */
54660 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54661 + if (obj->globbed == NULL)
54662 + return (obj->mode & GR_FIND) ? 1 : 0;
54663 +
54664 + is_not_root = ((obj->filename[0] == '/') &&
54665 + (obj->filename[1] == '\0')) ? 0 : 1;
54666 + bufsize = PAGE_SIZE - namelen - is_not_root;
54667 +
54668 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
54669 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54670 + return 1;
54671 +
54672 + preempt_disable();
54673 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54674 + bufsize);
54675 +
54676 + bufsize = strlen(path);
54677 +
54678 + /* if base is "/", don't append an additional slash */
54679 + if (is_not_root)
54680 + *(path + bufsize) = '/';
54681 + memcpy(path + bufsize + is_not_root, name, namelen);
54682 + *(path + bufsize + namelen + is_not_root) = '\0';
54683 +
54684 + tmp = obj->globbed;
54685 + while (tmp) {
54686 + if (!glob_match(tmp->filename, path)) {
54687 + preempt_enable();
54688 + return (tmp->mode & GR_FIND) ? 1 : 0;
54689 + }
54690 + tmp = tmp->next;
54691 + }
54692 + preempt_enable();
54693 + return (obj->mode & GR_FIND) ? 1 : 0;
54694 +}
54695 +
54696 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54697 +EXPORT_SYMBOL(gr_acl_is_enabled);
54698 +#endif
54699 +EXPORT_SYMBOL(gr_learn_resource);
54700 +EXPORT_SYMBOL(gr_set_kernel_label);
54701 +#ifdef CONFIG_SECURITY
54702 +EXPORT_SYMBOL(gr_check_user_change);
54703 +EXPORT_SYMBOL(gr_check_group_change);
54704 +#endif
54705 +
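
A note on gr_handle_sysctl() above: it rebuilds a full /proc/sys path by walking the ctl_table parent chain twice, once to measure the depth and then once per component to emit names outermost-first. Below is a minimal userspace sketch of that double loop, for illustration only; struct tbl and build_sysctl_path() are hypothetical stand-ins, not kernel APIs.

/* Sketch (assumed, not part of the patch): rebuild "/proc/sys/<dir>/.../<entry>"
 * from a parent-linked table chain, emitting the outermost name first,
 * the same way the depth/i double loop in gr_handle_sysctl() does. */
#include <stdio.h>
#include <string.h>

struct tbl {
	const char *procname;
	struct tbl *parent;
};

static void build_sysctl_path(const struct tbl *t, char *buf, size_t len)
{
	int depth = 0, i;
	const struct tbl *p;

	/* first pass: count components */
	for (p = t; p; p = p->parent)
		depth++;

	snprintf(buf, len, "/proc/sys");

	/* second pass: for each depth, find the component with that index
	 * (i == 1 is the leaf table, i == depth is the root) */
	for (; depth > 0; depth--) {
		i = 1;
		for (p = t; p; p = p->parent) {
			if (i == depth) {
				strncat(buf, "/", len - strlen(buf) - 1);
				strncat(buf, p->procname, len - strlen(buf) - 1);
			}
			i++;
		}
	}
}

int main(void)
{
	struct tbl kernel = { "kernel", NULL };
	struct tbl hostname = { "hostname", &kernel };
	char path[256];

	build_sysctl_path(&hostname, path, sizeof(path));
	printf("%s\n", path);   /* prints /proc/sys/kernel/hostname */
	return 0;
}
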
54706 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54707 new file mode 100644
54708 index 0000000..34fefda
54709 --- /dev/null
54710 +++ b/grsecurity/gracl_alloc.c
54711 @@ -0,0 +1,105 @@
54712 +#include <linux/kernel.h>
54713 +#include <linux/mm.h>
54714 +#include <linux/slab.h>
54715 +#include <linux/vmalloc.h>
54716 +#include <linux/gracl.h>
54717 +#include <linux/grsecurity.h>
54718 +
54719 +static unsigned long alloc_stack_next = 1;
54720 +static unsigned long alloc_stack_size = 1;
54721 +static void **alloc_stack;
54722 +
54723 +static __inline__ int
54724 +alloc_pop(void)
54725 +{
54726 + if (alloc_stack_next == 1)
54727 + return 0;
54728 +
54729 + kfree(alloc_stack[alloc_stack_next - 2]);
54730 +
54731 + alloc_stack_next--;
54732 +
54733 + return 1;
54734 +}
54735 +
54736 +static __inline__ int
54737 +alloc_push(void *buf)
54738 +{
54739 + if (alloc_stack_next >= alloc_stack_size)
54740 + return 1;
54741 +
54742 + alloc_stack[alloc_stack_next - 1] = buf;
54743 +
54744 + alloc_stack_next++;
54745 +
54746 + return 0;
54747 +}
54748 +
54749 +void *
54750 +acl_alloc(unsigned long len)
54751 +{
54752 + void *ret = NULL;
54753 +
54754 + if (!len || len > PAGE_SIZE)
54755 + goto out;
54756 +
54757 + ret = kmalloc(len, GFP_KERNEL);
54758 +
54759 + if (ret) {
54760 + if (alloc_push(ret)) {
54761 + kfree(ret);
54762 + ret = NULL;
54763 + }
54764 + }
54765 +
54766 +out:
54767 + return ret;
54768 +}
54769 +
54770 +void *
54771 +acl_alloc_num(unsigned long num, unsigned long len)
54772 +{
54773 + if (!len || (num > (PAGE_SIZE / len)))
54774 + return NULL;
54775 +
54776 + return acl_alloc(num * len);
54777 +}
54778 +
54779 +void
54780 +acl_free_all(void)
54781 +{
54782 + if (gr_acl_is_enabled() || !alloc_stack)
54783 + return;
54784 +
54785 + while (alloc_pop()) ;
54786 +
54787 + if (alloc_stack) {
54788 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54789 + kfree(alloc_stack);
54790 + else
54791 + vfree(alloc_stack);
54792 + }
54793 +
54794 + alloc_stack = NULL;
54795 + alloc_stack_size = 1;
54796 + alloc_stack_next = 1;
54797 +
54798 + return;
54799 +}
54800 +
54801 +int
54802 +acl_alloc_stack_init(unsigned long size)
54803 +{
54804 + if ((size * sizeof (void *)) <= PAGE_SIZE)
54805 + alloc_stack =
54806 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54807 + else
54808 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
54809 +
54810 + alloc_stack_size = size;
54811 +
54812 + if (!alloc_stack)
54813 + return 0;
54814 + else
54815 + return 1;
54816 +}
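
gracl_alloc.c above tracks every policy allocation on a preallocated pointer stack so that acl_free_all() can release the whole policy in one sweep at shutdown or reload. The following userspace sketch shows the same pattern with malloc/free standing in for kmalloc/kfree; tracked_alloc() and the other names are hypothetical and for illustration only.

/* Sketch (assumed): an "allocation stack" that records every allocation
 * so a single call can free them all, mirroring alloc_push()/alloc_pop()
 * and acl_free_all() above. */
#include <stdio.h>
#include <stdlib.h>

static void **stack;
static unsigned long stack_size;
static unsigned long stack_next = 1;

static int stack_init(unsigned long size)
{
	stack = calloc(size, sizeof(void *));
	stack_size = size;
	return stack != NULL;
}

static void *tracked_alloc(unsigned long len)
{
	void *p;

	/* refuse once the tracking stack is full, like alloc_push() failing */
	if (!len || stack_next >= stack_size)
		return NULL;
	p = malloc(len);
	if (p)
		stack[stack_next++ - 1] = p;
	return p;
}

static void tracked_free_all(void)
{
	/* pop and free every recorded allocation, newest first */
	while (stack_next > 1)
		free(stack[--stack_next - 1]);
	free(stack);
	stack = NULL;
	stack_next = 1;
}

int main(void)
{
	if (!stack_init(8))
		return 1;
	tracked_alloc(16);
	tracked_alloc(32);
	tracked_free_all();   /* one call releases every tracked buffer */
	return 0;
}
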
54817 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54818 new file mode 100644
54819 index 0000000..6d21049
54820 --- /dev/null
54821 +++ b/grsecurity/gracl_cap.c
54822 @@ -0,0 +1,110 @@
54823 +#include <linux/kernel.h>
54824 +#include <linux/module.h>
54825 +#include <linux/sched.h>
54826 +#include <linux/gracl.h>
54827 +#include <linux/grsecurity.h>
54828 +#include <linux/grinternal.h>
54829 +
54830 +extern const char *captab_log[];
54831 +extern int captab_log_entries;
54832 +
54833 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
54834 +{
54835 + struct acl_subject_label *curracl;
54836 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54837 + kernel_cap_t cap_audit = __cap_empty_set;
54838 +
54839 + if (!gr_acl_is_enabled())
54840 + return 1;
54841 +
54842 + curracl = task->acl;
54843 +
54844 + cap_drop = curracl->cap_lower;
54845 + cap_mask = curracl->cap_mask;
54846 + cap_audit = curracl->cap_invert_audit;
54847 +
54848 + while ((curracl = curracl->parent_subject)) {
54849 + /* if the cap isn't specified in the current computed mask but is specified in the
54850 + current level subject, and is lowered in the current level subject, then add
54851 + it to the set of dropped capabilities
54852 + otherwise, add the current level subject's mask to the current computed mask
54853 + */
54854 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54855 + cap_raise(cap_mask, cap);
54856 + if (cap_raised(curracl->cap_lower, cap))
54857 + cap_raise(cap_drop, cap);
54858 + if (cap_raised(curracl->cap_invert_audit, cap))
54859 + cap_raise(cap_audit, cap);
54860 + }
54861 + }
54862 +
54863 + if (!cap_raised(cap_drop, cap)) {
54864 + if (cap_raised(cap_audit, cap))
54865 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54866 + return 1;
54867 + }
54868 +
54869 + curracl = task->acl;
54870 +
54871 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54872 + && cap_raised(cred->cap_effective, cap)) {
54873 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54874 + task->role->roletype, cred->uid,
54875 + cred->gid, task->exec_file ?
54876 + gr_to_filename(task->exec_file->f_path.dentry,
54877 + task->exec_file->f_path.mnt) : curracl->filename,
54878 + curracl->filename, 0UL,
54879 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54880 + return 1;
54881 + }
54882 +
54883 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54884 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54885 +
54886 + return 0;
54887 +}
54888 +
54889 +int
54890 +gr_acl_is_capable(const int cap)
54891 +{
54892 + return gr_task_acl_is_capable(current, current_cred(), cap);
54893 +}
54894 +
54895 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
54896 +{
54897 + struct acl_subject_label *curracl;
54898 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54899 +
54900 + if (!gr_acl_is_enabled())
54901 + return 1;
54902 +
54903 + curracl = task->acl;
54904 +
54905 + cap_drop = curracl->cap_lower;
54906 + cap_mask = curracl->cap_mask;
54907 +
54908 + while ((curracl = curracl->parent_subject)) {
54909 + /* if the cap isn't specified in the current computed mask but is specified in the
54910 + current level subject, and is lowered in the current level subject, then add
54911 + it to the set of dropped capabilities
54912 + otherwise, add the current level subject's mask to the current computed mask
54913 + */
54914 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54915 + cap_raise(cap_mask, cap);
54916 + if (cap_raised(curracl->cap_lower, cap))
54917 + cap_raise(cap_drop, cap);
54918 + }
54919 + }
54920 +
54921 + if (!cap_raised(cap_drop, cap))
54922 + return 1;
54923 +
54924 + return 0;
54925 +}
54926 +
54927 +int
54928 +gr_acl_is_capable_nolog(const int cap)
54929 +{
54930 + return gr_task_acl_is_capable_nolog(current, cap);
54931 +}
54932 +
54933 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54934 new file mode 100644
54935 index 0000000..88d0e87
54936 --- /dev/null
54937 +++ b/grsecurity/gracl_fs.c
54938 @@ -0,0 +1,435 @@
54939 +#include <linux/kernel.h>
54940 +#include <linux/sched.h>
54941 +#include <linux/types.h>
54942 +#include <linux/fs.h>
54943 +#include <linux/file.h>
54944 +#include <linux/stat.h>
54945 +#include <linux/grsecurity.h>
54946 +#include <linux/grinternal.h>
54947 +#include <linux/gracl.h>
54948 +
54949 +umode_t
54950 +gr_acl_umask(void)
54951 +{
54952 + if (unlikely(!gr_acl_is_enabled()))
54953 + return 0;
54954 +
54955 + return current->role->umask;
54956 +}
54957 +
54958 +__u32
54959 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54960 + const struct vfsmount * mnt)
54961 +{
54962 + __u32 mode;
54963 +
54964 + if (unlikely(!dentry->d_inode))
54965 + return GR_FIND;
54966 +
54967 + mode =
54968 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54969 +
54970 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54971 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54972 + return mode;
54973 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54974 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54975 + return 0;
54976 + } else if (unlikely(!(mode & GR_FIND)))
54977 + return 0;
54978 +
54979 + return GR_FIND;
54980 +}
54981 +
54982 +__u32
54983 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54984 + int acc_mode)
54985 +{
54986 + __u32 reqmode = GR_FIND;
54987 + __u32 mode;
54988 +
54989 + if (unlikely(!dentry->d_inode))
54990 + return reqmode;
54991 +
54992 + if (acc_mode & MAY_APPEND)
54993 + reqmode |= GR_APPEND;
54994 + else if (acc_mode & MAY_WRITE)
54995 + reqmode |= GR_WRITE;
54996 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54997 + reqmode |= GR_READ;
54998 +
54999 + mode =
55000 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55001 + mnt);
55002 +
55003 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55004 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55005 + reqmode & GR_READ ? " reading" : "",
55006 + reqmode & GR_WRITE ? " writing" : reqmode &
55007 + GR_APPEND ? " appending" : "");
55008 + return reqmode;
55009 + } else
55010 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55011 + {
55012 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55013 + reqmode & GR_READ ? " reading" : "",
55014 + reqmode & GR_WRITE ? " writing" : reqmode &
55015 + GR_APPEND ? " appending" : "");
55016 + return 0;
55017 + } else if (unlikely((mode & reqmode) != reqmode))
55018 + return 0;
55019 +
55020 + return reqmode;
55021 +}
55022 +
55023 +__u32
55024 +gr_acl_handle_creat(const struct dentry * dentry,
55025 + const struct dentry * p_dentry,
55026 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55027 + const int imode)
55028 +{
55029 + __u32 reqmode = GR_WRITE | GR_CREATE;
55030 + __u32 mode;
55031 +
55032 + if (acc_mode & MAY_APPEND)
55033 + reqmode |= GR_APPEND;
55034 + // if a directory was required or the directory already exists, then
55035 + // don't count this open as a read
55036 + if ((acc_mode & MAY_READ) &&
55037 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
55038 + reqmode |= GR_READ;
55039 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
55040 + reqmode |= GR_SETID;
55041 +
55042 + mode =
55043 + gr_check_create(dentry, p_dentry, p_mnt,
55044 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55045 +
55046 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55047 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55048 + reqmode & GR_READ ? " reading" : "",
55049 + reqmode & GR_WRITE ? " writing" : reqmode &
55050 + GR_APPEND ? " appending" : "");
55051 + return reqmode;
55052 + } else
55053 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55054 + {
55055 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55056 + reqmode & GR_READ ? " reading" : "",
55057 + reqmode & GR_WRITE ? " writing" : reqmode &
55058 + GR_APPEND ? " appending" : "");
55059 + return 0;
55060 + } else if (unlikely((mode & reqmode) != reqmode))
55061 + return 0;
55062 +
55063 + return reqmode;
55064 +}
55065 +
55066 +__u32
55067 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55068 + const int fmode)
55069 +{
55070 + __u32 mode, reqmode = GR_FIND;
55071 +
55072 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55073 + reqmode |= GR_EXEC;
55074 + if (fmode & S_IWOTH)
55075 + reqmode |= GR_WRITE;
55076 + if (fmode & S_IROTH)
55077 + reqmode |= GR_READ;
55078 +
55079 + mode =
55080 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55081 + mnt);
55082 +
55083 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55084 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55085 + reqmode & GR_READ ? " reading" : "",
55086 + reqmode & GR_WRITE ? " writing" : "",
55087 + reqmode & GR_EXEC ? " executing" : "");
55088 + return reqmode;
55089 + } else
55090 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55091 + {
55092 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55093 + reqmode & GR_READ ? " reading" : "",
55094 + reqmode & GR_WRITE ? " writing" : "",
55095 + reqmode & GR_EXEC ? " executing" : "");
55096 + return 0;
55097 + } else if (unlikely((mode & reqmode) != reqmode))
55098 + return 0;
55099 +
55100 + return reqmode;
55101 +}
55102 +
55103 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55104 +{
55105 + __u32 mode;
55106 +
55107 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55108 +
55109 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55110 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55111 + return mode;
55112 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55113 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55114 + return 0;
55115 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55116 + return 0;
55117 +
55118 + return (reqmode);
55119 +}
55120 +
55121 +__u32
55122 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55123 +{
55124 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55125 +}
55126 +
55127 +__u32
55128 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55129 +{
55130 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55131 +}
55132 +
55133 +__u32
55134 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55135 +{
55136 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55137 +}
55138 +
55139 +__u32
55140 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55141 +{
55142 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55143 +}
55144 +
55145 +__u32
55146 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55147 + umode_t *modeptr)
55148 +{
55149 + umode_t mode;
55150 +
55151 + *modeptr &= ~gr_acl_umask();
55152 + mode = *modeptr;
55153 +
55154 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55155 + return 1;
55156 +
55157 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
55158 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55159 + GR_CHMOD_ACL_MSG);
55160 + } else {
55161 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55162 + }
55163 +}
55164 +
55165 +__u32
55166 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55167 +{
55168 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55169 +}
55170 +
55171 +__u32
55172 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55173 +{
55174 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55175 +}
55176 +
55177 +__u32
55178 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55179 +{
55180 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55181 +}
55182 +
55183 +__u32
55184 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55185 +{
55186 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55187 + GR_UNIXCONNECT_ACL_MSG);
55188 +}
55189 +
55190 +/* hardlinks require at minimum create and link permission;
55191 + any additional privilege required is based on the
55192 + privilege of the file being linked to
55193 +*/
55194 +__u32
55195 +gr_acl_handle_link(const struct dentry * new_dentry,
55196 + const struct dentry * parent_dentry,
55197 + const struct vfsmount * parent_mnt,
55198 + const struct dentry * old_dentry,
55199 + const struct vfsmount * old_mnt, const char *to)
55200 +{
55201 + __u32 mode;
55202 + __u32 needmode = GR_CREATE | GR_LINK;
55203 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55204 +
55205 + mode =
55206 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55207 + old_mnt);
55208 +
55209 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55210 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55211 + return mode;
55212 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55213 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55214 + return 0;
55215 + } else if (unlikely((mode & needmode) != needmode))
55216 + return 0;
55217 +
55218 + return 1;
55219 +}
55220 +
55221 +__u32
55222 +gr_acl_handle_symlink(const struct dentry * new_dentry,
55223 + const struct dentry * parent_dentry,
55224 + const struct vfsmount * parent_mnt, const char *from)
55225 +{
55226 + __u32 needmode = GR_WRITE | GR_CREATE;
55227 + __u32 mode;
55228 +
55229 + mode =
55230 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
55231 + GR_CREATE | GR_AUDIT_CREATE |
55232 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55233 +
55234 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55235 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55236 + return mode;
55237 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55238 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55239 + return 0;
55240 + } else if (unlikely((mode & needmode) != needmode))
55241 + return 0;
55242 +
55243 + return (GR_WRITE | GR_CREATE);
55244 +}
55245 +
55246 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55247 +{
55248 + __u32 mode;
55249 +
55250 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55251 +
55252 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55253 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55254 + return mode;
55255 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55256 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55257 + return 0;
55258 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55259 + return 0;
55260 +
55261 + return (reqmode);
55262 +}
55263 +
55264 +__u32
55265 +gr_acl_handle_mknod(const struct dentry * new_dentry,
55266 + const struct dentry * parent_dentry,
55267 + const struct vfsmount * parent_mnt,
55268 + const int mode)
55269 +{
55270 + __u32 reqmode = GR_WRITE | GR_CREATE;
55271 + if (unlikely(mode & (S_ISUID | S_ISGID)))
55272 + reqmode |= GR_SETID;
55273 +
55274 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55275 + reqmode, GR_MKNOD_ACL_MSG);
55276 +}
55277 +
55278 +__u32
55279 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
55280 + const struct dentry *parent_dentry,
55281 + const struct vfsmount *parent_mnt)
55282 +{
55283 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55284 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55285 +}
55286 +
55287 +#define RENAME_CHECK_SUCCESS(old, new) \
55288 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55289 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55290 +
55291 +int
55292 +gr_acl_handle_rename(struct dentry *new_dentry,
55293 + struct dentry *parent_dentry,
55294 + const struct vfsmount *parent_mnt,
55295 + struct dentry *old_dentry,
55296 + struct inode *old_parent_inode,
55297 + struct vfsmount *old_mnt, const char *newname)
55298 +{
55299 + __u32 comp1, comp2;
55300 + int error = 0;
55301 +
55302 + if (unlikely(!gr_acl_is_enabled()))
55303 + return 0;
55304 +
55305 + if (!new_dentry->d_inode) {
55306 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55307 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55308 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55309 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55310 + GR_DELETE | GR_AUDIT_DELETE |
55311 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55312 + GR_SUPPRESS, old_mnt);
55313 + } else {
55314 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55315 + GR_CREATE | GR_DELETE |
55316 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55317 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55318 + GR_SUPPRESS, parent_mnt);
55319 + comp2 =
55320 + gr_search_file(old_dentry,
55321 + GR_READ | GR_WRITE | GR_AUDIT_READ |
55322 + GR_DELETE | GR_AUDIT_DELETE |
55323 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55324 + }
55325 +
55326 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55327 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55328 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55329 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55330 + && !(comp2 & GR_SUPPRESS)) {
55331 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55332 + error = -EACCES;
55333 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55334 + error = -EACCES;
55335 +
55336 + return error;
55337 +}
55338 +
55339 +void
55340 +gr_acl_handle_exit(void)
55341 +{
55342 + u16 id;
55343 + char *rolename;
55344 + struct file *exec_file;
55345 +
55346 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55347 + !(current->role->roletype & GR_ROLE_PERSIST))) {
55348 + id = current->acl_role_id;
55349 + rolename = current->role->rolename;
55350 + gr_set_acls(1);
55351 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55352 + }
55353 +
55354 + write_lock(&grsec_exec_file_lock);
55355 + exec_file = current->exec_file;
55356 + current->exec_file = NULL;
55357 + write_unlock(&grsec_exec_file_lock);
55358 +
55359 + if (exec_file)
55360 + fput(exec_file);
55361 +}
55362 +
55363 +int
55364 +gr_acl_handle_procpidmem(const struct task_struct *task)
55365 +{
55366 + if (unlikely(!gr_acl_is_enabled()))
55367 + return 0;
55368 +
55369 + if (task != current && task->acl->mode & GR_PROTPROCFD)
55370 + return -EACCES;
55371 +
55372 + return 0;
55373 +}
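/*
 * A minimal stand-alone sketch of the allow/audit/suppress decision pattern
 * shared by the gr_acl_handle_* functions in gracl_fs.c above, assuming
 * illustrative SK_* flag values (not the kernel's GR_* definitions).
 */
#include <stdio.h>

#define SK_READ     0x01
#define SK_WRITE    0x02
#define SK_AUDITS   0x04	/* policy asked for successful-access logging */
#define SK_SUPPRESS 0x08	/* policy asked to silence denial logging */

/* returns the requested bits when access is allowed, 0 when denied */
static unsigned int decide(unsigned int granted, unsigned int reqmode)
{
	if ((granted & reqmode) == reqmode && (granted & SK_AUDITS)) {
		printf("audit: access allowed\n");	/* GR_DO_AUDIT branch */
		return reqmode;
	}
	if ((granted & reqmode) != reqmode && !(granted & SK_SUPPRESS)) {
		printf("alert: access denied\n");	/* GR_DONT_AUDIT branch */
		return 0;
	}
	if ((granted & reqmode) != reqmode)
		return 0;				/* denied, logging suppressed */
	return reqmode;					/* allowed, nothing to log */
}

int main(void)
{
	decide(SK_READ | SK_WRITE | SK_AUDITS, SK_READ);	/* allowed + audited */
	decide(SK_READ | SK_SUPPRESS, SK_WRITE);		/* denied, suppressed */
	return 0;
}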
55374 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55375 new file mode 100644
55376 index 0000000..58800a7
55377 --- /dev/null
55378 +++ b/grsecurity/gracl_ip.c
55379 @@ -0,0 +1,384 @@
55380 +#include <linux/kernel.h>
55381 +#include <asm/uaccess.h>
55382 +#include <asm/errno.h>
55383 +#include <net/sock.h>
55384 +#include <linux/file.h>
55385 +#include <linux/fs.h>
55386 +#include <linux/net.h>
55387 +#include <linux/in.h>
55388 +#include <linux/skbuff.h>
55389 +#include <linux/ip.h>
55390 +#include <linux/udp.h>
55391 +#include <linux/types.h>
55392 +#include <linux/sched.h>
55393 +#include <linux/netdevice.h>
55394 +#include <linux/inetdevice.h>
55395 +#include <linux/gracl.h>
55396 +#include <linux/grsecurity.h>
55397 +#include <linux/grinternal.h>
55398 +
55399 +#define GR_BIND 0x01
55400 +#define GR_CONNECT 0x02
55401 +#define GR_INVERT 0x04
55402 +#define GR_BINDOVERRIDE 0x08
55403 +#define GR_CONNECTOVERRIDE 0x10
55404 +#define GR_SOCK_FAMILY 0x20
55405 +
55406 +static const char * gr_protocols[IPPROTO_MAX] = {
55407 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55408 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55409 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55410 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55411 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55412 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55413 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55414 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55415 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55416 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55417 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55418 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55419 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55420 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55421 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55422 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55423 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55424 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55425 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55426 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55427 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55428 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55429 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55430 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55431 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55432 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55433 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55434 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55435 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55436 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55437 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55438 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55439 + };
55440 +
55441 +static const char * gr_socktypes[SOCK_MAX] = {
55442 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55443 + "unknown:7", "unknown:8", "unknown:9", "packet"
55444 + };
55445 +
55446 +static const char * gr_sockfamilies[AF_MAX+1] = {
55447 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55448 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
55449 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55450 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
55451 + };
55452 +
55453 +const char *
55454 +gr_proto_to_name(unsigned char proto)
55455 +{
55456 + return gr_protocols[proto];
55457 +}
55458 +
55459 +const char *
55460 +gr_socktype_to_name(unsigned char type)
55461 +{
55462 + return gr_socktypes[type];
55463 +}
55464 +
55465 +const char *
55466 +gr_sockfamily_to_name(unsigned char family)
55467 +{
55468 + return gr_sockfamilies[family];
55469 +}
55470 +
55471 +int
55472 +gr_search_socket(const int domain, const int type, const int protocol)
55473 +{
55474 + struct acl_subject_label *curr;
55475 + const struct cred *cred = current_cred();
55476 +
55477 + if (unlikely(!gr_acl_is_enabled()))
55478 + goto exit;
55479 +
55480 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
55481 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
55482 + goto exit; // let the kernel handle it
55483 +
55484 + curr = current->acl;
55485 +
55486 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55487 + /* the family is allowed, if this is PF_INET allow it only if
55488 + the extra sock type/protocol checks pass */
55489 + if (domain == PF_INET)
55490 + goto inet_check;
55491 + goto exit;
55492 + } else {
55493 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55494 + __u32 fakeip = 0;
55495 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55496 + current->role->roletype, cred->uid,
55497 + cred->gid, current->exec_file ?
55498 + gr_to_filename(current->exec_file->f_path.dentry,
55499 + current->exec_file->f_path.mnt) :
55500 + curr->filename, curr->filename,
55501 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55502 + &current->signal->saved_ip);
55503 + goto exit;
55504 + }
55505 + goto exit_fail;
55506 + }
55507 +
55508 +inet_check:
55509 + /* the rest of this checking is for IPv4 only */
55510 + if (!curr->ips)
55511 + goto exit;
55512 +
55513 + if ((curr->ip_type & (1 << type)) &&
55514 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55515 + goto exit;
55516 +
55517 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55518 + /* we don't place acls on raw sockets, and sometimes
55519 + dgram/ip sockets are opened for ioctl and not
55520 + bind/connect, so we'll fake a bind learn log */
55521 + if (type == SOCK_RAW || type == SOCK_PACKET) {
55522 + __u32 fakeip = 0;
55523 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55524 + current->role->roletype, cred->uid,
55525 + cred->gid, current->exec_file ?
55526 + gr_to_filename(current->exec_file->f_path.dentry,
55527 + current->exec_file->f_path.mnt) :
55528 + curr->filename, curr->filename,
55529 + &fakeip, 0, type,
55530 + protocol, GR_CONNECT, &current->signal->saved_ip);
55531 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55532 + __u32 fakeip = 0;
55533 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55534 + current->role->roletype, cred->uid,
55535 + cred->gid, current->exec_file ?
55536 + gr_to_filename(current->exec_file->f_path.dentry,
55537 + current->exec_file->f_path.mnt) :
55538 + curr->filename, curr->filename,
55539 + &fakeip, 0, type,
55540 + protocol, GR_BIND, &current->signal->saved_ip);
55541 + }
55542 + /* we'll log when they use connect or bind */
55543 + goto exit;
55544 + }
55545 +
55546 +exit_fail:
55547 + if (domain == PF_INET)
55548 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55549 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
55550 + else
55551 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55552 + gr_socktype_to_name(type), protocol);
55553 +
55554 + return 0;
55555 +exit:
55556 + return 1;
55557 +}
55558 +
55559 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55560 +{
55561 + if ((ip->mode & mode) &&
55562 + (ip_port >= ip->low) &&
55563 + (ip_port <= ip->high) &&
55564 + ((ntohl(ip_addr) & our_netmask) ==
55565 + (ntohl(our_addr) & our_netmask))
55566 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55567 + && (ip->type & (1 << type))) {
55568 + if (ip->mode & GR_INVERT)
55569 + return 2; // specifically denied
55570 + else
55571 + return 1; // allowed
55572 + }
55573 +
55574 + return 0; // not specifically allowed, may continue parsing
55575 +}
55576 +
55577 +static int
55578 +gr_search_connectbind(const int full_mode, struct sock *sk,
55579 + struct sockaddr_in *addr, const int type)
55580 +{
55581 + char iface[IFNAMSIZ] = {0};
55582 + struct acl_subject_label *curr;
55583 + struct acl_ip_label *ip;
55584 + struct inet_sock *isk;
55585 + struct net_device *dev;
55586 + struct in_device *idev;
55587 + unsigned long i;
55588 + int ret;
55589 + int mode = full_mode & (GR_BIND | GR_CONNECT);
55590 + __u32 ip_addr = 0;
55591 + __u32 our_addr;
55592 + __u32 our_netmask;
55593 + char *p;
55594 + __u16 ip_port = 0;
55595 + const struct cred *cred = current_cred();
55596 +
55597 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55598 + return 0;
55599 +
55600 + curr = current->acl;
55601 + isk = inet_sk(sk);
55602 +
55603 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55604 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55605 + addr->sin_addr.s_addr = curr->inaddr_any_override;
55606 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55607 + struct sockaddr_in saddr;
55608 + int err;
55609 +
55610 + saddr.sin_family = AF_INET;
55611 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
55612 + saddr.sin_port = isk->inet_sport;
55613 +
55614 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55615 + if (err)
55616 + return err;
55617 +
55618 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55619 + if (err)
55620 + return err;
55621 + }
55622 +
55623 + if (!curr->ips)
55624 + return 0;
55625 +
55626 + ip_addr = addr->sin_addr.s_addr;
55627 + ip_port = ntohs(addr->sin_port);
55628 +
55629 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55630 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55631 + current->role->roletype, cred->uid,
55632 + cred->gid, current->exec_file ?
55633 + gr_to_filename(current->exec_file->f_path.dentry,
55634 + current->exec_file->f_path.mnt) :
55635 + curr->filename, curr->filename,
55636 + &ip_addr, ip_port, type,
55637 + sk->sk_protocol, mode, &current->signal->saved_ip);
55638 + return 0;
55639 + }
55640 +
55641 + for (i = 0; i < curr->ip_num; i++) {
55642 + ip = *(curr->ips + i);
55643 + if (ip->iface != NULL) {
55644 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
55645 + p = strchr(iface, ':');
55646 + if (p != NULL)
55647 + *p = '\0';
55648 + dev = dev_get_by_name(sock_net(sk), iface);
55649 + if (dev == NULL)
55650 + continue;
55651 + idev = in_dev_get(dev);
55652 + if (idev == NULL) {
55653 + dev_put(dev);
55654 + continue;
55655 + }
55656 + rcu_read_lock();
55657 + for_ifa(idev) {
55658 + if (!strcmp(ip->iface, ifa->ifa_label)) {
55659 + our_addr = ifa->ifa_address;
55660 + our_netmask = 0xffffffff;
55661 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55662 + if (ret == 1) {
55663 + rcu_read_unlock();
55664 + in_dev_put(idev);
55665 + dev_put(dev);
55666 + return 0;
55667 + } else if (ret == 2) {
55668 + rcu_read_unlock();
55669 + in_dev_put(idev);
55670 + dev_put(dev);
55671 + goto denied;
55672 + }
55673 + }
55674 + } endfor_ifa(idev);
55675 + rcu_read_unlock();
55676 + in_dev_put(idev);
55677 + dev_put(dev);
55678 + } else {
55679 + our_addr = ip->addr;
55680 + our_netmask = ip->netmask;
55681 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55682 + if (ret == 1)
55683 + return 0;
55684 + else if (ret == 2)
55685 + goto denied;
55686 + }
55687 + }
55688 +
55689 +denied:
55690 + if (mode == GR_BIND)
55691 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55692 + else if (mode == GR_CONNECT)
55693 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55694 +
55695 + return -EACCES;
55696 +}
55697 +
55698 +int
55699 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55700 +{
55701 + /* always allow disconnection of dgram sockets with connect */
55702 + if (addr->sin_family == AF_UNSPEC)
55703 + return 0;
55704 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55705 +}
55706 +
55707 +int
55708 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55709 +{
55710 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55711 +}
55712 +
55713 +int gr_search_listen(struct socket *sock)
55714 +{
55715 + struct sock *sk = sock->sk;
55716 + struct sockaddr_in addr;
55717 +
55718 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55719 + addr.sin_port = inet_sk(sk)->inet_sport;
55720 +
55721 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55722 +}
55723 +
55724 +int gr_search_accept(struct socket *sock)
55725 +{
55726 + struct sock *sk = sock->sk;
55727 + struct sockaddr_in addr;
55728 +
55729 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55730 + addr.sin_port = inet_sk(sk)->inet_sport;
55731 +
55732 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55733 +}
55734 +
55735 +int
55736 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55737 +{
55738 + if (addr)
55739 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55740 + else {
55741 + struct sockaddr_in sin;
55742 + const struct inet_sock *inet = inet_sk(sk);
55743 +
55744 + sin.sin_addr.s_addr = inet->inet_daddr;
55745 + sin.sin_port = inet->inet_dport;
55746 +
55747 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55748 + }
55749 +}
55750 +
55751 +int
55752 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55753 +{
55754 + struct sockaddr_in sin;
55755 +
55756 + if (unlikely(skb->len < sizeof (struct udphdr)))
55757 + return 0; // skip this packet
55758 +
55759 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55760 + sin.sin_port = udp_hdr(skb)->source;
55761 +
55762 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55763 +}
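/*
 * A minimal stand-alone sketch of the bitmap membership test used above for
 * curr->sock_families[] and curr->ip_proto[]: bit N of a 32-bit word array
 * marks family/protocol N as allowed.  Array size and names are illustrative.
 */
#include <stdio.h>

static int bit_is_set(const unsigned int *map, unsigned int n)
{
	return (map[n / 32] & (1U << (n % 32))) != 0;
}

static void set_bit32(unsigned int *map, unsigned int n)
{
	map[n / 32] |= 1U << (n % 32);
}

int main(void)
{
	unsigned int proto_map[8] = { 0 };	/* covers 256 protocol numbers */

	set_bit32(proto_map, 6);	/* "tcp"  (IPPROTO_TCP == 6)  */
	set_bit32(proto_map, 17);	/* "udp"  (IPPROTO_UDP == 17) */

	printf("tcp allowed:  %d\n", bit_is_set(proto_map, 6));
	printf("icmp allowed: %d\n", bit_is_set(proto_map, 1));
	return 0;
}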
55764 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55765 new file mode 100644
55766 index 0000000..25f54ef
55767 --- /dev/null
55768 +++ b/grsecurity/gracl_learn.c
55769 @@ -0,0 +1,207 @@
55770 +#include <linux/kernel.h>
55771 +#include <linux/mm.h>
55772 +#include <linux/sched.h>
55773 +#include <linux/poll.h>
55774 +#include <linux/string.h>
55775 +#include <linux/file.h>
55776 +#include <linux/types.h>
55777 +#include <linux/vmalloc.h>
55778 +#include <linux/grinternal.h>
55779 +
55780 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55781 + size_t count, loff_t *ppos);
55782 +extern int gr_acl_is_enabled(void);
55783 +
55784 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55785 +static int gr_learn_attached;
55786 +
55787 +/* use a 512k buffer */
55788 +#define LEARN_BUFFER_SIZE (512 * 1024)
55789 +
55790 +static DEFINE_SPINLOCK(gr_learn_lock);
55791 +static DEFINE_MUTEX(gr_learn_user_mutex);
55792 +
55793 +/* we need to maintain two buffers: the grlearn reader context takes a
55794 +   mutex around the userspace copying, while the other kernel contexts
55795 +   take a spinlock when copying into the buffer, since they cannot sleep
55796 +*/
55797 +static char *learn_buffer;
55798 +static char *learn_buffer_user;
55799 +static int learn_buffer_len;
55800 +static int learn_buffer_user_len;
55801 +
55802 +static ssize_t
55803 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55804 +{
55805 + DECLARE_WAITQUEUE(wait, current);
55806 + ssize_t retval = 0;
55807 +
55808 + add_wait_queue(&learn_wait, &wait);
55809 + set_current_state(TASK_INTERRUPTIBLE);
55810 + do {
55811 + mutex_lock(&gr_learn_user_mutex);
55812 + spin_lock(&gr_learn_lock);
55813 + if (learn_buffer_len)
55814 + break;
55815 + spin_unlock(&gr_learn_lock);
55816 + mutex_unlock(&gr_learn_user_mutex);
55817 + if (file->f_flags & O_NONBLOCK) {
55818 + retval = -EAGAIN;
55819 + goto out;
55820 + }
55821 + if (signal_pending(current)) {
55822 + retval = -ERESTARTSYS;
55823 + goto out;
55824 + }
55825 +
55826 + schedule();
55827 + } while (1);
55828 +
55829 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55830 + learn_buffer_user_len = learn_buffer_len;
55831 + retval = learn_buffer_len;
55832 + learn_buffer_len = 0;
55833 +
55834 + spin_unlock(&gr_learn_lock);
55835 +
55836 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55837 + retval = -EFAULT;
55838 +
55839 + mutex_unlock(&gr_learn_user_mutex);
55840 +out:
55841 + set_current_state(TASK_RUNNING);
55842 + remove_wait_queue(&learn_wait, &wait);
55843 + return retval;
55844 +}
55845 +
55846 +static unsigned int
55847 +poll_learn(struct file * file, poll_table * wait)
55848 +{
55849 + poll_wait(file, &learn_wait, wait);
55850 +
55851 + if (learn_buffer_len)
55852 + return (POLLIN | POLLRDNORM);
55853 +
55854 + return 0;
55855 +}
55856 +
55857 +void
55858 +gr_clear_learn_entries(void)
55859 +{
55860 + char *tmp;
55861 +
55862 + mutex_lock(&gr_learn_user_mutex);
55863 + spin_lock(&gr_learn_lock);
55864 + tmp = learn_buffer;
55865 + learn_buffer = NULL;
55866 + spin_unlock(&gr_learn_lock);
55867 + if (tmp)
55868 + vfree(tmp);
55869 + if (learn_buffer_user != NULL) {
55870 + vfree(learn_buffer_user);
55871 + learn_buffer_user = NULL;
55872 + }
55873 + learn_buffer_len = 0;
55874 + mutex_unlock(&gr_learn_user_mutex);
55875 +
55876 + return;
55877 +}
55878 +
55879 +void
55880 +gr_add_learn_entry(const char *fmt, ...)
55881 +{
55882 + va_list args;
55883 + unsigned int len;
55884 +
55885 + if (!gr_learn_attached)
55886 + return;
55887 +
55888 + spin_lock(&gr_learn_lock);
55889 +
55890 + /* leave a gap at the end so we know when it's "full" but don't have to
55891 + compute the exact length of the string we're trying to append
55892 + */
55893 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55894 + spin_unlock(&gr_learn_lock);
55895 + wake_up_interruptible(&learn_wait);
55896 + return;
55897 + }
55898 + if (learn_buffer == NULL) {
55899 + spin_unlock(&gr_learn_lock);
55900 + return;
55901 + }
55902 +
55903 + va_start(args, fmt);
55904 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55905 + va_end(args);
55906 +
55907 + learn_buffer_len += len + 1;
55908 +
55909 + spin_unlock(&gr_learn_lock);
55910 + wake_up_interruptible(&learn_wait);
55911 +
55912 + return;
55913 +}
55914 +
55915 +static int
55916 +open_learn(struct inode *inode, struct file *file)
55917 +{
55918 + if (file->f_mode & FMODE_READ && gr_learn_attached)
55919 + return -EBUSY;
55920 + if (file->f_mode & FMODE_READ) {
55921 + int retval = 0;
55922 + mutex_lock(&gr_learn_user_mutex);
55923 + if (learn_buffer == NULL)
55924 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55925 + if (learn_buffer_user == NULL)
55926 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55927 + if (learn_buffer == NULL) {
55928 + retval = -ENOMEM;
55929 + goto out_error;
55930 + }
55931 + if (learn_buffer_user == NULL) {
55932 + retval = -ENOMEM;
55933 + goto out_error;
55934 + }
55935 + learn_buffer_len = 0;
55936 + learn_buffer_user_len = 0;
55937 + gr_learn_attached = 1;
55938 +out_error:
55939 + mutex_unlock(&gr_learn_user_mutex);
55940 + return retval;
55941 + }
55942 + return 0;
55943 +}
55944 +
55945 +static int
55946 +close_learn(struct inode *inode, struct file *file)
55947 +{
55948 + if (file->f_mode & FMODE_READ) {
55949 + char *tmp = NULL;
55950 + mutex_lock(&gr_learn_user_mutex);
55951 + spin_lock(&gr_learn_lock);
55952 + tmp = learn_buffer;
55953 + learn_buffer = NULL;
55954 + spin_unlock(&gr_learn_lock);
55955 + if (tmp)
55956 + vfree(tmp);
55957 + if (learn_buffer_user != NULL) {
55958 + vfree(learn_buffer_user);
55959 + learn_buffer_user = NULL;
55960 + }
55961 + learn_buffer_len = 0;
55962 + learn_buffer_user_len = 0;
55963 + gr_learn_attached = 0;
55964 + mutex_unlock(&gr_learn_user_mutex);
55965 + }
55966 +
55967 + return 0;
55968 +}
55969 +
55970 +const struct file_operations grsec_fops = {
55971 + .read = read_learn,
55972 + .write = write_grsec_handler,
55973 + .open = open_learn,
55974 + .release = close_learn,
55975 + .poll = poll_learn,
55976 +};
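/*
 * A minimal user-space sketch of the two-buffer handoff used by gracl_learn.c
 * above: producers append under a short, non-sleeping lock, while the single
 * reader snapshots the buffer and performs its slow copy under a second lock.
 * pthread mutexes stand in for the kernel spinlock/mutex pair; sizes and
 * names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SZ 4096

static pthread_mutex_t produce_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~gr_learn_lock */
static pthread_mutex_t consume_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~gr_learn_user_mutex */
static char buf[BUF_SZ], buf_user[BUF_SZ];
static size_t buf_len;

static void add_entry(const char *s)	/* producer: short critical section */
{
	size_t n = strlen(s);

	pthread_mutex_lock(&produce_lock);
	if (buf_len + n < BUF_SZ) {
		memcpy(buf + buf_len, s, n);
		buf_len += n;
	}
	pthread_mutex_unlock(&produce_lock);
}

static size_t drain(void)		/* reader: may take its time */
{
	size_t n;

	pthread_mutex_lock(&consume_lock);
	pthread_mutex_lock(&produce_lock);
	n = buf_len;
	memcpy(buf_user, buf, n);	/* snapshot, then let producers continue */
	buf_len = 0;
	pthread_mutex_unlock(&produce_lock);
	fwrite(buf_user, 1, n, stdout);	/* slow step (copy_to_user in the kernel) */
	pthread_mutex_unlock(&consume_lock);
	return n;
}

int main(void)
{
	add_entry("learn entry 1\n");
	add_entry("learn entry 2\n");
	drain();
	return 0;
}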
55977 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55978 new file mode 100644
55979 index 0000000..39645c9
55980 --- /dev/null
55981 +++ b/grsecurity/gracl_res.c
55982 @@ -0,0 +1,68 @@
55983 +#include <linux/kernel.h>
55984 +#include <linux/sched.h>
55985 +#include <linux/gracl.h>
55986 +#include <linux/grinternal.h>
55987 +
55988 +static const char *restab_log[] = {
55989 + [RLIMIT_CPU] = "RLIMIT_CPU",
55990 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55991 + [RLIMIT_DATA] = "RLIMIT_DATA",
55992 + [RLIMIT_STACK] = "RLIMIT_STACK",
55993 + [RLIMIT_CORE] = "RLIMIT_CORE",
55994 + [RLIMIT_RSS] = "RLIMIT_RSS",
55995 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
55996 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55997 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55998 + [RLIMIT_AS] = "RLIMIT_AS",
55999 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56000 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56001 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56002 + [RLIMIT_NICE] = "RLIMIT_NICE",
56003 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56004 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56005 + [GR_CRASH_RES] = "RLIMIT_CRASH"
56006 +};
56007 +
56008 +void
56009 +gr_log_resource(const struct task_struct *task,
56010 + const int res, const unsigned long wanted, const int gt)
56011 +{
56012 + const struct cred *cred;
56013 + unsigned long rlim;
56014 +
56015 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
56016 + return;
56017 +
56018 + // not yet supported resource
56019 + if (unlikely(!restab_log[res]))
56020 + return;
56021 +
56022 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56023 + rlim = task_rlimit_max(task, res);
56024 + else
56025 + rlim = task_rlimit(task, res);
56026 +
56027 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56028 + return;
56029 +
56030 + rcu_read_lock();
56031 + cred = __task_cred(task);
56032 +
56033 + if (res == RLIMIT_NPROC &&
56034 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56035 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56036 + goto out_rcu_unlock;
56037 + else if (res == RLIMIT_MEMLOCK &&
56038 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56039 + goto out_rcu_unlock;
56040 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56041 + goto out_rcu_unlock;
56042 + rcu_read_unlock();
56043 +
56044 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56045 +
56046 + return;
56047 +out_rcu_unlock:
56048 + rcu_read_unlock();
56049 + return;
56050 +}
56051 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56052 new file mode 100644
56053 index 0000000..5556be3
56054 --- /dev/null
56055 +++ b/grsecurity/gracl_segv.c
56056 @@ -0,0 +1,299 @@
56057 +#include <linux/kernel.h>
56058 +#include <linux/mm.h>
56059 +#include <asm/uaccess.h>
56060 +#include <asm/errno.h>
56061 +#include <asm/mman.h>
56062 +#include <net/sock.h>
56063 +#include <linux/file.h>
56064 +#include <linux/fs.h>
56065 +#include <linux/net.h>
56066 +#include <linux/in.h>
56067 +#include <linux/slab.h>
56068 +#include <linux/types.h>
56069 +#include <linux/sched.h>
56070 +#include <linux/timer.h>
56071 +#include <linux/gracl.h>
56072 +#include <linux/grsecurity.h>
56073 +#include <linux/grinternal.h>
56074 +
56075 +static struct crash_uid *uid_set;
56076 +static unsigned short uid_used;
56077 +static DEFINE_SPINLOCK(gr_uid_lock);
56078 +extern rwlock_t gr_inode_lock;
56079 +extern struct acl_subject_label *
56080 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56081 + struct acl_role_label *role);
56082 +
56083 +#ifdef CONFIG_BTRFS_FS
56084 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56085 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56086 +#endif
56087 +
56088 +static inline dev_t __get_dev(const struct dentry *dentry)
56089 +{
56090 +#ifdef CONFIG_BTRFS_FS
56091 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56092 + return get_btrfs_dev_from_inode(dentry->d_inode);
56093 + else
56094 +#endif
56095 + return dentry->d_inode->i_sb->s_dev;
56096 +}
56097 +
56098 +int
56099 +gr_init_uidset(void)
56100 +{
56101 + uid_set =
56102 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56103 + uid_used = 0;
56104 +
56105 + return uid_set ? 1 : 0;
56106 +}
56107 +
56108 +void
56109 +gr_free_uidset(void)
56110 +{
56111 + if (uid_set)
56112 + kfree(uid_set);
56113 +
56114 + return;
56115 +}
56116 +
56117 +int
56118 +gr_find_uid(const uid_t uid)
56119 +{
56120 + struct crash_uid *tmp = uid_set;
56121 + uid_t buid;
56122 + int low = 0, high = uid_used - 1, mid;
56123 +
56124 + while (high >= low) {
56125 + mid = (low + high) >> 1;
56126 + buid = tmp[mid].uid;
56127 + if (buid == uid)
56128 + return mid;
56129 + if (buid > uid)
56130 + high = mid - 1;
56131 + if (buid < uid)
56132 + low = mid + 1;
56133 + }
56134 +
56135 + return -1;
56136 +}
56137 +
56138 +static __inline__ void
56139 +gr_insertsort(void)
56140 +{
56141 + unsigned short i, j;
56142 + struct crash_uid index;
56143 +
56144 + for (i = 1; i < uid_used; i++) {
56145 + index = uid_set[i];
56146 + j = i;
56147 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56148 + uid_set[j] = uid_set[j - 1];
56149 + j--;
56150 + }
56151 + uid_set[j] = index;
56152 + }
56153 +
56154 + return;
56155 +}
56156 +
56157 +static __inline__ void
56158 +gr_insert_uid(const uid_t uid, const unsigned long expires)
56159 +{
56160 + int loc;
56161 +
56162 + if (uid_used == GR_UIDTABLE_MAX)
56163 + return;
56164 +
56165 + loc = gr_find_uid(uid);
56166 +
56167 + if (loc >= 0) {
56168 + uid_set[loc].expires = expires;
56169 + return;
56170 + }
56171 +
56172 + uid_set[uid_used].uid = uid;
56173 + uid_set[uid_used].expires = expires;
56174 + uid_used++;
56175 +
56176 + gr_insertsort();
56177 +
56178 + return;
56179 +}
56180 +
56181 +void
56182 +gr_remove_uid(const unsigned short loc)
56183 +{
56184 + unsigned short i;
56185 +
56186 + for (i = loc + 1; i < uid_used; i++)
56187 + uid_set[i - 1] = uid_set[i];
56188 +
56189 + uid_used--;
56190 +
56191 + return;
56192 +}
56193 +
56194 +int
56195 +gr_check_crash_uid(const uid_t uid)
56196 +{
56197 + int loc;
56198 + int ret = 0;
56199 +
56200 + if (unlikely(!gr_acl_is_enabled()))
56201 + return 0;
56202 +
56203 + spin_lock(&gr_uid_lock);
56204 + loc = gr_find_uid(uid);
56205 +
56206 + if (loc < 0)
56207 + goto out_unlock;
56208 +
56209 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
56210 + gr_remove_uid(loc);
56211 + else
56212 + ret = 1;
56213 +
56214 +out_unlock:
56215 + spin_unlock(&gr_uid_lock);
56216 + return ret;
56217 +}
56218 +
56219 +static __inline__ int
56220 +proc_is_setxid(const struct cred *cred)
56221 +{
56222 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
56223 + cred->uid != cred->fsuid)
56224 + return 1;
56225 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56226 + cred->gid != cred->fsgid)
56227 + return 1;
56228 +
56229 + return 0;
56230 +}
56231 +
56232 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
56233 +
56234 +void
56235 +gr_handle_crash(struct task_struct *task, const int sig)
56236 +{
56237 + struct acl_subject_label *curr;
56238 + struct task_struct *tsk, *tsk2;
56239 + const struct cred *cred;
56240 + const struct cred *cred2;
56241 +
56242 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56243 + return;
56244 +
56245 + if (unlikely(!gr_acl_is_enabled()))
56246 + return;
56247 +
56248 + curr = task->acl;
56249 +
56250 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
56251 + return;
56252 +
56253 + if (time_before_eq(curr->expires, get_seconds())) {
56254 + curr->expires = 0;
56255 + curr->crashes = 0;
56256 + }
56257 +
56258 + curr->crashes++;
56259 +
56260 + if (!curr->expires)
56261 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56262 +
56263 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56264 + time_after(curr->expires, get_seconds())) {
56265 + rcu_read_lock();
56266 + cred = __task_cred(task);
56267 + if (cred->uid && proc_is_setxid(cred)) {
56268 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56269 + spin_lock(&gr_uid_lock);
56270 + gr_insert_uid(cred->uid, curr->expires);
56271 + spin_unlock(&gr_uid_lock);
56272 + curr->expires = 0;
56273 + curr->crashes = 0;
56274 + read_lock(&tasklist_lock);
56275 + do_each_thread(tsk2, tsk) {
56276 + cred2 = __task_cred(tsk);
56277 + if (tsk != task && cred2->uid == cred->uid)
56278 + gr_fake_force_sig(SIGKILL, tsk);
56279 + } while_each_thread(tsk2, tsk);
56280 + read_unlock(&tasklist_lock);
56281 + } else {
56282 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56283 + read_lock(&tasklist_lock);
56284 + read_lock(&grsec_exec_file_lock);
56285 + do_each_thread(tsk2, tsk) {
56286 + if (likely(tsk != task)) {
56287 + // if this thread has the same subject as the one that triggered
56288 + // RES_CRASH and it's the same binary, kill it
56289 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56290 + gr_fake_force_sig(SIGKILL, tsk);
56291 + }
56292 + } while_each_thread(tsk2, tsk);
56293 + read_unlock(&grsec_exec_file_lock);
56294 + read_unlock(&tasklist_lock);
56295 + }
56296 + rcu_read_unlock();
56297 + }
56298 +
56299 + return;
56300 +}
56301 +
56302 +int
56303 +gr_check_crash_exec(const struct file *filp)
56304 +{
56305 + struct acl_subject_label *curr;
56306 +
56307 + if (unlikely(!gr_acl_is_enabled()))
56308 + return 0;
56309 +
56310 + read_lock(&gr_inode_lock);
56311 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56312 + __get_dev(filp->f_path.dentry),
56313 + current->role);
56314 + read_unlock(&gr_inode_lock);
56315 +
56316 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56317 + (!curr->crashes && !curr->expires))
56318 + return 0;
56319 +
56320 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56321 + time_after(curr->expires, get_seconds()))
56322 + return 1;
56323 + else if (time_before_eq(curr->expires, get_seconds())) {
56324 + curr->crashes = 0;
56325 + curr->expires = 0;
56326 + }
56327 +
56328 + return 0;
56329 +}
56330 +
56331 +void
56332 +gr_handle_alertkill(struct task_struct *task)
56333 +{
56334 + struct acl_subject_label *curracl;
56335 + __u32 curr_ip;
56336 + struct task_struct *p, *p2;
56337 +
56338 + if (unlikely(!gr_acl_is_enabled()))
56339 + return;
56340 +
56341 + curracl = task->acl;
56342 + curr_ip = task->signal->curr_ip;
56343 +
56344 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56345 + read_lock(&tasklist_lock);
56346 + do_each_thread(p2, p) {
56347 + if (p->signal->curr_ip == curr_ip)
56348 + gr_fake_force_sig(SIGKILL, p);
56349 + } while_each_thread(p2, p);
56350 + read_unlock(&tasklist_lock);
56351 + } else if (curracl->mode & GR_KILLPROC)
56352 + gr_fake_force_sig(SIGKILL, task);
56353 +
56354 + return;
56355 +}
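/*
 * A minimal stand-alone sketch of the crash-rate window enforced by
 * gr_handle_crash() and gr_check_crash_exec() above: the RES_CRASH pair is
 * read as "at most rlim_cur crashes per rlim_max seconds", and exceeding it
 * bans the uid (or binary) until the window expires.  Names and the example
 * limits are illustrative.
 */
#include <stdio.h>
#include <time.h>

struct crash_window {
	unsigned long crashes;
	time_t expires;			/* 0 means no window is open */
	unsigned long max_crashes;	/* rlim_cur */
	unsigned long window_secs;	/* rlim_max */
};

/* returns 1 when the task should be banned/killed, 0 otherwise */
static int record_crash(struct crash_window *w, time_t now)
{
	if (w->expires && now >= w->expires) {	/* previous window lapsed */
		w->expires = 0;
		w->crashes = 0;
	}
	w->crashes++;
	if (!w->expires)
		w->expires = now + w->window_secs;
	return w->crashes >= w->max_crashes && now < w->expires;
}

int main(void)
{
	struct crash_window w = { 0, 0, 4, 30 };	/* 4 crashes per 30 seconds */
	time_t now = time(NULL);
	int i;

	for (i = 0; i < 5; i++)
		printf("crash %d -> banned: %d\n", i + 1, record_crash(&w, now));
	return 0;
}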
56356 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56357 new file mode 100644
56358 index 0000000..9d83a69
56359 --- /dev/null
56360 +++ b/grsecurity/gracl_shm.c
56361 @@ -0,0 +1,40 @@
56362 +#include <linux/kernel.h>
56363 +#include <linux/mm.h>
56364 +#include <linux/sched.h>
56365 +#include <linux/file.h>
56366 +#include <linux/ipc.h>
56367 +#include <linux/gracl.h>
56368 +#include <linux/grsecurity.h>
56369 +#include <linux/grinternal.h>
56370 +
56371 +int
56372 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56373 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56374 +{
56375 + struct task_struct *task;
56376 +
56377 + if (!gr_acl_is_enabled())
56378 + return 1;
56379 +
56380 + rcu_read_lock();
56381 + read_lock(&tasklist_lock);
56382 +
56383 + task = find_task_by_vpid(shm_cprid);
56384 +
56385 + if (unlikely(!task))
56386 + task = find_task_by_vpid(shm_lapid);
56387 +
56388 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56389 + (task->pid == shm_lapid)) &&
56390 + (task->acl->mode & GR_PROTSHM) &&
56391 + (task->acl != current->acl))) {
56392 + read_unlock(&tasklist_lock);
56393 + rcu_read_unlock();
56394 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56395 + return 0;
56396 + }
56397 + read_unlock(&tasklist_lock);
56398 + rcu_read_unlock();
56399 +
56400 + return 1;
56401 +}
56402 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56403 new file mode 100644
56404 index 0000000..bc0be01
56405 --- /dev/null
56406 +++ b/grsecurity/grsec_chdir.c
56407 @@ -0,0 +1,19 @@
56408 +#include <linux/kernel.h>
56409 +#include <linux/sched.h>
56410 +#include <linux/fs.h>
56411 +#include <linux/file.h>
56412 +#include <linux/grsecurity.h>
56413 +#include <linux/grinternal.h>
56414 +
56415 +void
56416 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56417 +{
56418 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56419 + if ((grsec_enable_chdir && grsec_enable_group &&
56420 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56421 + !grsec_enable_group)) {
56422 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56423 + }
56424 +#endif
56425 + return;
56426 +}
56427 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56428 new file mode 100644
56429 index 0000000..9807ee2
56430 --- /dev/null
56431 +++ b/grsecurity/grsec_chroot.c
56432 @@ -0,0 +1,368 @@
56433 +#include <linux/kernel.h>
56434 +#include <linux/module.h>
56435 +#include <linux/sched.h>
56436 +#include <linux/file.h>
56437 +#include <linux/fs.h>
56438 +#include <linux/mount.h>
56439 +#include <linux/types.h>
56440 +#include "../fs/mount.h"
56441 +#include <linux/grsecurity.h>
56442 +#include <linux/grinternal.h>
56443 +
56444 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56445 +{
56446 +#ifdef CONFIG_GRKERNSEC
56447 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56448 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
56449 + task->gr_is_chrooted = 1;
56450 + else
56451 + task->gr_is_chrooted = 0;
56452 +
56453 + task->gr_chroot_dentry = path->dentry;
56454 +#endif
56455 + return;
56456 +}
56457 +
56458 +void gr_clear_chroot_entries(struct task_struct *task)
56459 +{
56460 +#ifdef CONFIG_GRKERNSEC
56461 + task->gr_is_chrooted = 0;
56462 + task->gr_chroot_dentry = NULL;
56463 +#endif
56464 + return;
56465 +}
56466 +
56467 +int
56468 +gr_handle_chroot_unix(const pid_t pid)
56469 +{
56470 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56471 + struct task_struct *p;
56472 +
56473 + if (unlikely(!grsec_enable_chroot_unix))
56474 + return 1;
56475 +
56476 + if (likely(!proc_is_chrooted(current)))
56477 + return 1;
56478 +
56479 + rcu_read_lock();
56480 + read_lock(&tasklist_lock);
56481 + p = find_task_by_vpid_unrestricted(pid);
56482 + if (unlikely(p && !have_same_root(current, p))) {
56483 + read_unlock(&tasklist_lock);
56484 + rcu_read_unlock();
56485 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56486 + return 0;
56487 + }
56488 + read_unlock(&tasklist_lock);
56489 + rcu_read_unlock();
56490 +#endif
56491 + return 1;
56492 +}
56493 +
56494 +int
56495 +gr_handle_chroot_nice(void)
56496 +{
56497 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56498 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56499 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56500 + return -EPERM;
56501 + }
56502 +#endif
56503 + return 0;
56504 +}
56505 +
56506 +int
56507 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56508 +{
56509 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56510 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56511 + && proc_is_chrooted(current)) {
56512 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56513 + return -EACCES;
56514 + }
56515 +#endif
56516 + return 0;
56517 +}
56518 +
56519 +int
56520 +gr_handle_chroot_rawio(const struct inode *inode)
56521 +{
56522 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56523 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56524 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56525 + return 1;
56526 +#endif
56527 + return 0;
56528 +}
56529 +
56530 +int
56531 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56532 +{
56533 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56534 + struct task_struct *p;
56535 + int ret = 0;
56536 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56537 + return ret;
56538 +
56539 + read_lock(&tasklist_lock);
56540 + do_each_pid_task(pid, type, p) {
56541 + if (!have_same_root(current, p)) {
56542 + ret = 1;
56543 + goto out;
56544 + }
56545 + } while_each_pid_task(pid, type, p);
56546 +out:
56547 + read_unlock(&tasklist_lock);
56548 + return ret;
56549 +#endif
56550 + return 0;
56551 +}
56552 +
56553 +int
56554 +gr_pid_is_chrooted(struct task_struct *p)
56555 +{
56556 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56557 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56558 + return 0;
56559 +
56560 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56561 + !have_same_root(current, p)) {
56562 + return 1;
56563 + }
56564 +#endif
56565 + return 0;
56566 +}
56567 +
56568 +EXPORT_SYMBOL(gr_pid_is_chrooted);
56569 +
56570 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56571 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56572 +{
56573 + struct path path, currentroot;
56574 + int ret = 0;
56575 +
56576 + path.dentry = (struct dentry *)u_dentry;
56577 + path.mnt = (struct vfsmount *)u_mnt;
56578 + get_fs_root(current->fs, &currentroot);
56579 + if (path_is_under(&path, &currentroot))
56580 + ret = 1;
56581 + path_put(&currentroot);
56582 +
56583 + return ret;
56584 +}
56585 +#endif
56586 +
56587 +int
56588 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56589 +{
56590 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56591 + if (!grsec_enable_chroot_fchdir)
56592 + return 1;
56593 +
56594 + if (!proc_is_chrooted(current))
56595 + return 1;
56596 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56597 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56598 + return 0;
56599 + }
56600 +#endif
56601 + return 1;
56602 +}
56603 +
56604 +int
56605 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56606 + const time_t shm_createtime)
56607 +{
56608 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56609 + struct task_struct *p;
56610 + time_t starttime;
56611 +
56612 + if (unlikely(!grsec_enable_chroot_shmat))
56613 + return 1;
56614 +
56615 + if (likely(!proc_is_chrooted(current)))
56616 + return 1;
56617 +
56618 + rcu_read_lock();
56619 + read_lock(&tasklist_lock);
56620 +
56621 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56622 + starttime = p->start_time.tv_sec;
56623 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56624 + if (have_same_root(current, p)) {
56625 + goto allow;
56626 + } else {
56627 + read_unlock(&tasklist_lock);
56628 + rcu_read_unlock();
56629 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56630 + return 0;
56631 + }
56632 + }
56633 + /* creator exited, pid reuse, fall through to next check */
56634 + }
56635 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56636 + if (unlikely(!have_same_root(current, p))) {
56637 + read_unlock(&tasklist_lock);
56638 + rcu_read_unlock();
56639 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56640 + return 0;
56641 + }
56642 + }
56643 +
56644 +allow:
56645 + read_unlock(&tasklist_lock);
56646 + rcu_read_unlock();
56647 +#endif
56648 + return 1;
56649 +}
56650 +
56651 +void
56652 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56653 +{
56654 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56655 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56656 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56657 +#endif
56658 + return;
56659 +}
56660 +
56661 +int
56662 +gr_handle_chroot_mknod(const struct dentry *dentry,
56663 + const struct vfsmount *mnt, const int mode)
56664 +{
56665 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56666 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56667 + proc_is_chrooted(current)) {
56668 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56669 + return -EPERM;
56670 + }
56671 +#endif
56672 + return 0;
56673 +}
56674 +
56675 +int
56676 +gr_handle_chroot_mount(const struct dentry *dentry,
56677 + const struct vfsmount *mnt, const char *dev_name)
56678 +{
56679 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56680 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56681 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56682 + return -EPERM;
56683 + }
56684 +#endif
56685 + return 0;
56686 +}
56687 +
56688 +int
56689 +gr_handle_chroot_pivot(void)
56690 +{
56691 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56692 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56693 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56694 + return -EPERM;
56695 + }
56696 +#endif
56697 + return 0;
56698 +}
56699 +
56700 +int
56701 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56702 +{
56703 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56704 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56705 + !gr_is_outside_chroot(dentry, mnt)) {
56706 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56707 + return -EPERM;
56708 + }
56709 +#endif
56710 + return 0;
56711 +}
56712 +
56713 +extern const char *captab_log[];
56714 +extern int captab_log_entries;
56715 +
56716 +int
56717 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56718 +{
56719 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56720 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56721 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56722 + if (cap_raised(chroot_caps, cap)) {
56723 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56724 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
56725 + }
56726 + return 0;
56727 + }
56728 + }
56729 +#endif
56730 + return 1;
56731 +}
56732 +
56733 +int
56734 +gr_chroot_is_capable(const int cap)
56735 +{
56736 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56737 + return gr_task_chroot_is_capable(current, current_cred(), cap);
56738 +#endif
56739 + return 1;
56740 +}
56741 +
56742 +int
56743 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56744 +{
56745 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56746 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56747 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56748 + if (cap_raised(chroot_caps, cap)) {
56749 + return 0;
56750 + }
56751 + }
56752 +#endif
56753 + return 1;
56754 +}
56755 +
56756 +int
56757 +gr_chroot_is_capable_nolog(const int cap)
56758 +{
56759 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56760 + return gr_task_chroot_is_capable_nolog(current, cap);
56761 +#endif
56762 + return 1;
56763 +}
56764 +
56765 +int
56766 +gr_handle_chroot_sysctl(const int op)
56767 +{
56768 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56769 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56770 + proc_is_chrooted(current))
56771 + return -EACCES;
56772 +#endif
56773 + return 0;
56774 +}
56775 +
56776 +void
56777 +gr_handle_chroot_chdir(struct path *path)
56778 +{
56779 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56780 + if (grsec_enable_chroot_chdir)
56781 + set_fs_pwd(current->fs, path);
56782 +#endif
56783 + return;
56784 +}
56785 +
56786 +int
56787 +gr_handle_chroot_chmod(const struct dentry *dentry,
56788 + const struct vfsmount *mnt, const int mode)
56789 +{
56790 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56791 + /* allow chmod +s on directories, but not files */
56792 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56793 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56794 + proc_is_chrooted(current)) {
56795 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56796 + return -EPERM;
56797 + }
56798 +#endif
56799 + return 0;
56800 +}
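/*
 * A minimal stand-alone sketch of the chroot capability bounding in
 * gr_task_chroot_is_capable() above: capabilities present in a deny mask are
 * refused for chrooted tasks even when the task's effective set raises them.
 * The mask below is a small illustrative subset, not GR_CHROOT_CAPS.
 */
#include <stdio.h>

#define CAP_NET_RAW	13
#define CAP_SYS_RAWIO	17
#define CAP_SYS_ADMIN	21

static const unsigned long long chroot_deny_mask =
	(1ULL << CAP_SYS_ADMIN) | (1ULL << CAP_SYS_RAWIO);

static int chroot_capable(int chrooted, unsigned long long effective, int cap)
{
	if (chrooted && (chroot_deny_mask & (1ULL << cap)))
		return 0;				/* denied inside chroot */
	return (effective & (1ULL << cap)) != 0;	/* ordinary capability check */
}

int main(void)
{
	unsigned long long eff = (1ULL << CAP_SYS_ADMIN) | (1ULL << CAP_NET_RAW);

	printf("CAP_SYS_ADMIN in chroot:   %d\n", chroot_capable(1, eff, CAP_SYS_ADMIN));
	printf("CAP_NET_RAW in chroot:     %d\n", chroot_capable(1, eff, CAP_NET_RAW));
	printf("CAP_SYS_ADMIN un-chrooted: %d\n", chroot_capable(0, eff, CAP_SYS_ADMIN));
	return 0;
}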
56801 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56802 new file mode 100644
56803 index 0000000..213ad8b
56804 --- /dev/null
56805 +++ b/grsecurity/grsec_disabled.c
56806 @@ -0,0 +1,437 @@
56807 +#include <linux/kernel.h>
56808 +#include <linux/module.h>
56809 +#include <linux/sched.h>
56810 +#include <linux/file.h>
56811 +#include <linux/fs.h>
56812 +#include <linux/kdev_t.h>
56813 +#include <linux/net.h>
56814 +#include <linux/in.h>
56815 +#include <linux/ip.h>
56816 +#include <linux/skbuff.h>
56817 +#include <linux/sysctl.h>
56818 +
56819 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56820 +void
56821 +pax_set_initial_flags(struct linux_binprm *bprm)
56822 +{
56823 + return;
56824 +}
56825 +#endif
56826 +
56827 +#ifdef CONFIG_SYSCTL
56828 +__u32
56829 +gr_handle_sysctl(const struct ctl_table * table, const int op)
56830 +{
56831 + return 0;
56832 +}
56833 +#endif
56834 +
56835 +#ifdef CONFIG_TASKSTATS
56836 +int gr_is_taskstats_denied(int pid)
56837 +{
56838 + return 0;
56839 +}
56840 +#endif
56841 +
56842 +int
56843 +gr_acl_is_enabled(void)
56844 +{
56845 + return 0;
56846 +}
56847 +
56848 +void
56849 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56850 +{
56851 + return;
56852 +}
56853 +
56854 +int
56855 +gr_handle_rawio(const struct inode *inode)
56856 +{
56857 + return 0;
56858 +}
56859 +
56860 +void
56861 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56862 +{
56863 + return;
56864 +}
56865 +
56866 +int
56867 +gr_handle_ptrace(struct task_struct *task, const long request)
56868 +{
56869 + return 0;
56870 +}
56871 +
56872 +int
56873 +gr_handle_proc_ptrace(struct task_struct *task)
56874 +{
56875 + return 0;
56876 +}
56877 +
56878 +void
56879 +gr_learn_resource(const struct task_struct *task,
56880 + const int res, const unsigned long wanted, const int gt)
56881 +{
56882 + return;
56883 +}
56884 +
56885 +int
56886 +gr_set_acls(const int type)
56887 +{
56888 + return 0;
56889 +}
56890 +
56891 +int
56892 +gr_check_hidden_task(const struct task_struct *tsk)
56893 +{
56894 + return 0;
56895 +}
56896 +
56897 +int
56898 +gr_check_protected_task(const struct task_struct *task)
56899 +{
56900 + return 0;
56901 +}
56902 +
56903 +int
56904 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56905 +{
56906 + return 0;
56907 +}
56908 +
56909 +void
56910 +gr_copy_label(struct task_struct *tsk)
56911 +{
56912 + return;
56913 +}
56914 +
56915 +void
56916 +gr_set_pax_flags(struct task_struct *task)
56917 +{
56918 + return;
56919 +}
56920 +
56921 +int
56922 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56923 + const int unsafe_share)
56924 +{
56925 + return 0;
56926 +}
56927 +
56928 +void
56929 +gr_handle_delete(const ino_t ino, const dev_t dev)
56930 +{
56931 + return;
56932 +}
56933 +
56934 +void
56935 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56936 +{
56937 + return;
56938 +}
56939 +
56940 +void
56941 +gr_handle_crash(struct task_struct *task, const int sig)
56942 +{
56943 + return;
56944 +}
56945 +
56946 +int
56947 +gr_check_crash_exec(const struct file *filp)
56948 +{
56949 + return 0;
56950 +}
56951 +
56952 +int
56953 +gr_check_crash_uid(const uid_t uid)
56954 +{
56955 + return 0;
56956 +}
56957 +
56958 +void
56959 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56960 + struct dentry *old_dentry,
56961 + struct dentry *new_dentry,
56962 + struct vfsmount *mnt, const __u8 replace)
56963 +{
56964 + return;
56965 +}
56966 +
56967 +int
56968 +gr_search_socket(const int family, const int type, const int protocol)
56969 +{
56970 + return 1;
56971 +}
56972 +
56973 +int
56974 +gr_search_connectbind(const int mode, const struct socket *sock,
56975 + const struct sockaddr_in *addr)
56976 +{
56977 + return 0;
56978 +}
56979 +
56980 +void
56981 +gr_handle_alertkill(struct task_struct *task)
56982 +{
56983 + return;
56984 +}
56985 +
56986 +__u32
56987 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56988 +{
56989 + return 1;
56990 +}
56991 +
56992 +__u32
56993 +gr_acl_handle_hidden_file(const struct dentry * dentry,
56994 + const struct vfsmount * mnt)
56995 +{
56996 + return 1;
56997 +}
56998 +
56999 +__u32
57000 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57001 + int acc_mode)
57002 +{
57003 + return 1;
57004 +}
57005 +
57006 +__u32
57007 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57008 +{
57009 + return 1;
57010 +}
57011 +
57012 +__u32
57013 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57014 +{
57015 + return 1;
57016 +}
57017 +
57018 +int
57019 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57020 + unsigned int *vm_flags)
57021 +{
57022 + return 1;
57023 +}
57024 +
57025 +__u32
57026 +gr_acl_handle_truncate(const struct dentry * dentry,
57027 + const struct vfsmount * mnt)
57028 +{
57029 + return 1;
57030 +}
57031 +
57032 +__u32
57033 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57034 +{
57035 + return 1;
57036 +}
57037 +
57038 +__u32
57039 +gr_acl_handle_access(const struct dentry * dentry,
57040 + const struct vfsmount * mnt, const int fmode)
57041 +{
57042 + return 1;
57043 +}
57044 +
57045 +__u32
57046 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57047 + umode_t *mode)
57048 +{
57049 + return 1;
57050 +}
57051 +
57052 +__u32
57053 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57054 +{
57055 + return 1;
57056 +}
57057 +
57058 +__u32
57059 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57060 +{
57061 + return 1;
57062 +}
57063 +
57064 +void
57065 +grsecurity_init(void)
57066 +{
57067 + return;
57068 +}
57069 +
57070 +umode_t gr_acl_umask(void)
57071 +{
57072 + return 0;
57073 +}
57074 +
57075 +__u32
57076 +gr_acl_handle_mknod(const struct dentry * new_dentry,
57077 + const struct dentry * parent_dentry,
57078 + const struct vfsmount * parent_mnt,
57079 + const int mode)
57080 +{
57081 + return 1;
57082 +}
57083 +
57084 +__u32
57085 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
57086 + const struct dentry * parent_dentry,
57087 + const struct vfsmount * parent_mnt)
57088 +{
57089 + return 1;
57090 +}
57091 +
57092 +__u32
57093 +gr_acl_handle_symlink(const struct dentry * new_dentry,
57094 + const struct dentry * parent_dentry,
57095 + const struct vfsmount * parent_mnt, const char *from)
57096 +{
57097 + return 1;
57098 +}
57099 +
57100 +__u32
57101 +gr_acl_handle_link(const struct dentry * new_dentry,
57102 + const struct dentry * parent_dentry,
57103 + const struct vfsmount * parent_mnt,
57104 + const struct dentry * old_dentry,
57105 + const struct vfsmount * old_mnt, const char *to)
57106 +{
57107 + return 1;
57108 +}
57109 +
57110 +int
57111 +gr_acl_handle_rename(const struct dentry *new_dentry,
57112 + const struct dentry *parent_dentry,
57113 + const struct vfsmount *parent_mnt,
57114 + const struct dentry *old_dentry,
57115 + const struct inode *old_parent_inode,
57116 + const struct vfsmount *old_mnt, const char *newname)
57117 +{
57118 + return 0;
57119 +}
57120 +
57121 +int
57122 +gr_acl_handle_filldir(const struct file *file, const char *name,
57123 + const int namelen, const ino_t ino)
57124 +{
57125 + return 1;
57126 +}
57127 +
57128 +int
57129 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57130 + const time_t shm_createtime, const uid_t cuid, const int shmid)
57131 +{
57132 + return 1;
57133 +}
57134 +
57135 +int
57136 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57137 +{
57138 + return 0;
57139 +}
57140 +
57141 +int
57142 +gr_search_accept(const struct socket *sock)
57143 +{
57144 + return 0;
57145 +}
57146 +
57147 +int
57148 +gr_search_listen(const struct socket *sock)
57149 +{
57150 + return 0;
57151 +}
57152 +
57153 +int
57154 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57155 +{
57156 + return 0;
57157 +}
57158 +
57159 +__u32
57160 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57161 +{
57162 + return 1;
57163 +}
57164 +
57165 +__u32
57166 +gr_acl_handle_creat(const struct dentry * dentry,
57167 + const struct dentry * p_dentry,
57168 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57169 + const int imode)
57170 +{
57171 + return 1;
57172 +}
57173 +
57174 +void
57175 +gr_acl_handle_exit(void)
57176 +{
57177 + return;
57178 +}
57179 +
57180 +int
57181 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57182 +{
57183 + return 1;
57184 +}
57185 +
57186 +void
57187 +gr_set_role_label(const uid_t uid, const gid_t gid)
57188 +{
57189 + return;
57190 +}
57191 +
57192 +int
57193 +gr_acl_handle_procpidmem(const struct task_struct *task)
57194 +{
57195 + return 0;
57196 +}
57197 +
57198 +int
57199 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57200 +{
57201 + return 0;
57202 +}
57203 +
57204 +int
57205 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57206 +{
57207 + return 0;
57208 +}
57209 +
57210 +void
57211 +gr_set_kernel_label(struct task_struct *task)
57212 +{
57213 + return;
57214 +}
57215 +
57216 +int
57217 +gr_check_user_change(int real, int effective, int fs)
57218 +{
57219 + return 0;
57220 +}
57221 +
57222 +int
57223 +gr_check_group_change(int real, int effective, int fs)
57224 +{
57225 + return 0;
57226 +}
57227 +
57228 +int gr_acl_enable_at_secure(void)
57229 +{
57230 + return 0;
57231 +}
57232 +
57233 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57234 +{
57235 + return dentry->d_inode->i_sb->s_dev;
57236 +}
57237 +
57238 +EXPORT_SYMBOL(gr_learn_resource);
57239 +EXPORT_SYMBOL(gr_set_kernel_label);
57240 +#ifdef CONFIG_SECURITY
57241 +EXPORT_SYMBOL(gr_check_user_change);
57242 +EXPORT_SYMBOL(gr_check_group_change);
57243 +#endif
57244 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57245 new file mode 100644
57246 index 0000000..abfa971
57247 --- /dev/null
57248 +++ b/grsecurity/grsec_exec.c
57249 @@ -0,0 +1,174 @@
57250 +#include <linux/kernel.h>
57251 +#include <linux/sched.h>
57252 +#include <linux/file.h>
57253 +#include <linux/binfmts.h>
57254 +#include <linux/fs.h>
57255 +#include <linux/types.h>
57256 +#include <linux/grdefs.h>
57257 +#include <linux/grsecurity.h>
57258 +#include <linux/grinternal.h>
57259 +#include <linux/capability.h>
57260 +#include <linux/module.h>
57261 +
57262 +#include <asm/uaccess.h>
57263 +
57264 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57265 +static char gr_exec_arg_buf[132];
57266 +static DEFINE_MUTEX(gr_exec_arg_mutex);
57267 +#endif
57268 +
57269 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
57270 +
57271 +void
57272 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
57273 +{
57274 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57275 + char *grarg = gr_exec_arg_buf;
57276 + unsigned int i, x, execlen = 0;
57277 + char c;
57278 +
57279 + if (!((grsec_enable_execlog && grsec_enable_group &&
57280 + in_group_p(grsec_audit_gid))
57281 + || (grsec_enable_execlog && !grsec_enable_group)))
57282 + return;
57283 +
57284 + mutex_lock(&gr_exec_arg_mutex);
57285 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
57286 +
57287 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
57288 + const char __user *p;
57289 + unsigned int len;
57290 +
57291 + p = get_user_arg_ptr(argv, i);
57292 + if (IS_ERR(p))
57293 + goto log;
57294 +
57295 + len = strnlen_user(p, 128 - execlen);
57296 + if (len > 128 - execlen)
57297 + len = 128 - execlen;
57298 + else if (len > 0)
57299 + len--;
57300 + if (copy_from_user(grarg + execlen, p, len))
57301 + goto log;
57302 +
57303 + /* rewrite unprintable characters */
57304 + for (x = 0; x < len; x++) {
57305 + c = *(grarg + execlen + x);
57306 + if (c < 32 || c > 126)
57307 + *(grarg + execlen + x) = ' ';
57308 + }
57309 +
57310 + execlen += len;
57311 + *(grarg + execlen) = ' ';
57312 + *(grarg + execlen + 1) = '\0';
57313 + execlen++;
57314 + }
57315 +
57316 + log:
57317 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57318 + bprm->file->f_path.mnt, grarg);
57319 + mutex_unlock(&gr_exec_arg_mutex);
57320 +#endif
57321 + return;
57322 +}
57323 +
57324 +#ifdef CONFIG_GRKERNSEC
57325 +extern int gr_acl_is_capable(const int cap);
57326 +extern int gr_acl_is_capable_nolog(const int cap);
57327 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57328 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
57329 +extern int gr_chroot_is_capable(const int cap);
57330 +extern int gr_chroot_is_capable_nolog(const int cap);
57331 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57332 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
57333 +#endif
57334 +
57335 +const char *captab_log[] = {
57336 + "CAP_CHOWN",
57337 + "CAP_DAC_OVERRIDE",
57338 + "CAP_DAC_READ_SEARCH",
57339 + "CAP_FOWNER",
57340 + "CAP_FSETID",
57341 + "CAP_KILL",
57342 + "CAP_SETGID",
57343 + "CAP_SETUID",
57344 + "CAP_SETPCAP",
57345 + "CAP_LINUX_IMMUTABLE",
57346 + "CAP_NET_BIND_SERVICE",
57347 + "CAP_NET_BROADCAST",
57348 + "CAP_NET_ADMIN",
57349 + "CAP_NET_RAW",
57350 + "CAP_IPC_LOCK",
57351 + "CAP_IPC_OWNER",
57352 + "CAP_SYS_MODULE",
57353 + "CAP_SYS_RAWIO",
57354 + "CAP_SYS_CHROOT",
57355 + "CAP_SYS_PTRACE",
57356 + "CAP_SYS_PACCT",
57357 + "CAP_SYS_ADMIN",
57358 + "CAP_SYS_BOOT",
57359 + "CAP_SYS_NICE",
57360 + "CAP_SYS_RESOURCE",
57361 + "CAP_SYS_TIME",
57362 + "CAP_SYS_TTY_CONFIG",
57363 + "CAP_MKNOD",
57364 + "CAP_LEASE",
57365 + "CAP_AUDIT_WRITE",
57366 + "CAP_AUDIT_CONTROL",
57367 + "CAP_SETFCAP",
57368 + "CAP_MAC_OVERRIDE",
57369 + "CAP_MAC_ADMIN",
57370 + "CAP_SYSLOG",
57371 + "CAP_WAKE_ALARM"
57372 +};
57373 +
57374 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
57375 +
57376 +int gr_is_capable(const int cap)
57377 +{
57378 +#ifdef CONFIG_GRKERNSEC
57379 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57380 + return 1;
57381 + return 0;
57382 +#else
57383 + return 1;
57384 +#endif
57385 +}
57386 +
57387 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57388 +{
57389 +#ifdef CONFIG_GRKERNSEC
57390 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
57391 + return 1;
57392 + return 0;
57393 +#else
57394 + return 1;
57395 +#endif
57396 +}
57397 +
57398 +int gr_is_capable_nolog(const int cap)
57399 +{
57400 +#ifdef CONFIG_GRKERNSEC
57401 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57402 + return 1;
57403 + return 0;
57404 +#else
57405 + return 1;
57406 +#endif
57407 +}
57408 +
57409 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
57410 +{
57411 +#ifdef CONFIG_GRKERNSEC
57412 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
57413 + return 1;
57414 + return 0;
57415 +#else
57416 + return 1;
57417 +#endif
57418 +}
57419 +
57420 +EXPORT_SYMBOL(gr_is_capable);
57421 +EXPORT_SYMBOL(gr_is_capable_nolog);
57422 +EXPORT_SYMBOL(gr_task_is_capable);
57423 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
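gr_handle_exec_args above copies at most 128 bytes of argv out of userspace, replaces unprintable bytes with spaces, and joins the arguments with single spaces before handing the result to the logger. The following is a rough userspace sketch of that flattening loop, reading from an ordinary argv[] instead of via copy_from_user(); the file name and buffer constant are stand-ins for illustration, not part of the patch.

/* exec_log_sketch.c - mirrors the argv flattening/sanitising loop above,
 * but operates on in-process strings instead of userspace pointers.
 * Compile: cc -o exec_log_sketch exec_log_sketch.c
 * Run:     ./exec_log_sketch some arguments here
 */
#include <stdio.h>
#include <string.h>

#define LOG_CAP 128  /* same 128-byte cap as above; the kernel buffer is 132 bytes */

int main(int argc, char **argv)
{
    char buf[LOG_CAP + 4] = { 0 };
    unsigned int execlen = 0;

    for (int i = 0; i < argc && execlen < LOG_CAP; i++) {
        unsigned int len = strlen(argv[i]);
        if (len > LOG_CAP - execlen)
            len = LOG_CAP - execlen;
        memcpy(buf + execlen, argv[i], len);

        /* rewrite unprintable characters, exactly like the loop above */
        for (unsigned int x = 0; x < len; x++) {
            char c = buf[execlen + x];
            if (c < 32 || c > 126)
                buf[execlen + x] = ' ';
        }

        execlen += len;
        buf[execlen] = ' ';      /* single space between arguments */
        buf[execlen + 1] = '\0';
        execlen++;
    }

    printf("exec log line: %s\n", buf);
    return 0;
}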
57424 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57425 new file mode 100644
57426 index 0000000..d3ee748
57427 --- /dev/null
57428 +++ b/grsecurity/grsec_fifo.c
57429 @@ -0,0 +1,24 @@
57430 +#include <linux/kernel.h>
57431 +#include <linux/sched.h>
57432 +#include <linux/fs.h>
57433 +#include <linux/file.h>
57434 +#include <linux/grinternal.h>
57435 +
57436 +int
57437 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57438 + const struct dentry *dir, const int flag, const int acc_mode)
57439 +{
57440 +#ifdef CONFIG_GRKERNSEC_FIFO
57441 + const struct cred *cred = current_cred();
57442 +
57443 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57444 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57445 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57446 + (cred->fsuid != dentry->d_inode->i_uid)) {
57447 + if (!inode_permission(dentry->d_inode, acc_mode))
57448 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57449 + return -EACCES;
57450 + }
57451 +#endif
57452 + return 0;
57453 +}
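The FIFO check above refuses to open a FIFO that sits in a sticky directory when the FIFO belongs to neither the directory owner nor the opener, unless O_EXCL is set (meaning the caller is creating it). A small userspace sketch of that predicate follows, with modes and uids passed in directly rather than taken from dentries; names and sample uids are illustrative only.

/* fifo_policy_sketch.c - mirrors the sticky-directory FIFO predicate above.
 * Compile: cc -o fifo_policy_sketch fifo_policy_sketch.c
 */
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>

static int deny_fifo_open(mode_t fifo_mode, uid_t fifo_uid,
                          mode_t dir_mode, uid_t dir_uid,
                          int open_flags, uid_t fsuid)
{
    return S_ISFIFO(fifo_mode) &&
           !(open_flags & O_EXCL) &&       /* O_EXCL means the caller created it */
           (dir_mode & S_ISVTX) &&
           fifo_uid != dir_uid &&
           fsuid != fifo_uid;
}

int main(void)
{
    mode_t tmp = S_IFDIR | S_ISVTX | 0777; /* /tmp-style sticky directory owned by root */

    printf("open someone else's fifo in /tmp       : %s\n",
           deny_fifo_open(S_IFIFO | 0666, 1001, tmp, 0, O_RDONLY, 1000) ? "denied" : "allowed");
    printf("open own fifo in /tmp                  : %s\n",
           deny_fifo_open(S_IFIFO | 0666, 1000, tmp, 0, O_RDONLY, 1000) ? "denied" : "allowed");
    printf("create fifo with O_CREAT|O_EXCL in /tmp: %s\n",
           deny_fifo_open(S_IFIFO | 0666, 1001, tmp, 0, O_CREAT | O_EXCL, 1000) ? "denied" : "allowed");
    return 0;
}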
57454 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57455 new file mode 100644
57456 index 0000000..8ca18bf
57457 --- /dev/null
57458 +++ b/grsecurity/grsec_fork.c
57459 @@ -0,0 +1,23 @@
57460 +#include <linux/kernel.h>
57461 +#include <linux/sched.h>
57462 +#include <linux/grsecurity.h>
57463 +#include <linux/grinternal.h>
57464 +#include <linux/errno.h>
57465 +
57466 +void
57467 +gr_log_forkfail(const int retval)
57468 +{
57469 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57470 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57471 + switch (retval) {
57472 + case -EAGAIN:
57473 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57474 + break;
57475 + case -ENOMEM:
57476 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57477 + break;
57478 + }
57479 + }
57480 +#endif
57481 + return;
57482 +}
57483 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57484 new file mode 100644
57485 index 0000000..01ddde4
57486 --- /dev/null
57487 +++ b/grsecurity/grsec_init.c
57488 @@ -0,0 +1,277 @@
57489 +#include <linux/kernel.h>
57490 +#include <linux/sched.h>
57491 +#include <linux/mm.h>
57492 +#include <linux/gracl.h>
57493 +#include <linux/slab.h>
57494 +#include <linux/vmalloc.h>
57495 +#include <linux/percpu.h>
57496 +#include <linux/module.h>
57497 +
57498 +int grsec_enable_ptrace_readexec;
57499 +int grsec_enable_setxid;
57500 +int grsec_enable_brute;
57501 +int grsec_enable_link;
57502 +int grsec_enable_dmesg;
57503 +int grsec_enable_harden_ptrace;
57504 +int grsec_enable_fifo;
57505 +int grsec_enable_execlog;
57506 +int grsec_enable_signal;
57507 +int grsec_enable_forkfail;
57508 +int grsec_enable_audit_ptrace;
57509 +int grsec_enable_time;
57510 +int grsec_enable_audit_textrel;
57511 +int grsec_enable_group;
57512 +int grsec_audit_gid;
57513 +int grsec_enable_chdir;
57514 +int grsec_enable_mount;
57515 +int grsec_enable_rofs;
57516 +int grsec_enable_chroot_findtask;
57517 +int grsec_enable_chroot_mount;
57518 +int grsec_enable_chroot_shmat;
57519 +int grsec_enable_chroot_fchdir;
57520 +int grsec_enable_chroot_double;
57521 +int grsec_enable_chroot_pivot;
57522 +int grsec_enable_chroot_chdir;
57523 +int grsec_enable_chroot_chmod;
57524 +int grsec_enable_chroot_mknod;
57525 +int grsec_enable_chroot_nice;
57526 +int grsec_enable_chroot_execlog;
57527 +int grsec_enable_chroot_caps;
57528 +int grsec_enable_chroot_sysctl;
57529 +int grsec_enable_chroot_unix;
57530 +int grsec_enable_tpe;
57531 +int grsec_tpe_gid;
57532 +int grsec_enable_blackhole;
57533 +#ifdef CONFIG_IPV6_MODULE
57534 +EXPORT_SYMBOL(grsec_enable_blackhole);
57535 +#endif
57536 +int grsec_lastack_retries;
57537 +int grsec_enable_tpe_all;
57538 +int grsec_enable_tpe_invert;
57539 +int grsec_enable_socket_all;
57540 +int grsec_socket_all_gid;
57541 +int grsec_enable_socket_client;
57542 +int grsec_socket_client_gid;
57543 +int grsec_enable_socket_server;
57544 +int grsec_socket_server_gid;
57545 +int grsec_resource_logging;
57546 +int grsec_disable_privio;
57547 +int grsec_enable_log_rwxmaps;
57548 +int grsec_lock;
57549 +
57550 +DEFINE_SPINLOCK(grsec_alert_lock);
57551 +unsigned long grsec_alert_wtime = 0;
57552 +unsigned long grsec_alert_fyet = 0;
57553 +
57554 +DEFINE_SPINLOCK(grsec_audit_lock);
57555 +
57556 +DEFINE_RWLOCK(grsec_exec_file_lock);
57557 +
57558 +char *gr_shared_page[4];
57559 +
57560 +char *gr_alert_log_fmt;
57561 +char *gr_audit_log_fmt;
57562 +char *gr_alert_log_buf;
57563 +char *gr_audit_log_buf;
57564 +
57565 +extern struct gr_arg *gr_usermode;
57566 +extern unsigned char *gr_system_salt;
57567 +extern unsigned char *gr_system_sum;
57568 +
57569 +void __init
57570 +grsecurity_init(void)
57571 +{
57572 + int j;
57573 + /* create the per-cpu shared pages */
57574 +
57575 +#ifdef CONFIG_X86
57576 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57577 +#endif
57578 +
57579 + for (j = 0; j < 4; j++) {
57580 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57581 + if (gr_shared_page[j] == NULL) {
57582 + panic("Unable to allocate grsecurity shared page");
57583 + return;
57584 + }
57585 + }
57586 +
57587 + /* allocate log buffers */
57588 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57589 + if (!gr_alert_log_fmt) {
57590 + panic("Unable to allocate grsecurity alert log format buffer");
57591 + return;
57592 + }
57593 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57594 + if (!gr_audit_log_fmt) {
57595 + panic("Unable to allocate grsecurity audit log format buffer");
57596 + return;
57597 + }
57598 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57599 + if (!gr_alert_log_buf) {
57600 + panic("Unable to allocate grsecurity alert log buffer");
57601 + return;
57602 + }
57603 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57604 + if (!gr_audit_log_buf) {
57605 + panic("Unable to allocate grsecurity audit log buffer");
57606 + return;
57607 + }
57608 +
57609 + /* allocate memory for authentication structure */
57610 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57611 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57612 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57613 +
57614 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57615 + panic("Unable to allocate grsecurity authentication structure");
57616 + return;
57617 + }
57618 +
57619 +
57620 +#ifdef CONFIG_GRKERNSEC_IO
57621 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57622 + grsec_disable_privio = 1;
57623 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57624 + grsec_disable_privio = 1;
57625 +#else
57626 + grsec_disable_privio = 0;
57627 +#endif
57628 +#endif
57629 +
57630 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57631 + /* for backward compatibility, tpe_invert always defaults to on if
57632 + enabled in the kernel
57633 + */
57634 + grsec_enable_tpe_invert = 1;
57635 +#endif
57636 +
57637 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57638 +#ifndef CONFIG_GRKERNSEC_SYSCTL
57639 + grsec_lock = 1;
57640 +#endif
57641 +
57642 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57643 + grsec_enable_audit_textrel = 1;
57644 +#endif
57645 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57646 + grsec_enable_log_rwxmaps = 1;
57647 +#endif
57648 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57649 + grsec_enable_group = 1;
57650 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57651 +#endif
57652 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57653 + grsec_enable_ptrace_readexec = 1;
57654 +#endif
57655 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57656 + grsec_enable_chdir = 1;
57657 +#endif
57658 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57659 + grsec_enable_harden_ptrace = 1;
57660 +#endif
57661 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57662 + grsec_enable_mount = 1;
57663 +#endif
57664 +#ifdef CONFIG_GRKERNSEC_LINK
57665 + grsec_enable_link = 1;
57666 +#endif
57667 +#ifdef CONFIG_GRKERNSEC_BRUTE
57668 + grsec_enable_brute = 1;
57669 +#endif
57670 +#ifdef CONFIG_GRKERNSEC_DMESG
57671 + grsec_enable_dmesg = 1;
57672 +#endif
57673 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57674 + grsec_enable_blackhole = 1;
57675 + grsec_lastack_retries = 4;
57676 +#endif
57677 +#ifdef CONFIG_GRKERNSEC_FIFO
57678 + grsec_enable_fifo = 1;
57679 +#endif
57680 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57681 + grsec_enable_execlog = 1;
57682 +#endif
57683 +#ifdef CONFIG_GRKERNSEC_SETXID
57684 + grsec_enable_setxid = 1;
57685 +#endif
57686 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57687 + grsec_enable_signal = 1;
57688 +#endif
57689 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57690 + grsec_enable_forkfail = 1;
57691 +#endif
57692 +#ifdef CONFIG_GRKERNSEC_TIME
57693 + grsec_enable_time = 1;
57694 +#endif
57695 +#ifdef CONFIG_GRKERNSEC_RESLOG
57696 + grsec_resource_logging = 1;
57697 +#endif
57698 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57699 + grsec_enable_chroot_findtask = 1;
57700 +#endif
57701 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57702 + grsec_enable_chroot_unix = 1;
57703 +#endif
57704 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57705 + grsec_enable_chroot_mount = 1;
57706 +#endif
57707 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57708 + grsec_enable_chroot_fchdir = 1;
57709 +#endif
57710 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57711 + grsec_enable_chroot_shmat = 1;
57712 +#endif
57713 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57714 + grsec_enable_audit_ptrace = 1;
57715 +#endif
57716 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57717 + grsec_enable_chroot_double = 1;
57718 +#endif
57719 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57720 + grsec_enable_chroot_pivot = 1;
57721 +#endif
57722 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57723 + grsec_enable_chroot_chdir = 1;
57724 +#endif
57725 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57726 + grsec_enable_chroot_chmod = 1;
57727 +#endif
57728 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57729 + grsec_enable_chroot_mknod = 1;
57730 +#endif
57731 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57732 + grsec_enable_chroot_nice = 1;
57733 +#endif
57734 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57735 + grsec_enable_chroot_execlog = 1;
57736 +#endif
57737 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57738 + grsec_enable_chroot_caps = 1;
57739 +#endif
57740 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57741 + grsec_enable_chroot_sysctl = 1;
57742 +#endif
57743 +#ifdef CONFIG_GRKERNSEC_TPE
57744 + grsec_enable_tpe = 1;
57745 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57746 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57747 + grsec_enable_tpe_all = 1;
57748 +#endif
57749 +#endif
57750 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57751 + grsec_enable_socket_all = 1;
57752 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57753 +#endif
57754 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57755 + grsec_enable_socket_client = 1;
57756 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57757 +#endif
57758 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57759 + grsec_enable_socket_server = 1;
57760 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57761 +#endif
57762 +#endif
57763 +
57764 + return;
57765 +}
57766 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57767 new file mode 100644
57768 index 0000000..3efe141
57769 --- /dev/null
57770 +++ b/grsecurity/grsec_link.c
57771 @@ -0,0 +1,43 @@
57772 +#include <linux/kernel.h>
57773 +#include <linux/sched.h>
57774 +#include <linux/fs.h>
57775 +#include <linux/file.h>
57776 +#include <linux/grinternal.h>
57777 +
57778 +int
57779 +gr_handle_follow_link(const struct inode *parent,
57780 + const struct inode *inode,
57781 + const struct dentry *dentry, const struct vfsmount *mnt)
57782 +{
57783 +#ifdef CONFIG_GRKERNSEC_LINK
57784 + const struct cred *cred = current_cred();
57785 +
57786 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57787 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57788 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57789 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57790 + return -EACCES;
57791 + }
57792 +#endif
57793 + return 0;
57794 +}
57795 +
57796 +int
57797 +gr_handle_hardlink(const struct dentry *dentry,
57798 + const struct vfsmount *mnt,
57799 + struct inode *inode, const int mode, const char *to)
57800 +{
57801 +#ifdef CONFIG_GRKERNSEC_LINK
57802 + const struct cred *cred = current_cred();
57803 +
57804 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57805 + (!S_ISREG(mode) || (mode & S_ISUID) ||
57806 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57807 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57808 + !capable(CAP_FOWNER) && cred->uid) {
57809 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57810 + return -EPERM;
57811 + }
57812 +#endif
57813 + return 0;
57814 +}
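gr_handle_follow_link above denies following a symlink that lives in a sticky, world-writable directory when the link is owned by neither the directory owner nor the process following it; gr_handle_hardlink applies a similar ownership rule to hardlink creation. Below is a compact userspace sketch of just the symlink predicate, with uids and modes passed in explicitly; this is illustrative only and not the kernel interface.

/* link_policy_sketch.c - mirrors the symlink-follow predicate above.
 * Compile: cc -o link_policy_sketch link_policy_sketch.c
 */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Deny following 'link' inside 'parent' when: the parent directory is sticky
 * and world-writable, the link owner differs from the parent owner, and the
 * follower's fsuid differs from the link owner. */
static int deny_follow(mode_t parent_mode, uid_t parent_uid,
                       mode_t link_mode, uid_t link_uid, uid_t fsuid)
{
    return S_ISLNK(link_mode) &&
           (parent_mode & S_ISVTX) &&
           (parent_mode & S_IWOTH) &&
           parent_uid != link_uid &&
           fsuid != link_uid;
}

int main(void)
{
    mode_t tmp = S_IFDIR | S_ISVTX | 0777; /* /tmp-style directory owned by root */

    printf("uid 1000 follows uid 1001's link in /tmp: %s\n",
           deny_follow(tmp, 0, S_IFLNK | 0777, 1001, 1000) ? "denied" : "allowed");
    printf("uid 1001 follows its own link in /tmp   : %s\n",
           deny_follow(tmp, 0, S_IFLNK | 0777, 1001, 1001) ? "denied" : "allowed");
    return 0;
}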
57815 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57816 new file mode 100644
57817 index 0000000..a45d2e9
57818 --- /dev/null
57819 +++ b/grsecurity/grsec_log.c
57820 @@ -0,0 +1,322 @@
57821 +#include <linux/kernel.h>
57822 +#include <linux/sched.h>
57823 +#include <linux/file.h>
57824 +#include <linux/tty.h>
57825 +#include <linux/fs.h>
57826 +#include <linux/grinternal.h>
57827 +
57828 +#ifdef CONFIG_TREE_PREEMPT_RCU
57829 +#define DISABLE_PREEMPT() preempt_disable()
57830 +#define ENABLE_PREEMPT() preempt_enable()
57831 +#else
57832 +#define DISABLE_PREEMPT()
57833 +#define ENABLE_PREEMPT()
57834 +#endif
57835 +
57836 +#define BEGIN_LOCKS(x) \
57837 + DISABLE_PREEMPT(); \
57838 + rcu_read_lock(); \
57839 + read_lock(&tasklist_lock); \
57840 + read_lock(&grsec_exec_file_lock); \
57841 + if (x != GR_DO_AUDIT) \
57842 + spin_lock(&grsec_alert_lock); \
57843 + else \
57844 + spin_lock(&grsec_audit_lock)
57845 +
57846 +#define END_LOCKS(x) \
57847 + if (x != GR_DO_AUDIT) \
57848 + spin_unlock(&grsec_alert_lock); \
57849 + else \
57850 + spin_unlock(&grsec_audit_lock); \
57851 + read_unlock(&grsec_exec_file_lock); \
57852 + read_unlock(&tasklist_lock); \
57853 + rcu_read_unlock(); \
57854 + ENABLE_PREEMPT(); \
57855 + if (x == GR_DONT_AUDIT) \
57856 + gr_handle_alertkill(current)
57857 +
57858 +enum {
57859 + FLOODING,
57860 + NO_FLOODING
57861 +};
57862 +
57863 +extern char *gr_alert_log_fmt;
57864 +extern char *gr_audit_log_fmt;
57865 +extern char *gr_alert_log_buf;
57866 +extern char *gr_audit_log_buf;
57867 +
57868 +static int gr_log_start(int audit)
57869 +{
57870 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57871 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57872 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57873 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57874 + unsigned long curr_secs = get_seconds();
57875 +
57876 + if (audit == GR_DO_AUDIT)
57877 + goto set_fmt;
57878 +
57879 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57880 + grsec_alert_wtime = curr_secs;
57881 + grsec_alert_fyet = 0;
57882 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57883 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57884 + grsec_alert_fyet++;
57885 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57886 + grsec_alert_wtime = curr_secs;
57887 + grsec_alert_fyet++;
57888 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57889 + return FLOODING;
57890 + }
57891 + else return FLOODING;
57892 +
57893 +set_fmt:
57894 +#endif
57895 + memset(buf, 0, PAGE_SIZE);
57896 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
57897 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57898 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57899 + } else if (current->signal->curr_ip) {
57900 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57901 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57902 + } else if (gr_acl_is_enabled()) {
57903 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57904 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57905 + } else {
57906 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
57907 + strcpy(buf, fmt);
57908 + }
57909 +
57910 + return NO_FLOODING;
57911 +}
57912 +
57913 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57914 + __attribute__ ((format (printf, 2, 0)));
57915 +
57916 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57917 +{
57918 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57919 + unsigned int len = strlen(buf);
57920 +
57921 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57922 +
57923 + return;
57924 +}
57925 +
57926 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57927 + __attribute__ ((format (printf, 2, 3)));
57928 +
57929 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57930 +{
57931 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57932 + unsigned int len = strlen(buf);
57933 + va_list ap;
57934 +
57935 + va_start(ap, msg);
57936 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57937 + va_end(ap);
57938 +
57939 + return;
57940 +}
57941 +
57942 +static void gr_log_end(int audit, int append_default)
57943 +{
57944 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57945 +
57946 + if (append_default) {
57947 + unsigned int len = strlen(buf);
57948 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57949 + }
57950 +
57951 + printk("%s\n", buf);
57952 +
57953 + return;
57954 +}
57955 +
57956 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57957 +{
57958 + int logtype;
57959 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57960 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57961 + void *voidptr = NULL;
57962 + int num1 = 0, num2 = 0;
57963 + unsigned long ulong1 = 0, ulong2 = 0;
57964 + struct dentry *dentry = NULL;
57965 + struct vfsmount *mnt = NULL;
57966 + struct file *file = NULL;
57967 + struct task_struct *task = NULL;
57968 + const struct cred *cred, *pcred;
57969 + va_list ap;
57970 +
57971 + BEGIN_LOCKS(audit);
57972 + logtype = gr_log_start(audit);
57973 + if (logtype == FLOODING) {
57974 + END_LOCKS(audit);
57975 + return;
57976 + }
57977 + va_start(ap, argtypes);
57978 + switch (argtypes) {
57979 + case GR_TTYSNIFF:
57980 + task = va_arg(ap, struct task_struct *);
57981 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57982 + break;
57983 + case GR_SYSCTL_HIDDEN:
57984 + str1 = va_arg(ap, char *);
57985 + gr_log_middle_varargs(audit, msg, result, str1);
57986 + break;
57987 + case GR_RBAC:
57988 + dentry = va_arg(ap, struct dentry *);
57989 + mnt = va_arg(ap, struct vfsmount *);
57990 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57991 + break;
57992 + case GR_RBAC_STR:
57993 + dentry = va_arg(ap, struct dentry *);
57994 + mnt = va_arg(ap, struct vfsmount *);
57995 + str1 = va_arg(ap, char *);
57996 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57997 + break;
57998 + case GR_STR_RBAC:
57999 + str1 = va_arg(ap, char *);
58000 + dentry = va_arg(ap, struct dentry *);
58001 + mnt = va_arg(ap, struct vfsmount *);
58002 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58003 + break;
58004 + case GR_RBAC_MODE2:
58005 + dentry = va_arg(ap, struct dentry *);
58006 + mnt = va_arg(ap, struct vfsmount *);
58007 + str1 = va_arg(ap, char *);
58008 + str2 = va_arg(ap, char *);
58009 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58010 + break;
58011 + case GR_RBAC_MODE3:
58012 + dentry = va_arg(ap, struct dentry *);
58013 + mnt = va_arg(ap, struct vfsmount *);
58014 + str1 = va_arg(ap, char *);
58015 + str2 = va_arg(ap, char *);
58016 + str3 = va_arg(ap, char *);
58017 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58018 + break;
58019 + case GR_FILENAME:
58020 + dentry = va_arg(ap, struct dentry *);
58021 + mnt = va_arg(ap, struct vfsmount *);
58022 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58023 + break;
58024 + case GR_STR_FILENAME:
58025 + str1 = va_arg(ap, char *);
58026 + dentry = va_arg(ap, struct dentry *);
58027 + mnt = va_arg(ap, struct vfsmount *);
58028 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58029 + break;
58030 + case GR_FILENAME_STR:
58031 + dentry = va_arg(ap, struct dentry *);
58032 + mnt = va_arg(ap, struct vfsmount *);
58033 + str1 = va_arg(ap, char *);
58034 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58035 + break;
58036 + case GR_FILENAME_TWO_INT:
58037 + dentry = va_arg(ap, struct dentry *);
58038 + mnt = va_arg(ap, struct vfsmount *);
58039 + num1 = va_arg(ap, int);
58040 + num2 = va_arg(ap, int);
58041 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58042 + break;
58043 + case GR_FILENAME_TWO_INT_STR:
58044 + dentry = va_arg(ap, struct dentry *);
58045 + mnt = va_arg(ap, struct vfsmount *);
58046 + num1 = va_arg(ap, int);
58047 + num2 = va_arg(ap, int);
58048 + str1 = va_arg(ap, char *);
58049 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58050 + break;
58051 + case GR_TEXTREL:
58052 + file = va_arg(ap, struct file *);
58053 + ulong1 = va_arg(ap, unsigned long);
58054 + ulong2 = va_arg(ap, unsigned long);
58055 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58056 + break;
58057 + case GR_PTRACE:
58058 + task = va_arg(ap, struct task_struct *);
58059 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58060 + break;
58061 + case GR_RESOURCE:
58062 + task = va_arg(ap, struct task_struct *);
58063 + cred = __task_cred(task);
58064 + pcred = __task_cred(task->real_parent);
58065 + ulong1 = va_arg(ap, unsigned long);
58066 + str1 = va_arg(ap, char *);
58067 + ulong2 = va_arg(ap, unsigned long);
58068 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58069 + break;
58070 + case GR_CAP:
58071 + task = va_arg(ap, struct task_struct *);
58072 + cred = __task_cred(task);
58073 + pcred = __task_cred(task->real_parent);
58074 + str1 = va_arg(ap, char *);
58075 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58076 + break;
58077 + case GR_SIG:
58078 + str1 = va_arg(ap, char *);
58079 + voidptr = va_arg(ap, void *);
58080 + gr_log_middle_varargs(audit, msg, str1, voidptr);
58081 + break;
58082 + case GR_SIG2:
58083 + task = va_arg(ap, struct task_struct *);
58084 + cred = __task_cred(task);
58085 + pcred = __task_cred(task->real_parent);
58086 + num1 = va_arg(ap, int);
58087 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58088 + break;
58089 + case GR_CRASH1:
58090 + task = va_arg(ap, struct task_struct *);
58091 + cred = __task_cred(task);
58092 + pcred = __task_cred(task->real_parent);
58093 + ulong1 = va_arg(ap, unsigned long);
58094 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58095 + break;
58096 + case GR_CRASH2:
58097 + task = va_arg(ap, struct task_struct *);
58098 + cred = __task_cred(task);
58099 + pcred = __task_cred(task->real_parent);
58100 + ulong1 = va_arg(ap, unsigned long);
58101 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58102 + break;
58103 + case GR_RWXMAP:
58104 + file = va_arg(ap, struct file *);
58105 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58106 + break;
58107 + case GR_PSACCT:
58108 + {
58109 + unsigned int wday, cday;
58110 + __u8 whr, chr;
58111 + __u8 wmin, cmin;
58112 + __u8 wsec, csec;
58113 + char cur_tty[64] = { 0 };
58114 + char parent_tty[64] = { 0 };
58115 +
58116 + task = va_arg(ap, struct task_struct *);
58117 + wday = va_arg(ap, unsigned int);
58118 + cday = va_arg(ap, unsigned int);
58119 + whr = va_arg(ap, int);
58120 + chr = va_arg(ap, int);
58121 + wmin = va_arg(ap, int);
58122 + cmin = va_arg(ap, int);
58123 + wsec = va_arg(ap, int);
58124 + csec = va_arg(ap, int);
58125 + ulong1 = va_arg(ap, unsigned long);
58126 + cred = __task_cred(task);
58127 + pcred = __task_cred(task->real_parent);
58128 +
58129 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58130 + }
58131 + break;
58132 + default:
58133 + gr_log_middle(audit, msg, ap);
58134 + }
58135 + va_end(ap);
58136 + // these don't need DEFAULTSECARGS printed on the end
58137 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58138 + gr_log_end(audit, 0);
58139 + else
58140 + gr_log_end(audit, 1);
58141 + END_LOCKS(audit);
58142 +}
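The gr_log_start path above rate-limits alerts: up to CONFIG_GRKERNSEC_FLOODBURST messages per CONFIG_GRKERNSEC_FLOODTIME-second window, after which one suppression notice is printed and further alerts are dropped until a fresh window begins. The following standalone sketch mirrors that window/burst bookkeeping using time(NULL) instead of get_seconds(); the constants 10 and 6 are hypothetical stand-ins for the Kconfig values and the whole file is illustrative, not part of the patch.

/* alert_flood_sketch.c - mirrors the alert flood window above.
 * Compile: cc -o alert_flood_sketch alert_flood_sketch.c
 */
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* stand-in for CONFIG_GRKERNSEC_FLOODTIME */
#define FLOODBURST 6    /* stand-in for CONFIG_GRKERNSEC_FLOODBURST */

static unsigned long alert_wtime;  /* start of the current window */
static unsigned long alert_fyet;   /* alerts emitted in the current window */

/* Returns 1 if this alert may be logged, 0 if it is flood-suppressed. */
static int alert_allowed(unsigned long now)
{
    if (!alert_wtime || now > alert_wtime + FLOODTIME) {
        alert_wtime = now;          /* new window */
        alert_fyet = 0;
        return 1;
    }
    if (alert_fyet < FLOODBURST) {
        alert_fyet++;
        return 1;
    }
    if (alert_fyet == FLOODBURST) { /* announce the suppression exactly once */
        alert_wtime = now;
        alert_fyet++;
        printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
    }
    return 0;
}

int main(void)
{
    unsigned long now = (unsigned long)time(NULL);
    for (int i = 0; i < 10; i++)
        printf("alert %d: %s\n", i, alert_allowed(now) ? "logged" : "suppressed");
    return 0;
}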
58143 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58144 new file mode 100644
58145 index 0000000..f536303
58146 --- /dev/null
58147 +++ b/grsecurity/grsec_mem.c
58148 @@ -0,0 +1,40 @@
58149 +#include <linux/kernel.h>
58150 +#include <linux/sched.h>
58151 +#include <linux/mm.h>
58152 +#include <linux/mman.h>
58153 +#include <linux/grinternal.h>
58154 +
58155 +void
58156 +gr_handle_ioperm(void)
58157 +{
58158 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58159 + return;
58160 +}
58161 +
58162 +void
58163 +gr_handle_iopl(void)
58164 +{
58165 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58166 + return;
58167 +}
58168 +
58169 +void
58170 +gr_handle_mem_readwrite(u64 from, u64 to)
58171 +{
58172 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58173 + return;
58174 +}
58175 +
58176 +void
58177 +gr_handle_vm86(void)
58178 +{
58179 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58180 + return;
58181 +}
58182 +
58183 +void
58184 +gr_log_badprocpid(const char *entry)
58185 +{
58186 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
58187 + return;
58188 +}
58189 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58190 new file mode 100644
58191 index 0000000..2131422
58192 --- /dev/null
58193 +++ b/grsecurity/grsec_mount.c
58194 @@ -0,0 +1,62 @@
58195 +#include <linux/kernel.h>
58196 +#include <linux/sched.h>
58197 +#include <linux/mount.h>
58198 +#include <linux/grsecurity.h>
58199 +#include <linux/grinternal.h>
58200 +
58201 +void
58202 +gr_log_remount(const char *devname, const int retval)
58203 +{
58204 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58205 + if (grsec_enable_mount && (retval >= 0))
58206 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58207 +#endif
58208 + return;
58209 +}
58210 +
58211 +void
58212 +gr_log_unmount(const char *devname, const int retval)
58213 +{
58214 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58215 + if (grsec_enable_mount && (retval >= 0))
58216 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58217 +#endif
58218 + return;
58219 +}
58220 +
58221 +void
58222 +gr_log_mount(const char *from, const char *to, const int retval)
58223 +{
58224 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58225 + if (grsec_enable_mount && (retval >= 0))
58226 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58227 +#endif
58228 + return;
58229 +}
58230 +
58231 +int
58232 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58233 +{
58234 +#ifdef CONFIG_GRKERNSEC_ROFS
58235 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58236 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58237 + return -EPERM;
58238 + } else
58239 + return 0;
58240 +#endif
58241 + return 0;
58242 +}
58243 +
58244 +int
58245 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58246 +{
58247 +#ifdef CONFIG_GRKERNSEC_ROFS
58248 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58249 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58250 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58251 + return -EPERM;
58252 + } else
58253 + return 0;
58254 +#endif
58255 + return 0;
58256 +}
58257 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58258 new file mode 100644
58259 index 0000000..a3b12a0
58260 --- /dev/null
58261 +++ b/grsecurity/grsec_pax.c
58262 @@ -0,0 +1,36 @@
58263 +#include <linux/kernel.h>
58264 +#include <linux/sched.h>
58265 +#include <linux/mm.h>
58266 +#include <linux/file.h>
58267 +#include <linux/grinternal.h>
58268 +#include <linux/grsecurity.h>
58269 +
58270 +void
58271 +gr_log_textrel(struct vm_area_struct * vma)
58272 +{
58273 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58274 + if (grsec_enable_audit_textrel)
58275 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58276 +#endif
58277 + return;
58278 +}
58279 +
58280 +void
58281 +gr_log_rwxmmap(struct file *file)
58282 +{
58283 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58284 + if (grsec_enable_log_rwxmaps)
58285 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58286 +#endif
58287 + return;
58288 +}
58289 +
58290 +void
58291 +gr_log_rwxmprotect(struct file *file)
58292 +{
58293 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58294 + if (grsec_enable_log_rwxmaps)
58295 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58296 +#endif
58297 + return;
58298 +}
58299 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58300 new file mode 100644
58301 index 0000000..f7f29aa
58302 --- /dev/null
58303 +++ b/grsecurity/grsec_ptrace.c
58304 @@ -0,0 +1,30 @@
58305 +#include <linux/kernel.h>
58306 +#include <linux/sched.h>
58307 +#include <linux/grinternal.h>
58308 +#include <linux/security.h>
58309 +
58310 +void
58311 +gr_audit_ptrace(struct task_struct *task)
58312 +{
58313 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58314 + if (grsec_enable_audit_ptrace)
58315 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58316 +#endif
58317 + return;
58318 +}
58319 +
58320 +int
58321 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
58322 +{
58323 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58324 + const struct dentry *dentry = file->f_path.dentry;
58325 + const struct vfsmount *mnt = file->f_path.mnt;
58326 +
58327 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
58328 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
58329 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
58330 + return -EACCES;
58331 + }
58332 +#endif
58333 + return 0;
58334 +}
58335 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58336 new file mode 100644
58337 index 0000000..7a5b2de
58338 --- /dev/null
58339 +++ b/grsecurity/grsec_sig.c
58340 @@ -0,0 +1,207 @@
58341 +#include <linux/kernel.h>
58342 +#include <linux/sched.h>
58343 +#include <linux/delay.h>
58344 +#include <linux/grsecurity.h>
58345 +#include <linux/grinternal.h>
58346 +#include <linux/hardirq.h>
58347 +
58348 +char *signames[] = {
58349 + [SIGSEGV] = "Segmentation fault",
58350 + [SIGILL] = "Illegal instruction",
58351 + [SIGABRT] = "Abort",
58352 + [SIGBUS] = "Invalid alignment/Bus error"
58353 +};
58354 +
58355 +void
58356 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58357 +{
58358 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58359 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58360 + (sig == SIGABRT) || (sig == SIGBUS))) {
58361 + if (t->pid == current->pid) {
58362 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58363 + } else {
58364 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58365 + }
58366 + }
58367 +#endif
58368 + return;
58369 +}
58370 +
58371 +int
58372 +gr_handle_signal(const struct task_struct *p, const int sig)
58373 +{
58374 +#ifdef CONFIG_GRKERNSEC
58375 + /* ignore the 0 signal for protected task checks */
58376 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
58377 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58378 + return -EPERM;
58379 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58380 + return -EPERM;
58381 + }
58382 +#endif
58383 + return 0;
58384 +}
58385 +
58386 +#ifdef CONFIG_GRKERNSEC
58387 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58388 +
58389 +int gr_fake_force_sig(int sig, struct task_struct *t)
58390 +{
58391 + unsigned long int flags;
58392 + int ret, blocked, ignored;
58393 + struct k_sigaction *action;
58394 +
58395 + spin_lock_irqsave(&t->sighand->siglock, flags);
58396 + action = &t->sighand->action[sig-1];
58397 + ignored = action->sa.sa_handler == SIG_IGN;
58398 + blocked = sigismember(&t->blocked, sig);
58399 + if (blocked || ignored) {
58400 + action->sa.sa_handler = SIG_DFL;
58401 + if (blocked) {
58402 + sigdelset(&t->blocked, sig);
58403 + recalc_sigpending_and_wake(t);
58404 + }
58405 + }
58406 + if (action->sa.sa_handler == SIG_DFL)
58407 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
58408 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58409 +
58410 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
58411 +
58412 + return ret;
58413 +}
58414 +#endif
58415 +
58416 +#ifdef CONFIG_GRKERNSEC_BRUTE
58417 +#define GR_USER_BAN_TIME (15 * 60)
58418 +
58419 +static int __get_dumpable(unsigned long mm_flags)
58420 +{
58421 + int ret;
58422 +
58423 + ret = mm_flags & MMF_DUMPABLE_MASK;
58424 + return (ret >= 2) ? 2 : ret;
58425 +}
58426 +#endif
58427 +
58428 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58429 +{
58430 +#ifdef CONFIG_GRKERNSEC_BRUTE
58431 + uid_t uid = 0;
58432 +
58433 + if (!grsec_enable_brute)
58434 + return;
58435 +
58436 + rcu_read_lock();
58437 + read_lock(&tasklist_lock);
58438 + read_lock(&grsec_exec_file_lock);
58439 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58440 + p->real_parent->brute = 1;
58441 + else {
58442 + const struct cred *cred = __task_cred(p), *cred2;
58443 + struct task_struct *tsk, *tsk2;
58444 +
58445 + if (!__get_dumpable(mm_flags) && cred->uid) {
58446 + struct user_struct *user;
58447 +
58448 + uid = cred->uid;
58449 +
58450 + /* this is put upon execution past expiration */
58451 + user = find_user(uid);
58452 + if (user == NULL)
58453 + goto unlock;
58454 + user->banned = 1;
58455 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58456 + if (user->ban_expires == ~0UL)
58457 + user->ban_expires--;
58458 +
58459 + do_each_thread(tsk2, tsk) {
58460 + cred2 = __task_cred(tsk);
58461 + if (tsk != p && cred2->uid == uid)
58462 + gr_fake_force_sig(SIGKILL, tsk);
58463 + } while_each_thread(tsk2, tsk);
58464 + }
58465 + }
58466 +unlock:
58467 + read_unlock(&grsec_exec_file_lock);
58468 + read_unlock(&tasklist_lock);
58469 + rcu_read_unlock();
58470 +
58471 + if (uid)
58472 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58473 +
58474 +#endif
58475 + return;
58476 +}
58477 +
58478 +void gr_handle_brute_check(void)
58479 +{
58480 +#ifdef CONFIG_GRKERNSEC_BRUTE
58481 + if (current->brute)
58482 + msleep(30 * 1000);
58483 +#endif
58484 + return;
58485 +}
58486 +
58487 +void gr_handle_kernel_exploit(void)
58488 +{
58489 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58490 + const struct cred *cred;
58491 + struct task_struct *tsk, *tsk2;
58492 + struct user_struct *user;
58493 + uid_t uid;
58494 +
58495 + if (in_irq() || in_serving_softirq() || in_nmi())
58496 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58497 +
58498 + uid = current_uid();
58499 +
58500 + if (uid == 0)
58501 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
58502 + else {
58503 + /* kill all the processes of this user, hold a reference
58504 + to their creds struct, and prevent them from creating
58505 + another process until system reset
58506 + */
58507 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58508 + /* we intentionally leak this ref */
58509 + user = get_uid(current->cred->user);
58510 + if (user) {
58511 + user->banned = 1;
58512 + user->ban_expires = ~0UL;
58513 + }
58514 +
58515 + read_lock(&tasklist_lock);
58516 + do_each_thread(tsk2, tsk) {
58517 + cred = __task_cred(tsk);
58518 + if (cred->uid == uid)
58519 + gr_fake_force_sig(SIGKILL, tsk);
58520 + } while_each_thread(tsk2, tsk);
58521 + read_unlock(&tasklist_lock);
58522 + }
58523 +#endif
58524 +}
58525 +
58526 +int __gr_process_user_ban(struct user_struct *user)
58527 +{
58528 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58529 + if (unlikely(user->banned)) {
58530 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58531 + user->banned = 0;
58532 + user->ban_expires = 0;
58533 + free_uid(user);
58534 + } else
58535 + return -EPERM;
58536 + }
58537 +#endif
58538 + return 0;
58539 +}
58540 +
58541 +int gr_process_user_ban(void)
58542 +{
58543 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58544 + return __gr_process_user_ban(current->cred->user);
58545 +#endif
58546 + return 0;
58547 +}
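gr_handle_brute_attach and __gr_process_user_ban above implement a time-boxed ban: after a suspicious crash the offending uid is flagged with ban_expires set to now plus GR_USER_BAN_TIME, and every later check clears the flag once that deadline has passed (with ~0UL reserved for permanent bans from the kernel-exploit path). A userspace sketch of the ban bookkeeping follows, with a plain struct standing in for user_struct; it is illustrative only.

/* user_ban_sketch.c - mirrors the ban/expiry bookkeeping above.
 * Compile: cc -o user_ban_sketch user_ban_sketch.c
 */
#include <stdio.h>
#include <time.h>

#define USER_BAN_TIME (15 * 60)   /* same 15-minute window as GR_USER_BAN_TIME */

struct fake_user {                /* stand-in for the relevant user_struct fields */
    int banned;
    unsigned long ban_expires;    /* 0 = none, ~0UL = permanent */
};

static void ban_user(struct fake_user *u, unsigned long now)
{
    u->banned = 1;
    u->ban_expires = now + USER_BAN_TIME;
    if (u->ban_expires == ~0UL)   /* keep ~0UL reserved for permanent bans */
        u->ban_expires--;
}

/* Returns -1 (EPERM-style) while the ban is active, 0 otherwise. */
static int process_user_ban(struct fake_user *u, unsigned long now)
{
    if (u->banned) {
        if (u->ban_expires != ~0UL && now >= u->ban_expires) {
            u->banned = 0;        /* ban expired: lift it */
            u->ban_expires = 0;
        } else {
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    struct fake_user u = { 0, 0 };
    unsigned long now = (unsigned long)time(NULL);

    ban_user(&u, now);
    printf("right after ban : %s\n", process_user_ban(&u, now) ? "blocked" : "allowed");
    printf("16 minutes later: %s\n", process_user_ban(&u, now + 16 * 60) ? "blocked" : "allowed");
    return 0;
}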
58548 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58549 new file mode 100644
58550 index 0000000..4030d57
58551 --- /dev/null
58552 +++ b/grsecurity/grsec_sock.c
58553 @@ -0,0 +1,244 @@
58554 +#include <linux/kernel.h>
58555 +#include <linux/module.h>
58556 +#include <linux/sched.h>
58557 +#include <linux/file.h>
58558 +#include <linux/net.h>
58559 +#include <linux/in.h>
58560 +#include <linux/ip.h>
58561 +#include <net/sock.h>
58562 +#include <net/inet_sock.h>
58563 +#include <linux/grsecurity.h>
58564 +#include <linux/grinternal.h>
58565 +#include <linux/gracl.h>
58566 +
58567 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58568 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58569 +
58570 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
58571 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
58572 +
58573 +#ifdef CONFIG_UNIX_MODULE
58574 +EXPORT_SYMBOL(gr_acl_handle_unix);
58575 +EXPORT_SYMBOL(gr_acl_handle_mknod);
58576 +EXPORT_SYMBOL(gr_handle_chroot_unix);
58577 +EXPORT_SYMBOL(gr_handle_create);
58578 +#endif
58579 +
58580 +#ifdef CONFIG_GRKERNSEC
58581 +#define gr_conn_table_size 32749
58582 +struct conn_table_entry {
58583 + struct conn_table_entry *next;
58584 + struct signal_struct *sig;
58585 +};
58586 +
58587 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58588 +DEFINE_SPINLOCK(gr_conn_table_lock);
58589 +
58590 +extern const char * gr_socktype_to_name(unsigned char type);
58591 +extern const char * gr_proto_to_name(unsigned char proto);
58592 +extern const char * gr_sockfamily_to_name(unsigned char family);
58593 +
58594 +static __inline__ int
58595 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58596 +{
58597 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58598 +}
58599 +
58600 +static __inline__ int
58601 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58602 + __u16 sport, __u16 dport)
58603 +{
58604 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58605 + sig->gr_sport == sport && sig->gr_dport == dport))
58606 + return 1;
58607 + else
58608 + return 0;
58609 +}
58610 +
58611 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58612 +{
58613 + struct conn_table_entry **match;
58614 + unsigned int index;
58615 +
58616 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58617 + sig->gr_sport, sig->gr_dport,
58618 + gr_conn_table_size);
58619 +
58620 + newent->sig = sig;
58621 +
58622 + match = &gr_conn_table[index];
58623 + newent->next = *match;
58624 + *match = newent;
58625 +
58626 + return;
58627 +}
58628 +
58629 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58630 +{
58631 + struct conn_table_entry *match, *last = NULL;
58632 + unsigned int index;
58633 +
58634 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58635 + sig->gr_sport, sig->gr_dport,
58636 + gr_conn_table_size);
58637 +
58638 + match = gr_conn_table[index];
58639 + while (match && !conn_match(match->sig,
58640 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58641 + sig->gr_dport)) {
58642 + last = match;
58643 + match = match->next;
58644 + }
58645 +
58646 + if (match) {
58647 + if (last)
58648 + last->next = match->next;
58649 + else
58650 + gr_conn_table[index] = NULL;
58651 + kfree(match);
58652 + }
58653 +
58654 + return;
58655 +}
58656 +
58657 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58658 + __u16 sport, __u16 dport)
58659 +{
58660 + struct conn_table_entry *match;
58661 + unsigned int index;
58662 +
58663 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58664 +
58665 + match = gr_conn_table[index];
58666 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58667 + match = match->next;
58668 +
58669 + if (match)
58670 + return match->sig;
58671 + else
58672 + return NULL;
58673 +}
58674 +
58675 +#endif
58676 +
58677 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58678 +{
58679 +#ifdef CONFIG_GRKERNSEC
58680 + struct signal_struct *sig = task->signal;
58681 + struct conn_table_entry *newent;
58682 +
58683 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58684 + if (newent == NULL)
58685 + return;
58686 + /* no bh lock needed since we are called with bh disabled */
58687 + spin_lock(&gr_conn_table_lock);
58688 + gr_del_task_from_ip_table_nolock(sig);
58689 + sig->gr_saddr = inet->inet_rcv_saddr;
58690 + sig->gr_daddr = inet->inet_daddr;
58691 + sig->gr_sport = inet->inet_sport;
58692 + sig->gr_dport = inet->inet_dport;
58693 + gr_add_to_task_ip_table_nolock(sig, newent);
58694 + spin_unlock(&gr_conn_table_lock);
58695 +#endif
58696 + return;
58697 +}
58698 +
58699 +void gr_del_task_from_ip_table(struct task_struct *task)
58700 +{
58701 +#ifdef CONFIG_GRKERNSEC
58702 + spin_lock_bh(&gr_conn_table_lock);
58703 + gr_del_task_from_ip_table_nolock(task->signal);
58704 + spin_unlock_bh(&gr_conn_table_lock);
58705 +#endif
58706 + return;
58707 +}
58708 +
58709 +void
58710 +gr_attach_curr_ip(const struct sock *sk)
58711 +{
58712 +#ifdef CONFIG_GRKERNSEC
58713 + struct signal_struct *p, *set;
58714 + const struct inet_sock *inet = inet_sk(sk);
58715 +
58716 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58717 + return;
58718 +
58719 + set = current->signal;
58720 +
58721 + spin_lock_bh(&gr_conn_table_lock);
58722 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58723 + inet->inet_dport, inet->inet_sport);
58724 + if (unlikely(p != NULL)) {
58725 + set->curr_ip = p->curr_ip;
58726 + set->used_accept = 1;
58727 + gr_del_task_from_ip_table_nolock(p);
58728 + spin_unlock_bh(&gr_conn_table_lock);
58729 + return;
58730 + }
58731 + spin_unlock_bh(&gr_conn_table_lock);
58732 +
58733 + set->curr_ip = inet->inet_daddr;
58734 + set->used_accept = 1;
58735 +#endif
58736 + return;
58737 +}
58738 +
58739 +int
58740 +gr_handle_sock_all(const int family, const int type, const int protocol)
58741 +{
58742 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58743 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58744 + (family != AF_UNIX)) {
58745 + if (family == AF_INET)
58746 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58747 + else
58748 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58749 + return -EACCES;
58750 + }
58751 +#endif
58752 + return 0;
58753 +}
58754 +
58755 +int
58756 +gr_handle_sock_server(const struct sockaddr *sck)
58757 +{
58758 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58759 + if (grsec_enable_socket_server &&
58760 + in_group_p(grsec_socket_server_gid) &&
58761 + sck && (sck->sa_family != AF_UNIX) &&
58762 + (sck->sa_family != AF_LOCAL)) {
58763 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58764 + return -EACCES;
58765 + }
58766 +#endif
58767 + return 0;
58768 +}
58769 +
58770 +int
58771 +gr_handle_sock_server_other(const struct sock *sck)
58772 +{
58773 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58774 + if (grsec_enable_socket_server &&
58775 + in_group_p(grsec_socket_server_gid) &&
58776 + sck && (sck->sk_family != AF_UNIX) &&
58777 + (sck->sk_family != AF_LOCAL)) {
58778 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58779 + return -EACCES;
58780 + }
58781 +#endif
58782 + return 0;
58783 +}
58784 +
58785 +int
58786 +gr_handle_sock_client(const struct sockaddr *sck)
58787 +{
58788 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58789 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58790 + sck && (sck->sa_family != AF_UNIX) &&
58791 + (sck->sa_family != AF_LOCAL)) {
58792 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58793 + return -EACCES;
58794 + }
58795 +#endif
58796 + return 0;
58797 +}
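
grsec_sock.c above lets grsecurity attribute an accepted TCP connection to the task that set it up: gr_update_task_in_ip_table() stores the connection 4-tuple in the task's signal_struct and hashes it into a fixed table of 32749 buckets (a prime), and gr_attach_curr_ip() later looks the peer up from the accepting side. A rough standalone sketch of the bucket selection (user-space C, not part of the patch; TABLE_SIZE and the test addresses are only illustrative):

#include <stdio.h>
#include <stdint.h>

#define TABLE_SIZE 32749u       /* prime bucket count, as in the code above */

/* Same arithmetic as conn_hash(): fold the 4-tuple into a bucket index. */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport)
{
        return (daddr + saddr + ((uint32_t)sport << 8) +
                ((uint32_t)dport << 16)) % TABLE_SIZE;
}

int main(void)
{
        /* Nearby connections normally land in different buckets. */
        printf("%u\n", conn_hash(0x0a000001u, 0x0a000002u, 40000, 80));
        printf("%u\n", conn_hash(0x0a000001u, 0x0a000002u, 40001, 80));
        return 0;
}
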
58798 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58799 new file mode 100644
58800 index 0000000..a1aedd7
58801 --- /dev/null
58802 +++ b/grsecurity/grsec_sysctl.c
58803 @@ -0,0 +1,451 @@
58804 +#include <linux/kernel.h>
58805 +#include <linux/sched.h>
58806 +#include <linux/sysctl.h>
58807 +#include <linux/grsecurity.h>
58808 +#include <linux/grinternal.h>
58809 +
58810 +int
58811 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58812 +{
58813 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58814 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58815 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58816 + return -EACCES;
58817 + }
58818 +#endif
58819 + return 0;
58820 +}
58821 +
58822 +#ifdef CONFIG_GRKERNSEC_ROFS
58823 +static int __maybe_unused one = 1;
58824 +#endif
58825 +
58826 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58827 +struct ctl_table grsecurity_table[] = {
58828 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58829 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58830 +#ifdef CONFIG_GRKERNSEC_IO
58831 + {
58832 + .procname = "disable_priv_io",
58833 + .data = &grsec_disable_privio,
58834 + .maxlen = sizeof(int),
58835 + .mode = 0600,
58836 + .proc_handler = &proc_dointvec,
58837 + },
58838 +#endif
58839 +#endif
58840 +#ifdef CONFIG_GRKERNSEC_LINK
58841 + {
58842 + .procname = "linking_restrictions",
58843 + .data = &grsec_enable_link,
58844 + .maxlen = sizeof(int),
58845 + .mode = 0600,
58846 + .proc_handler = &proc_dointvec,
58847 + },
58848 +#endif
58849 +#ifdef CONFIG_GRKERNSEC_BRUTE
58850 + {
58851 + .procname = "deter_bruteforce",
58852 + .data = &grsec_enable_brute,
58853 + .maxlen = sizeof(int),
58854 + .mode = 0600,
58855 + .proc_handler = &proc_dointvec,
58856 + },
58857 +#endif
58858 +#ifdef CONFIG_GRKERNSEC_FIFO
58859 + {
58860 + .procname = "fifo_restrictions",
58861 + .data = &grsec_enable_fifo,
58862 + .maxlen = sizeof(int),
58863 + .mode = 0600,
58864 + .proc_handler = &proc_dointvec,
58865 + },
58866 +#endif
58867 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58868 + {
58869 + .procname = "ptrace_readexec",
58870 + .data = &grsec_enable_ptrace_readexec,
58871 + .maxlen = sizeof(int),
58872 + .mode = 0600,
58873 + .proc_handler = &proc_dointvec,
58874 + },
58875 +#endif
58876 +#ifdef CONFIG_GRKERNSEC_SETXID
58877 + {
58878 + .procname = "consistent_setxid",
58879 + .data = &grsec_enable_setxid,
58880 + .maxlen = sizeof(int),
58881 + .mode = 0600,
58882 + .proc_handler = &proc_dointvec,
58883 + },
58884 +#endif
58885 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58886 + {
58887 + .procname = "ip_blackhole",
58888 + .data = &grsec_enable_blackhole,
58889 + .maxlen = sizeof(int),
58890 + .mode = 0600,
58891 + .proc_handler = &proc_dointvec,
58892 + },
58893 + {
58894 + .procname = "lastack_retries",
58895 + .data = &grsec_lastack_retries,
58896 + .maxlen = sizeof(int),
58897 + .mode = 0600,
58898 + .proc_handler = &proc_dointvec,
58899 + },
58900 +#endif
58901 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58902 + {
58903 + .procname = "exec_logging",
58904 + .data = &grsec_enable_execlog,
58905 + .maxlen = sizeof(int),
58906 + .mode = 0600,
58907 + .proc_handler = &proc_dointvec,
58908 + },
58909 +#endif
58910 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58911 + {
58912 + .procname = "rwxmap_logging",
58913 + .data = &grsec_enable_log_rwxmaps,
58914 + .maxlen = sizeof(int),
58915 + .mode = 0600,
58916 + .proc_handler = &proc_dointvec,
58917 + },
58918 +#endif
58919 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58920 + {
58921 + .procname = "signal_logging",
58922 + .data = &grsec_enable_signal,
58923 + .maxlen = sizeof(int),
58924 + .mode = 0600,
58925 + .proc_handler = &proc_dointvec,
58926 + },
58927 +#endif
58928 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58929 + {
58930 + .procname = "forkfail_logging",
58931 + .data = &grsec_enable_forkfail,
58932 + .maxlen = sizeof(int),
58933 + .mode = 0600,
58934 + .proc_handler = &proc_dointvec,
58935 + },
58936 +#endif
58937 +#ifdef CONFIG_GRKERNSEC_TIME
58938 + {
58939 + .procname = "timechange_logging",
58940 + .data = &grsec_enable_time,
58941 + .maxlen = sizeof(int),
58942 + .mode = 0600,
58943 + .proc_handler = &proc_dointvec,
58944 + },
58945 +#endif
58946 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58947 + {
58948 + .procname = "chroot_deny_shmat",
58949 + .data = &grsec_enable_chroot_shmat,
58950 + .maxlen = sizeof(int),
58951 + .mode = 0600,
58952 + .proc_handler = &proc_dointvec,
58953 + },
58954 +#endif
58955 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58956 + {
58957 + .procname = "chroot_deny_unix",
58958 + .data = &grsec_enable_chroot_unix,
58959 + .maxlen = sizeof(int),
58960 + .mode = 0600,
58961 + .proc_handler = &proc_dointvec,
58962 + },
58963 +#endif
58964 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58965 + {
58966 + .procname = "chroot_deny_mount",
58967 + .data = &grsec_enable_chroot_mount,
58968 + .maxlen = sizeof(int),
58969 + .mode = 0600,
58970 + .proc_handler = &proc_dointvec,
58971 + },
58972 +#endif
58973 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58974 + {
58975 + .procname = "chroot_deny_fchdir",
58976 + .data = &grsec_enable_chroot_fchdir,
58977 + .maxlen = sizeof(int),
58978 + .mode = 0600,
58979 + .proc_handler = &proc_dointvec,
58980 + },
58981 +#endif
58982 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58983 + {
58984 + .procname = "chroot_deny_chroot",
58985 + .data = &grsec_enable_chroot_double,
58986 + .maxlen = sizeof(int),
58987 + .mode = 0600,
58988 + .proc_handler = &proc_dointvec,
58989 + },
58990 +#endif
58991 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58992 + {
58993 + .procname = "chroot_deny_pivot",
58994 + .data = &grsec_enable_chroot_pivot,
58995 + .maxlen = sizeof(int),
58996 + .mode = 0600,
58997 + .proc_handler = &proc_dointvec,
58998 + },
58999 +#endif
59000 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59001 + {
59002 + .procname = "chroot_enforce_chdir",
59003 + .data = &grsec_enable_chroot_chdir,
59004 + .maxlen = sizeof(int),
59005 + .mode = 0600,
59006 + .proc_handler = &proc_dointvec,
59007 + },
59008 +#endif
59009 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59010 + {
59011 + .procname = "chroot_deny_chmod",
59012 + .data = &grsec_enable_chroot_chmod,
59013 + .maxlen = sizeof(int),
59014 + .mode = 0600,
59015 + .proc_handler = &proc_dointvec,
59016 + },
59017 +#endif
59018 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59019 + {
59020 + .procname = "chroot_deny_mknod",
59021 + .data = &grsec_enable_chroot_mknod,
59022 + .maxlen = sizeof(int),
59023 + .mode = 0600,
59024 + .proc_handler = &proc_dointvec,
59025 + },
59026 +#endif
59027 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59028 + {
59029 + .procname = "chroot_restrict_nice",
59030 + .data = &grsec_enable_chroot_nice,
59031 + .maxlen = sizeof(int),
59032 + .mode = 0600,
59033 + .proc_handler = &proc_dointvec,
59034 + },
59035 +#endif
59036 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59037 + {
59038 + .procname = "chroot_execlog",
59039 + .data = &grsec_enable_chroot_execlog,
59040 + .maxlen = sizeof(int),
59041 + .mode = 0600,
59042 + .proc_handler = &proc_dointvec,
59043 + },
59044 +#endif
59045 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59046 + {
59047 + .procname = "chroot_caps",
59048 + .data = &grsec_enable_chroot_caps,
59049 + .maxlen = sizeof(int),
59050 + .mode = 0600,
59051 + .proc_handler = &proc_dointvec,
59052 + },
59053 +#endif
59054 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59055 + {
59056 + .procname = "chroot_deny_sysctl",
59057 + .data = &grsec_enable_chroot_sysctl,
59058 + .maxlen = sizeof(int),
59059 + .mode = 0600,
59060 + .proc_handler = &proc_dointvec,
59061 + },
59062 +#endif
59063 +#ifdef CONFIG_GRKERNSEC_TPE
59064 + {
59065 + .procname = "tpe",
59066 + .data = &grsec_enable_tpe,
59067 + .maxlen = sizeof(int),
59068 + .mode = 0600,
59069 + .proc_handler = &proc_dointvec,
59070 + },
59071 + {
59072 + .procname = "tpe_gid",
59073 + .data = &grsec_tpe_gid,
59074 + .maxlen = sizeof(int),
59075 + .mode = 0600,
59076 + .proc_handler = &proc_dointvec,
59077 + },
59078 +#endif
59079 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59080 + {
59081 + .procname = "tpe_invert",
59082 + .data = &grsec_enable_tpe_invert,
59083 + .maxlen = sizeof(int),
59084 + .mode = 0600,
59085 + .proc_handler = &proc_dointvec,
59086 + },
59087 +#endif
59088 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59089 + {
59090 + .procname = "tpe_restrict_all",
59091 + .data = &grsec_enable_tpe_all,
59092 + .maxlen = sizeof(int),
59093 + .mode = 0600,
59094 + .proc_handler = &proc_dointvec,
59095 + },
59096 +#endif
59097 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59098 + {
59099 + .procname = "socket_all",
59100 + .data = &grsec_enable_socket_all,
59101 + .maxlen = sizeof(int),
59102 + .mode = 0600,
59103 + .proc_handler = &proc_dointvec,
59104 + },
59105 + {
59106 + .procname = "socket_all_gid",
59107 + .data = &grsec_socket_all_gid,
59108 + .maxlen = sizeof(int),
59109 + .mode = 0600,
59110 + .proc_handler = &proc_dointvec,
59111 + },
59112 +#endif
59113 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59114 + {
59115 + .procname = "socket_client",
59116 + .data = &grsec_enable_socket_client,
59117 + .maxlen = sizeof(int),
59118 + .mode = 0600,
59119 + .proc_handler = &proc_dointvec,
59120 + },
59121 + {
59122 + .procname = "socket_client_gid",
59123 + .data = &grsec_socket_client_gid,
59124 + .maxlen = sizeof(int),
59125 + .mode = 0600,
59126 + .proc_handler = &proc_dointvec,
59127 + },
59128 +#endif
59129 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59130 + {
59131 + .procname = "socket_server",
59132 + .data = &grsec_enable_socket_server,
59133 + .maxlen = sizeof(int),
59134 + .mode = 0600,
59135 + .proc_handler = &proc_dointvec,
59136 + },
59137 + {
59138 + .procname = "socket_server_gid",
59139 + .data = &grsec_socket_server_gid,
59140 + .maxlen = sizeof(int),
59141 + .mode = 0600,
59142 + .proc_handler = &proc_dointvec,
59143 + },
59144 +#endif
59145 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59146 + {
59147 + .procname = "audit_group",
59148 + .data = &grsec_enable_group,
59149 + .maxlen = sizeof(int),
59150 + .mode = 0600,
59151 + .proc_handler = &proc_dointvec,
59152 + },
59153 + {
59154 + .procname = "audit_gid",
59155 + .data = &grsec_audit_gid,
59156 + .maxlen = sizeof(int),
59157 + .mode = 0600,
59158 + .proc_handler = &proc_dointvec,
59159 + },
59160 +#endif
59161 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59162 + {
59163 + .procname = "audit_chdir",
59164 + .data = &grsec_enable_chdir,
59165 + .maxlen = sizeof(int),
59166 + .mode = 0600,
59167 + .proc_handler = &proc_dointvec,
59168 + },
59169 +#endif
59170 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59171 + {
59172 + .procname = "audit_mount",
59173 + .data = &grsec_enable_mount,
59174 + .maxlen = sizeof(int),
59175 + .mode = 0600,
59176 + .proc_handler = &proc_dointvec,
59177 + },
59178 +#endif
59179 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59180 + {
59181 + .procname = "audit_textrel",
59182 + .data = &grsec_enable_audit_textrel,
59183 + .maxlen = sizeof(int),
59184 + .mode = 0600,
59185 + .proc_handler = &proc_dointvec,
59186 + },
59187 +#endif
59188 +#ifdef CONFIG_GRKERNSEC_DMESG
59189 + {
59190 + .procname = "dmesg",
59191 + .data = &grsec_enable_dmesg,
59192 + .maxlen = sizeof(int),
59193 + .mode = 0600,
59194 + .proc_handler = &proc_dointvec,
59195 + },
59196 +#endif
59197 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59198 + {
59199 + .procname = "chroot_findtask",
59200 + .data = &grsec_enable_chroot_findtask,
59201 + .maxlen = sizeof(int),
59202 + .mode = 0600,
59203 + .proc_handler = &proc_dointvec,
59204 + },
59205 +#endif
59206 +#ifdef CONFIG_GRKERNSEC_RESLOG
59207 + {
59208 + .procname = "resource_logging",
59209 + .data = &grsec_resource_logging,
59210 + .maxlen = sizeof(int),
59211 + .mode = 0600,
59212 + .proc_handler = &proc_dointvec,
59213 + },
59214 +#endif
59215 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59216 + {
59217 + .procname = "audit_ptrace",
59218 + .data = &grsec_enable_audit_ptrace,
59219 + .maxlen = sizeof(int),
59220 + .mode = 0600,
59221 + .proc_handler = &proc_dointvec,
59222 + },
59223 +#endif
59224 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59225 + {
59226 + .procname = "harden_ptrace",
59227 + .data = &grsec_enable_harden_ptrace,
59228 + .maxlen = sizeof(int),
59229 + .mode = 0600,
59230 + .proc_handler = &proc_dointvec,
59231 + },
59232 +#endif
59233 + {
59234 + .procname = "grsec_lock",
59235 + .data = &grsec_lock,
59236 + .maxlen = sizeof(int),
59237 + .mode = 0600,
59238 + .proc_handler = &proc_dointvec,
59239 + },
59240 +#endif
59241 +#ifdef CONFIG_GRKERNSEC_ROFS
59242 + {
59243 + .procname = "romount_protect",
59244 + .data = &grsec_enable_rofs,
59245 + .maxlen = sizeof(int),
59246 + .mode = 0600,
59247 + .proc_handler = &proc_dointvec_minmax,
59248 + .extra1 = &one,
59249 + .extra2 = &one,
59250 + },
59251 +#endif
59252 + { }
59253 +};
59254 +#endif
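
Each entry in grsecurity_table above becomes a mode-0600 integer file handled by proc_dointvec, and gr_handle_sysctl_mod() at the top of this file refuses further writes once grsec_lock is set. A hedged user-space sketch of flipping these knobs (the /proc/sys/kernel/grsecurity/ paths are the usual location on grsecurity kernels, but which files exist depends on the CONFIG_GRKERNSEC_* options chosen at build time):

#include <stdio.h>

/* Write a value into a sysctl file; returns 0 on success. */
static int set_sysctl(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        set_sysctl("/proc/sys/kernel/grsecurity/deter_bruteforce", "1");
        /* Once grsec_lock is 1, every grsecurity sysctl (including this
         * one) stays locked down until the next boot. */
        set_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1");
        return 0;
}
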
59255 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59256 new file mode 100644
59257 index 0000000..0dc13c3
59258 --- /dev/null
59259 +++ b/grsecurity/grsec_time.c
59260 @@ -0,0 +1,16 @@
59261 +#include <linux/kernel.h>
59262 +#include <linux/sched.h>
59263 +#include <linux/grinternal.h>
59264 +#include <linux/module.h>
59265 +
59266 +void
59267 +gr_log_timechange(void)
59268 +{
59269 +#ifdef CONFIG_GRKERNSEC_TIME
59270 + if (grsec_enable_time)
59271 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59272 +#endif
59273 + return;
59274 +}
59275 +
59276 +EXPORT_SYMBOL(gr_log_timechange);
59277 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59278 new file mode 100644
59279 index 0000000..07e0dc0
59280 --- /dev/null
59281 +++ b/grsecurity/grsec_tpe.c
59282 @@ -0,0 +1,73 @@
59283 +#include <linux/kernel.h>
59284 +#include <linux/sched.h>
59285 +#include <linux/file.h>
59286 +#include <linux/fs.h>
59287 +#include <linux/grinternal.h>
59288 +
59289 +extern int gr_acl_tpe_check(void);
59290 +
59291 +int
59292 +gr_tpe_allow(const struct file *file)
59293 +{
59294 +#ifdef CONFIG_GRKERNSEC
59295 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59296 + const struct cred *cred = current_cred();
59297 + char *msg = NULL;
59298 + char *msg2 = NULL;
59299 +
59300 + // never restrict root
59301 + if (!cred->uid)
59302 + return 1;
59303 +
59304 + if (grsec_enable_tpe) {
59305 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59306 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
59307 + msg = "not being in trusted group";
59308 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
59309 + msg = "being in untrusted group";
59310 +#else
59311 + if (in_group_p(grsec_tpe_gid))
59312 + msg = "being in untrusted group";
59313 +#endif
59314 + }
59315 + if (!msg && gr_acl_tpe_check())
59316 + msg = "being in untrusted role";
59317 +
59318 + // not in any affected group/role
59319 + if (!msg)
59320 + goto next_check;
59321 +
59322 + if (inode->i_uid)
59323 + msg2 = "file in non-root-owned directory";
59324 + else if (inode->i_mode & S_IWOTH)
59325 + msg2 = "file in world-writable directory";
59326 + else if (inode->i_mode & S_IWGRP)
59327 + msg2 = "file in group-writable directory";
59328 +
59329 + if (msg && msg2) {
59330 + char fullmsg[70] = {0};
59331 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
59332 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
59333 + return 0;
59334 + }
59335 + msg = NULL;
59336 +next_check:
59337 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59338 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
59339 + return 1;
59340 +
59341 + if (inode->i_uid && (inode->i_uid != cred->uid))
59342 + msg = "directory not owned by user";
59343 + else if (inode->i_mode & S_IWOTH)
59344 + msg = "file in world-writable directory";
59345 + else if (inode->i_mode & S_IWGRP)
59346 + msg = "file in group-writable directory";
59347 +
59348 + if (msg) {
59349 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
59350 + return 0;
59351 + }
59352 +#endif
59353 +#endif
59354 + return 1;
59355 +}
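
gr_tpe_allow() above denies the exec when the caller is in the untrusted group (or, with TPE_INVERT, outside the trusted group) and the binary's parent directory is not root-owned or is group/world-writable; TPE_ALL extends similar checks to every non-root user. A rough user-space approximation of just the directory tests, for illustration only (tpe_dir_reason is an invented name, and the real code inspects the dentry's parent inode rather than a path string):

#include <stdio.h>
#include <libgen.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Return a reason string if the parent directory of 'path' would trip
 * the TPE directory checks, or NULL if it looks acceptable. */
static const char *tpe_dir_reason(const char *path)
{
        char buf[4096];
        struct stat st;

        snprintf(buf, sizeof(buf), "%s", path);
        if (stat(dirname(buf), &st) != 0)
                return "cannot stat parent directory";
        if (st.st_uid != 0)
                return "file in non-root-owned directory";
        if (st.st_mode & S_IWOTH)
                return "file in world-writable directory";
        if (st.st_mode & S_IWGRP)
                return "file in group-writable directory";
        return NULL;
}

int main(int argc, char **argv)
{
        const char *why = tpe_dir_reason(argc > 1 ? argv[1] : "/tmp/a.out");

        printf("%s\n", why ? why : "directory checks pass");
        return 0;
}
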
59356 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59357 new file mode 100644
59358 index 0000000..9f7b1ac
59359 --- /dev/null
59360 +++ b/grsecurity/grsum.c
59361 @@ -0,0 +1,61 @@
59362 +#include <linux/err.h>
59363 +#include <linux/kernel.h>
59364 +#include <linux/sched.h>
59365 +#include <linux/mm.h>
59366 +#include <linux/scatterlist.h>
59367 +#include <linux/crypto.h>
59368 +#include <linux/gracl.h>
59369 +
59370 +
59371 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59372 +#error "crypto and sha256 must be built into the kernel"
59373 +#endif
59374 +
59375 +int
59376 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59377 +{
59378 + char *p;
59379 + struct crypto_hash *tfm;
59380 + struct hash_desc desc;
59381 + struct scatterlist sg;
59382 + unsigned char temp_sum[GR_SHA_LEN];
59383 + volatile int retval = 0;
59384 + volatile int dummy = 0;
59385 + unsigned int i;
59386 +
59387 + sg_init_table(&sg, 1);
59388 +
59389 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59390 + if (IS_ERR(tfm)) {
59391 + /* should never happen, since sha256 should be built in */
59392 + return 1;
59393 + }
59394 +
59395 + desc.tfm = tfm;
59396 + desc.flags = 0;
59397 +
59398 + crypto_hash_init(&desc);
59399 +
59400 + p = salt;
59401 + sg_set_buf(&sg, p, GR_SALT_LEN);
59402 + crypto_hash_update(&desc, &sg, sg.length);
59403 +
59404 + p = entry->pw;
59405 + sg_set_buf(&sg, p, strlen(p));
59406 +
59407 + crypto_hash_update(&desc, &sg, sg.length);
59408 +
59409 + crypto_hash_final(&desc, temp_sum);
59410 +
59411 + memset(entry->pw, 0, GR_PW_LEN);
59412 +
59413 + for (i = 0; i < GR_SHA_LEN; i++)
59414 + if (sum[i] != temp_sum[i])
59415 + retval = 1;
59416 + else
59417 + dummy = 1; // waste a cycle
59418 +
59419 + crypto_free_hash(tfm);
59420 +
59421 + return retval;
59422 +}
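
chkpw() above compares the stored digest with the freshly computed SHA-256 byte by byte without bailing out early; the dummy else branch keeps the loop's work roughly constant so timing does not reveal where the first mismatch occurred. The same idea, written with an accumulator instead of a dummy variable, in standalone C (ct_memcmp is an illustrative name, not a kernel function):

#include <stdio.h>
#include <stddef.h>

/* Constant-time comparison: look at every byte and OR the differences
 * together, so the running time does not depend on where (or whether)
 * the buffers differ. */
static int ct_memcmp(const unsigned char *a, const unsigned char *b, size_t n)
{
        unsigned char diff = 0;
        size_t i;

        for (i = 0; i < n; i++)
                diff |= a[i] ^ b[i];
        return diff != 0;
}

int main(void)
{
        unsigned char x[4] = { 1, 2, 3, 4 };
        unsigned char y[4] = { 1, 2, 3, 5 };

        printf("%d %d\n", ct_memcmp(x, x, 4), ct_memcmp(x, y, 4));     /* 0 1 */
        return 0;
}
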
59423 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
59424 index 6cd5b64..f620d2d 100644
59425 --- a/include/acpi/acpi_bus.h
59426 +++ b/include/acpi/acpi_bus.h
59427 @@ -107,7 +107,7 @@ struct acpi_device_ops {
59428 acpi_op_bind bind;
59429 acpi_op_unbind unbind;
59430 acpi_op_notify notify;
59431 -};
59432 +} __no_const;
59433
59434 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59435
59436 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
59437 index b7babf0..71e4e74 100644
59438 --- a/include/asm-generic/atomic-long.h
59439 +++ b/include/asm-generic/atomic-long.h
59440 @@ -22,6 +22,12 @@
59441
59442 typedef atomic64_t atomic_long_t;
59443
59444 +#ifdef CONFIG_PAX_REFCOUNT
59445 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
59446 +#else
59447 +typedef atomic64_t atomic_long_unchecked_t;
59448 +#endif
59449 +
59450 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
59451
59452 static inline long atomic_long_read(atomic_long_t *l)
59453 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59454 return (long)atomic64_read(v);
59455 }
59456
59457 +#ifdef CONFIG_PAX_REFCOUNT
59458 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59459 +{
59460 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59461 +
59462 + return (long)atomic64_read_unchecked(v);
59463 +}
59464 +#endif
59465 +
59466 static inline void atomic_long_set(atomic_long_t *l, long i)
59467 {
59468 atomic64_t *v = (atomic64_t *)l;
59469 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59470 atomic64_set(v, i);
59471 }
59472
59473 +#ifdef CONFIG_PAX_REFCOUNT
59474 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59475 +{
59476 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59477 +
59478 + atomic64_set_unchecked(v, i);
59479 +}
59480 +#endif
59481 +
59482 static inline void atomic_long_inc(atomic_long_t *l)
59483 {
59484 atomic64_t *v = (atomic64_t *)l;
59485 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59486 atomic64_inc(v);
59487 }
59488
59489 +#ifdef CONFIG_PAX_REFCOUNT
59490 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59491 +{
59492 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59493 +
59494 + atomic64_inc_unchecked(v);
59495 +}
59496 +#endif
59497 +
59498 static inline void atomic_long_dec(atomic_long_t *l)
59499 {
59500 atomic64_t *v = (atomic64_t *)l;
59501 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59502 atomic64_dec(v);
59503 }
59504
59505 +#ifdef CONFIG_PAX_REFCOUNT
59506 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59507 +{
59508 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59509 +
59510 + atomic64_dec_unchecked(v);
59511 +}
59512 +#endif
59513 +
59514 static inline void atomic_long_add(long i, atomic_long_t *l)
59515 {
59516 atomic64_t *v = (atomic64_t *)l;
59517 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59518 atomic64_add(i, v);
59519 }
59520
59521 +#ifdef CONFIG_PAX_REFCOUNT
59522 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59523 +{
59524 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59525 +
59526 + atomic64_add_unchecked(i, v);
59527 +}
59528 +#endif
59529 +
59530 static inline void atomic_long_sub(long i, atomic_long_t *l)
59531 {
59532 atomic64_t *v = (atomic64_t *)l;
59533 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59534 atomic64_sub(i, v);
59535 }
59536
59537 +#ifdef CONFIG_PAX_REFCOUNT
59538 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59539 +{
59540 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59541 +
59542 + atomic64_sub_unchecked(i, v);
59543 +}
59544 +#endif
59545 +
59546 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59547 {
59548 atomic64_t *v = (atomic64_t *)l;
59549 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59550 return (long)atomic64_inc_return(v);
59551 }
59552
59553 +#ifdef CONFIG_PAX_REFCOUNT
59554 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59555 +{
59556 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59557 +
59558 + return (long)atomic64_inc_return_unchecked(v);
59559 +}
59560 +#endif
59561 +
59562 static inline long atomic_long_dec_return(atomic_long_t *l)
59563 {
59564 atomic64_t *v = (atomic64_t *)l;
59565 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59566
59567 typedef atomic_t atomic_long_t;
59568
59569 +#ifdef CONFIG_PAX_REFCOUNT
59570 +typedef atomic_unchecked_t atomic_long_unchecked_t;
59571 +#else
59572 +typedef atomic_t atomic_long_unchecked_t;
59573 +#endif
59574 +
59575 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59576 static inline long atomic_long_read(atomic_long_t *l)
59577 {
59578 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59579 return (long)atomic_read(v);
59580 }
59581
59582 +#ifdef CONFIG_PAX_REFCOUNT
59583 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59584 +{
59585 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59586 +
59587 + return (long)atomic_read_unchecked(v);
59588 +}
59589 +#endif
59590 +
59591 static inline void atomic_long_set(atomic_long_t *l, long i)
59592 {
59593 atomic_t *v = (atomic_t *)l;
59594 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59595 atomic_set(v, i);
59596 }
59597
59598 +#ifdef CONFIG_PAX_REFCOUNT
59599 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59600 +{
59601 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59602 +
59603 + atomic_set_unchecked(v, i);
59604 +}
59605 +#endif
59606 +
59607 static inline void atomic_long_inc(atomic_long_t *l)
59608 {
59609 atomic_t *v = (atomic_t *)l;
59610 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59611 atomic_inc(v);
59612 }
59613
59614 +#ifdef CONFIG_PAX_REFCOUNT
59615 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59616 +{
59617 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59618 +
59619 + atomic_inc_unchecked(v);
59620 +}
59621 +#endif
59622 +
59623 static inline void atomic_long_dec(atomic_long_t *l)
59624 {
59625 atomic_t *v = (atomic_t *)l;
59626 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59627 atomic_dec(v);
59628 }
59629
59630 +#ifdef CONFIG_PAX_REFCOUNT
59631 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59632 +{
59633 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59634 +
59635 + atomic_dec_unchecked(v);
59636 +}
59637 +#endif
59638 +
59639 static inline void atomic_long_add(long i, atomic_long_t *l)
59640 {
59641 atomic_t *v = (atomic_t *)l;
59642 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59643 atomic_add(i, v);
59644 }
59645
59646 +#ifdef CONFIG_PAX_REFCOUNT
59647 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59648 +{
59649 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59650 +
59651 + atomic_add_unchecked(i, v);
59652 +}
59653 +#endif
59654 +
59655 static inline void atomic_long_sub(long i, atomic_long_t *l)
59656 {
59657 atomic_t *v = (atomic_t *)l;
59658 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59659 atomic_sub(i, v);
59660 }
59661
59662 +#ifdef CONFIG_PAX_REFCOUNT
59663 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59664 +{
59665 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59666 +
59667 + atomic_sub_unchecked(i, v);
59668 +}
59669 +#endif
59670 +
59671 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59672 {
59673 atomic_t *v = (atomic_t *)l;
59674 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59675 return (long)atomic_inc_return(v);
59676 }
59677
59678 +#ifdef CONFIG_PAX_REFCOUNT
59679 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59680 +{
59681 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59682 +
59683 + return (long)atomic_inc_return_unchecked(v);
59684 +}
59685 +#endif
59686 +
59687 static inline long atomic_long_dec_return(atomic_long_t *l)
59688 {
59689 atomic_t *v = (atomic_t *)l;
59690 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59691
59692 #endif /* BITS_PER_LONG == 64 */
59693
59694 +#ifdef CONFIG_PAX_REFCOUNT
59695 +static inline void pax_refcount_needs_these_functions(void)
59696 +{
59697 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
59698 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59699 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59700 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59701 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59702 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59703 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59704 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59705 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59706 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59707 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59708 +
59709 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59710 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59711 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59712 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59713 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59714 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59715 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59716 +}
59717 +#else
59718 +#define atomic_read_unchecked(v) atomic_read(v)
59719 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59720 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59721 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59722 +#define atomic_inc_unchecked(v) atomic_inc(v)
59723 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59724 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59725 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59726 +#define atomic_dec_unchecked(v) atomic_dec(v)
59727 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59728 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59729 +
59730 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
59731 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59732 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59733 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59734 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59735 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59736 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59737 +#endif
59738 +
59739 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
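
The atomic-long changes above add *_unchecked counterparts for every operation: with CONFIG_PAX_REFCOUNT off they are plain aliases (the #else block of defines), and with it on the regular atomics gain PaX's overflow checking while the _unchecked variants keep ordinary wrapping semantics for counters that are allowed to wrap (statistics and the like). A compact standalone sketch of that checked-by-default/opt-out pattern, with invented names and unsigned counters so the wrap stays well defined:

#include <stdio.h>
#include <limits.h>

/* Illustrative split: the checked add refuses to wrap and reports it,
 * the unchecked one keeps plain modular arithmetic. */
static int counter_add_checked(unsigned int *v, unsigned int delta)
{
        if (delta > UINT_MAX - *v)
                return -1;              /* would wrap: report instead */
        *v += delta;
        return 0;
}

#define counter_add_unchecked(v, delta) (*(v) += (delta))

int main(void)
{
        unsigned int a = UINT_MAX - 1, b = UINT_MAX - 1;
        int rc = counter_add_checked(&a, 5);

        printf("checked:   rc=%d val=%u\n", rc, a);
        counter_add_unchecked(&b, 5);   /* wraps around, on purpose */
        printf("unchecked: val=%u\n", b);
        return 0;
}
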
59740 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59741 index b18ce4f..2ee2843 100644
59742 --- a/include/asm-generic/atomic64.h
59743 +++ b/include/asm-generic/atomic64.h
59744 @@ -16,6 +16,8 @@ typedef struct {
59745 long long counter;
59746 } atomic64_t;
59747
59748 +typedef atomic64_t atomic64_unchecked_t;
59749 +
59750 #define ATOMIC64_INIT(i) { (i) }
59751
59752 extern long long atomic64_read(const atomic64_t *v);
59753 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59754 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59755 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59756
59757 +#define atomic64_read_unchecked(v) atomic64_read(v)
59758 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59759 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59760 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59761 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59762 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
59763 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59764 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
59765 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59766 +
59767 #endif /* _ASM_GENERIC_ATOMIC64_H */
59768 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59769 index 1bfcfe5..e04c5c9 100644
59770 --- a/include/asm-generic/cache.h
59771 +++ b/include/asm-generic/cache.h
59772 @@ -6,7 +6,7 @@
59773 * cache lines need to provide their own cache.h.
59774 */
59775
59776 -#define L1_CACHE_SHIFT 5
59777 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59778 +#define L1_CACHE_SHIFT 5UL
59779 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59780
59781 #endif /* __ASM_GENERIC_CACHE_H */
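
The cache.h tweak above makes L1_CACHE_SHIFT and L1_CACHE_BYTES unsigned long constants, presumably so mask and size arithmetic built on them is carried out in the long type instead of int. A tiny standalone illustration of why the suffix can matter on a 64-bit target (the values are arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned long mask_int  = ~((1   << 5) - 1);  /* int math, then sign-extended */
        unsigned long mask_long = ~((1UL << 5) - 1);  /* done in unsigned long */

        /* Identical here only because the shift is small; with a shift of
         * 31 or more the int form overflows (undefined behaviour), while
         * the UL form stays well defined. */
        printf("%lx %lx\n", mask_int, mask_long);
        return 0;
}
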
59782 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59783 index 0d68a1e..b74a761 100644
59784 --- a/include/asm-generic/emergency-restart.h
59785 +++ b/include/asm-generic/emergency-restart.h
59786 @@ -1,7 +1,7 @@
59787 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59788 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59789
59790 -static inline void machine_emergency_restart(void)
59791 +static inline __noreturn void machine_emergency_restart(void)
59792 {
59793 machine_restart(NULL);
59794 }
59795 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59796 index 0232ccb..13d9165 100644
59797 --- a/include/asm-generic/kmap_types.h
59798 +++ b/include/asm-generic/kmap_types.h
59799 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59800 KMAP_D(17) KM_NMI,
59801 KMAP_D(18) KM_NMI_PTE,
59802 KMAP_D(19) KM_KDB,
59803 +KMAP_D(20) KM_CLEARPAGE,
59804 /*
59805 * Remember to update debug_kmap_atomic() when adding new kmap types!
59806 */
59807 -KMAP_D(20) KM_TYPE_NR
59808 +KMAP_D(21) KM_TYPE_NR
59809 };
59810
59811 #undef KMAP_D
59812 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59813 index 9ceb03b..2efbcbd 100644
59814 --- a/include/asm-generic/local.h
59815 +++ b/include/asm-generic/local.h
59816 @@ -39,6 +39,7 @@ typedef struct
59817 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59818 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59819 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59820 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59821
59822 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59823 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59824 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59825 index 725612b..9cc513a 100644
59826 --- a/include/asm-generic/pgtable-nopmd.h
59827 +++ b/include/asm-generic/pgtable-nopmd.h
59828 @@ -1,14 +1,19 @@
59829 #ifndef _PGTABLE_NOPMD_H
59830 #define _PGTABLE_NOPMD_H
59831
59832 -#ifndef __ASSEMBLY__
59833 -
59834 #include <asm-generic/pgtable-nopud.h>
59835
59836 -struct mm_struct;
59837 -
59838 #define __PAGETABLE_PMD_FOLDED
59839
59840 +#define PMD_SHIFT PUD_SHIFT
59841 +#define PTRS_PER_PMD 1
59842 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59843 +#define PMD_MASK (~(PMD_SIZE-1))
59844 +
59845 +#ifndef __ASSEMBLY__
59846 +
59847 +struct mm_struct;
59848 +
59849 /*
59850 * Having the pmd type consist of a pud gets the size right, and allows
59851 * us to conceptually access the pud entry that this pmd is folded into
59852 @@ -16,11 +21,6 @@ struct mm_struct;
59853 */
59854 typedef struct { pud_t pud; } pmd_t;
59855
59856 -#define PMD_SHIFT PUD_SHIFT
59857 -#define PTRS_PER_PMD 1
59858 -#define PMD_SIZE (1UL << PMD_SHIFT)
59859 -#define PMD_MASK (~(PMD_SIZE-1))
59860 -
59861 /*
59862 * The "pud_xxx()" functions here are trivial for a folded two-level
59863 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59864 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59865 index 810431d..0ec4804f 100644
59866 --- a/include/asm-generic/pgtable-nopud.h
59867 +++ b/include/asm-generic/pgtable-nopud.h
59868 @@ -1,10 +1,15 @@
59869 #ifndef _PGTABLE_NOPUD_H
59870 #define _PGTABLE_NOPUD_H
59871
59872 -#ifndef __ASSEMBLY__
59873 -
59874 #define __PAGETABLE_PUD_FOLDED
59875
59876 +#define PUD_SHIFT PGDIR_SHIFT
59877 +#define PTRS_PER_PUD 1
59878 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59879 +#define PUD_MASK (~(PUD_SIZE-1))
59880 +
59881 +#ifndef __ASSEMBLY__
59882 +
59883 /*
59884 * Having the pud type consist of a pgd gets the size right, and allows
59885 * us to conceptually access the pgd entry that this pud is folded into
59886 @@ -12,11 +17,6 @@
59887 */
59888 typedef struct { pgd_t pgd; } pud_t;
59889
59890 -#define PUD_SHIFT PGDIR_SHIFT
59891 -#define PTRS_PER_PUD 1
59892 -#define PUD_SIZE (1UL << PUD_SHIFT)
59893 -#define PUD_MASK (~(PUD_SIZE-1))
59894 -
59895 /*
59896 * The "pgd_xxx()" functions here are trivial for a folded two-level
59897 * setup: the pud is never bad, and a pud always exists (as it's folded
59898 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
59899 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
59900
59901 #define pgd_populate(mm, pgd, pud) do { } while (0)
59902 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
59903 /*
59904 * (puds are folded into pgds so this doesn't get actually called,
59905 * but the define is needed for a generic inline function.)
59906 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59907 index a03c098..19751cf 100644
59908 --- a/include/asm-generic/pgtable.h
59909 +++ b/include/asm-generic/pgtable.h
59910 @@ -445,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
59911 #endif /* __HAVE_ARCH_PMD_WRITE */
59912 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
59913
59914 +#ifndef __HAVE_ARCH_READ_PMD_ATOMIC
59915 +static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
59916 +{
59917 + /*
59918 + * Depend on compiler for an atomic pmd read. NOTE: this is
59919 + * only going to work, if the pmdval_t isn't larger than
59920 + * an unsigned long.
59921 + */
59922 + return *pmdp;
59923 +}
59924 +#endif /* __HAVE_ARCH_READ_PMD_ATOMIC */
59925 +
59926 /*
59927 * This function is meant to be used by sites walking pagetables with
59928 * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
59929 @@ -458,11 +470,17 @@ static inline int pmd_write(pmd_t pmd)
59930 * undefined so behaving like if the pmd was none is safe (because it
59931 * can return none anyway). The compiler level barrier() is critically
59932 * important to compute the two checks atomically on the same pmdval.
59933 + *
59934 + * For 32bit kernels with a 64bit large pmd_t this automatically takes
59935 + * care of reading the pmd atomically to avoid SMP race conditions
59936 + * against pmd_populate() when the mmap_sem is hold for reading by the
59937 + * caller (a special atomic read not done by "gcc" as in the generic
59938 + * version above, is also needed when THP is disabled because the page
59939 + * fault can populate the pmd from under us).
59940 */
59941 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
59942 {
59943 - /* depend on compiler for an atomic pmd read */
59944 - pmd_t pmdval = *pmd;
59945 + pmd_t pmdval = read_pmd_atomic(pmd);
59946 /*
59947 * The barrier will stabilize the pmdval in a register or on
59948 * the stack so that it will stop changing under the code.
59949 @@ -502,6 +520,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
59950 #endif
59951 }
59952
59953 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59954 +static inline unsigned long pax_open_kernel(void) { return 0; }
59955 +#endif
59956 +
59957 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59958 +static inline unsigned long pax_close_kernel(void) { return 0; }
59959 +#endif
59960 +
59961 #endif /* CONFIG_MMU */
59962
59963 #endif /* !__ASSEMBLY__ */
59964 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59965 index b5e2e4c..6a5373e 100644
59966 --- a/include/asm-generic/vmlinux.lds.h
59967 +++ b/include/asm-generic/vmlinux.lds.h
59968 @@ -217,6 +217,7 @@
59969 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59970 VMLINUX_SYMBOL(__start_rodata) = .; \
59971 *(.rodata) *(.rodata.*) \
59972 + *(.data..read_only) \
59973 *(__vermagic) /* Kernel version magic */ \
59974 . = ALIGN(8); \
59975 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59976 @@ -722,17 +723,18 @@
59977 * section in the linker script will go there too. @phdr should have
59978 * a leading colon.
59979 *
59980 - * Note that this macros defines __per_cpu_load as an absolute symbol.
59981 + * Note that this macros defines per_cpu_load as an absolute symbol.
59982 * If there is no need to put the percpu section at a predetermined
59983 * address, use PERCPU_SECTION.
59984 */
59985 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59986 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59987 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59988 + per_cpu_load = .; \
59989 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59990 - LOAD_OFFSET) { \
59991 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59992 PERCPU_INPUT(cacheline) \
59993 } phdr \
59994 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59995 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59996
59997 /**
59998 * PERCPU_SECTION - define output section for percpu area, simple version
59999 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
60000 index 92f0981..d44a37c 100644
60001 --- a/include/drm/drmP.h
60002 +++ b/include/drm/drmP.h
60003 @@ -72,6 +72,7 @@
60004 #include <linux/workqueue.h>
60005 #include <linux/poll.h>
60006 #include <asm/pgalloc.h>
60007 +#include <asm/local.h>
60008 #include "drm.h"
60009
60010 #include <linux/idr.h>
60011 @@ -1038,7 +1039,7 @@ struct drm_device {
60012
60013 /** \name Usage Counters */
60014 /*@{ */
60015 - int open_count; /**< Outstanding files open */
60016 + local_t open_count; /**< Outstanding files open */
60017 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
60018 atomic_t vma_count; /**< Outstanding vma areas open */
60019 int buf_use; /**< Buffers in use -- cannot alloc */
60020 @@ -1049,7 +1050,7 @@ struct drm_device {
60021 /*@{ */
60022 unsigned long counters;
60023 enum drm_stat_type types[15];
60024 - atomic_t counts[15];
60025 + atomic_unchecked_t counts[15];
60026 /*@} */
60027
60028 struct list_head filelist;
60029 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
60030 index 37515d1..34fa8b0 100644
60031 --- a/include/drm/drm_crtc_helper.h
60032 +++ b/include/drm/drm_crtc_helper.h
60033 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
60034
60035 /* disable crtc when not in use - more explicit than dpms off */
60036 void (*disable)(struct drm_crtc *crtc);
60037 -};
60038 +} __no_const;
60039
60040 struct drm_encoder_helper_funcs {
60041 void (*dpms)(struct drm_encoder *encoder, int mode);
60042 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60043 struct drm_connector *connector);
60044 /* disable encoder when not in use - more explicit than dpms off */
60045 void (*disable)(struct drm_encoder *encoder);
60046 -};
60047 +} __no_const;
60048
60049 struct drm_connector_helper_funcs {
60050 int (*get_modes)(struct drm_connector *connector);
60051 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60052 index 26c1f78..6722682 100644
60053 --- a/include/drm/ttm/ttm_memory.h
60054 +++ b/include/drm/ttm/ttm_memory.h
60055 @@ -47,7 +47,7 @@
60056
60057 struct ttm_mem_shrink {
60058 int (*do_shrink) (struct ttm_mem_shrink *);
60059 -};
60060 +} __no_const;
60061
60062 /**
60063 * struct ttm_mem_global - Global memory accounting structure.
60064 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60065 index e86dfca..40cc55f 100644
60066 --- a/include/linux/a.out.h
60067 +++ b/include/linux/a.out.h
60068 @@ -39,6 +39,14 @@ enum machine_type {
60069 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60070 };
60071
60072 +/* Constants for the N_FLAGS field */
60073 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60074 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60075 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60076 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60077 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60078 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60079 +
60080 #if !defined (N_MAGIC)
60081 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60082 #endif
60083 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60084 index f4ff882..84b53a6 100644
60085 --- a/include/linux/atmdev.h
60086 +++ b/include/linux/atmdev.h
60087 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60088 #endif
60089
60090 struct k_atm_aal_stats {
60091 -#define __HANDLE_ITEM(i) atomic_t i
60092 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60093 __AAL_STAT_ITEMS
60094 #undef __HANDLE_ITEM
60095 };
60096 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
60097 index 0092102..8a801b4 100644
60098 --- a/include/linux/binfmts.h
60099 +++ b/include/linux/binfmts.h
60100 @@ -89,6 +89,7 @@ struct linux_binfmt {
60101 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60102 int (*load_shlib)(struct file *);
60103 int (*core_dump)(struct coredump_params *cprm);
60104 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60105 unsigned long min_coredump; /* minimal dump size */
60106 };
60107
60108 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
60109 index 606cf33..b72c577 100644
60110 --- a/include/linux/blkdev.h
60111 +++ b/include/linux/blkdev.h
60112 @@ -1379,7 +1379,7 @@ struct block_device_operations {
60113 /* this callback is with swap_lock and sometimes page table lock held */
60114 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60115 struct module *owner;
60116 -};
60117 +} __do_const;
60118
60119 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
60120 unsigned long);
60121 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
60122 index 4d1a074..88f929a 100644
60123 --- a/include/linux/blktrace_api.h
60124 +++ b/include/linux/blktrace_api.h
60125 @@ -162,7 +162,7 @@ struct blk_trace {
60126 struct dentry *dir;
60127 struct dentry *dropped_file;
60128 struct dentry *msg_file;
60129 - atomic_t dropped;
60130 + atomic_unchecked_t dropped;
60131 };
60132
60133 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
60134 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60135 index 83195fb..0b0f77d 100644
60136 --- a/include/linux/byteorder/little_endian.h
60137 +++ b/include/linux/byteorder/little_endian.h
60138 @@ -42,51 +42,51 @@
60139
60140 static inline __le64 __cpu_to_le64p(const __u64 *p)
60141 {
60142 - return (__force __le64)*p;
60143 + return (__force const __le64)*p;
60144 }
60145 static inline __u64 __le64_to_cpup(const __le64 *p)
60146 {
60147 - return (__force __u64)*p;
60148 + return (__force const __u64)*p;
60149 }
60150 static inline __le32 __cpu_to_le32p(const __u32 *p)
60151 {
60152 - return (__force __le32)*p;
60153 + return (__force const __le32)*p;
60154 }
60155 static inline __u32 __le32_to_cpup(const __le32 *p)
60156 {
60157 - return (__force __u32)*p;
60158 + return (__force const __u32)*p;
60159 }
60160 static inline __le16 __cpu_to_le16p(const __u16 *p)
60161 {
60162 - return (__force __le16)*p;
60163 + return (__force const __le16)*p;
60164 }
60165 static inline __u16 __le16_to_cpup(const __le16 *p)
60166 {
60167 - return (__force __u16)*p;
60168 + return (__force const __u16)*p;
60169 }
60170 static inline __be64 __cpu_to_be64p(const __u64 *p)
60171 {
60172 - return (__force __be64)__swab64p(p);
60173 + return (__force const __be64)__swab64p(p);
60174 }
60175 static inline __u64 __be64_to_cpup(const __be64 *p)
60176 {
60177 - return __swab64p((__u64 *)p);
60178 + return __swab64p((const __u64 *)p);
60179 }
60180 static inline __be32 __cpu_to_be32p(const __u32 *p)
60181 {
60182 - return (__force __be32)__swab32p(p);
60183 + return (__force const __be32)__swab32p(p);
60184 }
60185 static inline __u32 __be32_to_cpup(const __be32 *p)
60186 {
60187 - return __swab32p((__u32 *)p);
60188 + return __swab32p((const __u32 *)p);
60189 }
60190 static inline __be16 __cpu_to_be16p(const __u16 *p)
60191 {
60192 - return (__force __be16)__swab16p(p);
60193 + return (__force const __be16)__swab16p(p);
60194 }
60195 static inline __u16 __be16_to_cpup(const __be16 *p)
60196 {
60197 - return __swab16p((__u16 *)p);
60198 + return __swab16p((const __u16 *)p);
60199 }
60200 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60201 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60202 diff --git a/include/linux/cache.h b/include/linux/cache.h
60203 index 4c57065..4307975 100644
60204 --- a/include/linux/cache.h
60205 +++ b/include/linux/cache.h
60206 @@ -16,6 +16,10 @@
60207 #define __read_mostly
60208 #endif
60209
60210 +#ifndef __read_only
60211 +#define __read_only __read_mostly
60212 +#endif
60213 +
60214 #ifndef ____cacheline_aligned
60215 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60216 #endif
60217 diff --git a/include/linux/capability.h b/include/linux/capability.h
60218 index 12d52de..b5f7fa7 100644
60219 --- a/include/linux/capability.h
60220 +++ b/include/linux/capability.h
60221 @@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
60222 extern bool capable(int cap);
60223 extern bool ns_capable(struct user_namespace *ns, int cap);
60224 extern bool nsown_capable(int cap);
60225 +extern bool capable_nolog(int cap);
60226 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60227
60228 /* audit system wants to get cap info from files as well */
60229 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
60230 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60231 index 04ffb2e..6799180 100644
60232 --- a/include/linux/cleancache.h
60233 +++ b/include/linux/cleancache.h
60234 @@ -31,7 +31,7 @@ struct cleancache_ops {
60235 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
60236 void (*flush_inode)(int, struct cleancache_filekey);
60237 void (*flush_fs)(int);
60238 -};
60239 +} __no_const;
60240
60241 extern struct cleancache_ops
60242 cleancache_register_ops(struct cleancache_ops *ops);
60243 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60244 index 2f40791..a62d196 100644
60245 --- a/include/linux/compiler-gcc4.h
60246 +++ b/include/linux/compiler-gcc4.h
60247 @@ -32,6 +32,16 @@
60248 #define __linktime_error(message) __attribute__((__error__(message)))
60249
60250 #if __GNUC_MINOR__ >= 5
60251 +
60252 +#ifdef CONSTIFY_PLUGIN
60253 +#define __no_const __attribute__((no_const))
60254 +#define __do_const __attribute__((do_const))
60255 +#endif
60256 +
60257 +#ifdef SIZE_OVERFLOW_PLUGIN
60258 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
60259 +#endif
60260 +
60261 /*
60262 * Mark a position in code as unreachable. This can be used to
60263 * suppress control flow warnings after asm blocks that transfer
60264 @@ -47,6 +57,11 @@
60265 #define __noclone __attribute__((__noclone__))
60266
60267 #endif
60268 +
60269 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60270 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60271 +#define __bos0(ptr) __bos((ptr), 0)
60272 +#define __bos1(ptr) __bos((ptr), 1)
60273 #endif
60274
60275 #if __GNUC_MINOR__ > 0
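
compiler-gcc4.h above wires up the plugin attributes (__no_const, __do_const, __size_overflow) and, on gcc 4.5+, __alloc_size plus the __bos helpers, which are thin wrappers around __builtin_object_size(). The alloc_size attribute tells gcc which argument carries the size of the returned allocation, so object-size tracking can see through custom allocators. A minimal standalone example, gcc-specific (my_alloc is an invented function):

#include <stdio.h>
#include <stdlib.h>

/* Tell gcc that argument 1 is the size of the object my_alloc() returns. */
static void *my_alloc(size_t n) __attribute__((alloc_size(1)));

static void *my_alloc(size_t n)
{
        return malloc(n);
}

int main(void)
{
        char *p = my_alloc(32);

        /* With optimization enabled, gcc can report 32 here instead of
         * (size_t)-1, which is what FORTIFY-style bounds checks build on. */
        printf("%zu\n", __builtin_object_size(p, 0));
        free(p);
        return 0;
}
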
60276 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60277 index 4a24354..7149ac2 100644
60278 --- a/include/linux/compiler.h
60279 +++ b/include/linux/compiler.h
60280 @@ -5,31 +5,62 @@
60281
60282 #ifdef __CHECKER__
60283 # define __user __attribute__((noderef, address_space(1)))
60284 +# define __force_user __force __user
60285 # define __kernel __attribute__((address_space(0)))
60286 +# define __force_kernel __force __kernel
60287 # define __safe __attribute__((safe))
60288 # define __force __attribute__((force))
60289 # define __nocast __attribute__((nocast))
60290 # define __iomem __attribute__((noderef, address_space(2)))
60291 +# define __force_iomem __force __iomem
60292 # define __acquires(x) __attribute__((context(x,0,1)))
60293 # define __releases(x) __attribute__((context(x,1,0)))
60294 # define __acquire(x) __context__(x,1)
60295 # define __release(x) __context__(x,-1)
60296 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60297 # define __percpu __attribute__((noderef, address_space(3)))
60298 +# define __force_percpu __force __percpu
60299 #ifdef CONFIG_SPARSE_RCU_POINTER
60300 # define __rcu __attribute__((noderef, address_space(4)))
60301 +# define __force_rcu __force __rcu
60302 #else
60303 # define __rcu
60304 +# define __force_rcu
60305 #endif
60306 extern void __chk_user_ptr(const volatile void __user *);
60307 extern void __chk_io_ptr(const volatile void __iomem *);
60308 +#elif defined(CHECKER_PLUGIN)
60309 +//# define __user
60310 +//# define __force_user
60311 +//# define __kernel
60312 +//# define __force_kernel
60313 +# define __safe
60314 +# define __force
60315 +# define __nocast
60316 +# define __iomem
60317 +# define __force_iomem
60318 +# define __chk_user_ptr(x) (void)0
60319 +# define __chk_io_ptr(x) (void)0
60320 +# define __builtin_warning(x, y...) (1)
60321 +# define __acquires(x)
60322 +# define __releases(x)
60323 +# define __acquire(x) (void)0
60324 +# define __release(x) (void)0
60325 +# define __cond_lock(x,c) (c)
60326 +# define __percpu
60327 +# define __force_percpu
60328 +# define __rcu
60329 +# define __force_rcu
60330 #else
60331 # define __user
60332 +# define __force_user
60333 # define __kernel
60334 +# define __force_kernel
60335 # define __safe
60336 # define __force
60337 # define __nocast
60338 # define __iomem
60339 +# define __force_iomem
60340 # define __chk_user_ptr(x) (void)0
60341 # define __chk_io_ptr(x) (void)0
60342 # define __builtin_warning(x, y...) (1)
60343 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
60344 # define __release(x) (void)0
60345 # define __cond_lock(x,c) (c)
60346 # define __percpu
60347 +# define __force_percpu
60348 # define __rcu
60349 +# define __force_rcu
60350 #endif
60351
60352 #ifdef __KERNEL__
60353 @@ -264,6 +297,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60354 # define __attribute_const__ /* unimplemented */
60355 #endif
60356
60357 +#ifndef __no_const
60358 +# define __no_const
60359 +#endif
60360 +
60361 +#ifndef __do_const
60362 +# define __do_const
60363 +#endif
60364 +
60365 +#ifndef __size_overflow
60366 +# define __size_overflow(...)
60367 +#endif
60368 +
60369 /*
60370 * Tell gcc if a function is cold. The compiler will assume any path
60371 * directly leading to the call is unlikely.
60372 @@ -273,6 +318,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60373 #define __cold
60374 #endif
60375
60376 +#ifndef __alloc_size
60377 +#define __alloc_size(...)
60378 +#endif
60379 +
60380 +#ifndef __bos
60381 +#define __bos(ptr, arg)
60382 +#endif
60383 +
60384 +#ifndef __bos0
60385 +#define __bos0(ptr)
60386 +#endif
60387 +
60388 +#ifndef __bos1
60389 +#define __bos1(ptr)
60390 +#endif
60391 +
60392 /* Simple shorthand for a section definition */
60393 #ifndef __section
60394 # define __section(S) __attribute__ ((__section__(#S)))
60395 @@ -308,6 +369,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60396 * use is to mediate communication between process-level code and irq/NMI
60397 * handlers, all running on the same CPU.
60398 */
60399 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60400 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60401 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60402
60403 #endif /* __LINUX_COMPILER_H */
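
Note the semantic split introduced at the bottom of this hunk: ACCESS_ONCE() now goes through a pointer-to-const, so a store through it stops compiling, and writers are expected to move to ACCESS_ONCE_RW(), as the cpuset.h hunk just below does. A stand-alone sketch of the difference:

/* Sketch: the const-qualified ACCESS_ONCE() still reads fine, but writes must
 * use ACCESS_ONCE_RW(). Uses the GCC typeof extension, as the kernel does. */
#include <stdio.h>

#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

int main(void)
{
        int counter = 0;

        int snap = ACCESS_ONCE(counter);        /* reads work with either form */
        ACCESS_ONCE_RW(counter) = snap + 1;     /* writes need the _RW form */
        /* ACCESS_ONCE(counter) = 5;     error: assignment of read-only location */

        printf("%d\n", counter);
        return 0;
}
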
60404 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
60405 index e9eaec5..bfeb9bb 100644
60406 --- a/include/linux/cpuset.h
60407 +++ b/include/linux/cpuset.h
60408 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
60409 * nodemask.
60410 */
60411 smp_mb();
60412 - --ACCESS_ONCE(current->mems_allowed_change_disable);
60413 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
60414 }
60415
60416 static inline void set_mems_allowed(nodemask_t nodemask)
60417 diff --git a/include/linux/cred.h b/include/linux/cred.h
60418 index adadf71..6af5560 100644
60419 --- a/include/linux/cred.h
60420 +++ b/include/linux/cred.h
60421 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
60422 static inline void validate_process_creds(void)
60423 {
60424 }
60425 +static inline void validate_task_creds(struct task_struct *task)
60426 +{
60427 +}
60428 #endif
60429
60430 /**
60431 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
60432 index 8a94217..15d49e3 100644
60433 --- a/include/linux/crypto.h
60434 +++ b/include/linux/crypto.h
60435 @@ -365,7 +365,7 @@ struct cipher_tfm {
60436 const u8 *key, unsigned int keylen);
60437 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60438 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60439 -};
60440 +} __no_const;
60441
60442 struct hash_tfm {
60443 int (*init)(struct hash_desc *desc);
60444 @@ -386,13 +386,13 @@ struct compress_tfm {
60445 int (*cot_decompress)(struct crypto_tfm *tfm,
60446 const u8 *src, unsigned int slen,
60447 u8 *dst, unsigned int *dlen);
60448 -};
60449 +} __no_const;
60450
60451 struct rng_tfm {
60452 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
60453 unsigned int dlen);
60454 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
60455 -};
60456 +} __no_const;
60457
60458 #define crt_ablkcipher crt_u.ablkcipher
60459 #define crt_aead crt_u.aead
60460 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
60461 index 7925bf0..d5143d2 100644
60462 --- a/include/linux/decompress/mm.h
60463 +++ b/include/linux/decompress/mm.h
60464 @@ -77,7 +77,7 @@ static void free(void *where)
60465 * warnings when not needed (indeed large_malloc / large_free are not
60466 * needed by inflate */
60467
60468 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60469 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60470 #define free(a) kfree(a)
60471
60472 #define large_malloc(a) vmalloc(a)
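
The only change to decompress/mm.h is parenthesizing the macro argument, ordinary macro hygiene; in this particular expansion the argument is already delimited by the function call, so the change is purely defensive. The general failure mode the convention guards against, as a quick sketch (macro names mine):

/* Sketch: unparenthesized macro arguments interact badly with operator
 * precedence once the parameter lands inside an expression. */
#include <stdio.h>

#define BYTES_BAD(n)  n * sizeof(int)
#define BYTES_GOOD(n) ((n) * sizeof(int))

int main(void)
{
        printf("bad:  %zu\n", BYTES_BAD(2 + 2));   /* 2 + 2*4 = 10, not 16 */
        printf("good: %zu\n", BYTES_GOOD(2 + 2));  /* (2+2)*4 = 16 */
        return 0;
}
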
60473 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
60474 index e13117c..e9fc938 100644
60475 --- a/include/linux/dma-mapping.h
60476 +++ b/include/linux/dma-mapping.h
60477 @@ -46,7 +46,7 @@ struct dma_map_ops {
60478 u64 (*get_required_mask)(struct device *dev);
60479 #endif
60480 int is_phys;
60481 -};
60482 +} __do_const;
60483
60484 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
60485
60486 diff --git a/include/linux/efi.h b/include/linux/efi.h
60487 index 7cce0ea..c2085e4 100644
60488 --- a/include/linux/efi.h
60489 +++ b/include/linux/efi.h
60490 @@ -591,7 +591,7 @@ struct efivar_operations {
60491 efi_get_variable_t *get_variable;
60492 efi_get_next_variable_t *get_next_variable;
60493 efi_set_variable_t *set_variable;
60494 -};
60495 +} __no_const;
60496
60497 struct efivars {
60498 /*
60499 diff --git a/include/linux/elf.h b/include/linux/elf.h
60500 index 999b4f5..57753b4 100644
60501 --- a/include/linux/elf.h
60502 +++ b/include/linux/elf.h
60503 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
60504 #define PT_GNU_EH_FRAME 0x6474e550
60505
60506 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60507 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60508 +
60509 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60510 +
60511 +/* Constants for the e_flags field */
60512 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60513 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60514 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60515 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60516 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60517 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60518
60519 /*
60520 * Extended Numbering
60521 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
60522 #define DT_DEBUG 21
60523 #define DT_TEXTREL 22
60524 #define DT_JMPREL 23
60525 +#define DT_FLAGS 30
60526 + #define DF_TEXTREL 0x00000004
60527 #define DT_ENCODING 32
60528 #define OLD_DT_LOOS 0x60000000
60529 #define DT_LOOS 0x6000000d
60530 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
60531 #define PF_W 0x2
60532 #define PF_X 0x1
60533
60534 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60535 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60536 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60537 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60538 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60539 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60540 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60541 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60542 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60543 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60544 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60545 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60546 +
60547 typedef struct elf32_phdr{
60548 Elf32_Word p_type;
60549 Elf32_Off p_offset;
60550 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
60551 #define EI_OSABI 7
60552 #define EI_PAD 8
60553
60554 +#define EI_PAX 14
60555 +
60556 #define ELFMAG0 0x7f /* EI_MAG */
60557 #define ELFMAG1 'E'
60558 #define ELFMAG2 'L'
60559 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
60560 #define elf_note elf32_note
60561 #define elf_addr_t Elf32_Off
60562 #define Elf_Half Elf32_Half
60563 +#define elf_dyn Elf32_Dyn
60564
60565 #else
60566
60567 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
60568 #define elf_note elf64_note
60569 #define elf_addr_t Elf64_Off
60570 #define Elf_Half Elf64_Half
60571 +#define elf_dyn Elf64_Dyn
60572
60573 #endif
60574
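
The new PT_PAX_FLAGS program header and the PF_*/PF_NO* bit pairs carry per-binary PaX policy; EF_PAX_* is the older e_flags encoding stored at EI_PAX. Roughly what a paxctl-style tool does to read the marking, as a hedged user-space sketch (ELF64, little-endian host, error handling trimmed):

/* Sketch: find PT_PAX_FLAGS in an ELF64 binary and decode a few of the
 * enable/disable pairs defined above. Illustrative only. */
#include <elf.h>
#include <stdio.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
#endif
#define PF_PAGEEXEC   (1U << 4)
#define PF_NOPAGEEXEC (1U << 5)
#define PF_MPROTECT   (1U << 8)
#define PF_NOMPROTECT (1U << 9)
#define PF_RANDMMAP   (1U << 14)
#define PF_NORANDMMAP (1U << 15)

static const char *state(Elf64_Word flags, Elf64_Word on, Elf64_Word off)
{
        return (flags & off) ? "off" : (flags & on) ? "on" : "default";
}

int main(int argc, char **argv)
{
        if (argc != 2)
                return 1;
        FILE *f = fopen(argv[1], "rb");
        if (!f)
                return 1;

        Elf64_Ehdr eh;
        if (fread(&eh, sizeof(eh), 1, f) != 1)
                return 1;
        for (int i = 0; i < eh.e_phnum; i++) {
                Elf64_Phdr ph;
                fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * eh.e_phentsize), SEEK_SET);
                if (fread(&ph, sizeof(ph), 1, f) != 1)
                        break;
                if (ph.p_type != PT_PAX_FLAGS)
                        continue;
                printf("PAGEEXEC: %s\n", state(ph.p_flags, PF_PAGEEXEC, PF_NOPAGEEXEC));
                printf("MPROTECT: %s\n", state(ph.p_flags, PF_MPROTECT, PF_NOMPROTECT));
                printf("RANDMMAP: %s\n", state(ph.p_flags, PF_RANDMMAP, PF_NORANDMMAP));
        }
        fclose(f);
        return 0;
}
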
60575 diff --git a/include/linux/filter.h b/include/linux/filter.h
60576 index 8eeb205..d59bfa2 100644
60577 --- a/include/linux/filter.h
60578 +++ b/include/linux/filter.h
60579 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60580
60581 struct sk_buff;
60582 struct sock;
60583 +struct bpf_jit_work;
60584
60585 struct sk_filter
60586 {
60587 @@ -141,6 +142,9 @@ struct sk_filter
60588 unsigned int len; /* Number of filter blocks */
60589 unsigned int (*bpf_func)(const struct sk_buff *skb,
60590 const struct sock_filter *filter);
60591 +#ifdef CONFIG_BPF_JIT
60592 + struct bpf_jit_work *work;
60593 +#endif
60594 struct rcu_head rcu;
60595 struct sock_filter insns[0];
60596 };
60597 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60598 index 84ccf8e..2e9b14c 100644
60599 --- a/include/linux/firewire.h
60600 +++ b/include/linux/firewire.h
60601 @@ -428,7 +428,7 @@ struct fw_iso_context {
60602 union {
60603 fw_iso_callback_t sc;
60604 fw_iso_mc_callback_t mc;
60605 - } callback;
60606 + } __no_const callback;
60607 void *callback_data;
60608 };
60609
60610 diff --git a/include/linux/fs.h b/include/linux/fs.h
60611 index f4b6e06..d6ba573 100644
60612 --- a/include/linux/fs.h
60613 +++ b/include/linux/fs.h
60614 @@ -1628,7 +1628,8 @@ struct file_operations {
60615 int (*setlease)(struct file *, long, struct file_lock **);
60616 long (*fallocate)(struct file *file, int mode, loff_t offset,
60617 loff_t len);
60618 -};
60619 +} __do_const;
60620 +typedef struct file_operations __no_const file_operations_no_const;
60621
60622 struct inode_operations {
60623 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
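
Marking file_operations __do_const lets the constify plugin place every ops table in read-only data; the handful of places that really do build one at runtime switch to the file_operations_no_const typedef instead. The pattern, reduced to plain C without the plugin (struct name hypothetical):

/* Sketch: const ops table by default, with an explicitly writable copy for the
 * rare runtime-patched case. Stand-in types, no plugin involved. */
#include <stdio.h>
#include <stddef.h>

struct hypothetical_ops {
        int (*open)(void);
        int (*release)(void);
};
typedef struct hypothetical_ops hypothetical_ops_no_const;

static int my_open(void)    { return 42; }
static int my_release(void) { return 0; }

static const struct hypothetical_ops default_ops = {   /* lands in .rodata */
        .open    = my_open,
        .release = my_release,
};

int main(void)
{
        hypothetical_ops_no_const runtime_ops = default_ops;
        runtime_ops.release = NULL;     /* legal only on the non-const copy */

        printf("open() -> %d, release %s\n", default_ops.open(),
               runtime_ops.release ? "set" : "cleared");
        return 0;
}
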
60624 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60625 index 003dc0f..3c4ea97 100644
60626 --- a/include/linux/fs_struct.h
60627 +++ b/include/linux/fs_struct.h
60628 @@ -6,7 +6,7 @@
60629 #include <linux/seqlock.h>
60630
60631 struct fs_struct {
60632 - int users;
60633 + atomic_t users;
60634 spinlock_t lock;
60635 seqcount_t seq;
60636 int umask;
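
Converting fs_struct.users to atomic_t lets the reference count be taken and dropped without leaning on fs->lock; the corresponding .c hunks elsewhere in this patch move the ++/-- sites and comparisons over to the atomic_* helpers. A user-space sketch of the idiom, with GCC __atomic built-ins standing in for atomic_inc()/atomic_dec_and_test():

/* Sketch only: lock-free get/put on a users count, freeing on the last put. */
#include <stdio.h>
#include <stdlib.h>

struct fs_struct_like {
        int users;              /* plays the role of atomic_t users */
        int umask;
};

static void get_fs_like(struct fs_struct_like *fs)
{
        __atomic_add_fetch(&fs->users, 1, __ATOMIC_RELAXED);
}

static void put_fs_like(struct fs_struct_like *fs)
{
        if (__atomic_sub_fetch(&fs->users, 1, __ATOMIC_ACQ_REL) == 0)
                free(fs);       /* last reference; no spinlock needed for the count */
}

int main(void)
{
        struct fs_struct_like *fs = calloc(1, sizeof(*fs));
        fs->users = 1;
        get_fs_like(fs);
        put_fs_like(fs);
        put_fs_like(fs);        /* count reaches zero here and fs is freed */
        puts("done");
        return 0;
}
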
60637 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60638 index ce31408..b1ad003 100644
60639 --- a/include/linux/fscache-cache.h
60640 +++ b/include/linux/fscache-cache.h
60641 @@ -102,7 +102,7 @@ struct fscache_operation {
60642 fscache_operation_release_t release;
60643 };
60644
60645 -extern atomic_t fscache_op_debug_id;
60646 +extern atomic_unchecked_t fscache_op_debug_id;
60647 extern void fscache_op_work_func(struct work_struct *work);
60648
60649 extern void fscache_enqueue_operation(struct fscache_operation *);
60650 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60651 {
60652 INIT_WORK(&op->work, fscache_op_work_func);
60653 atomic_set(&op->usage, 1);
60654 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60655 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60656 op->processor = processor;
60657 op->release = release;
60658 INIT_LIST_HEAD(&op->pend_link);
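
atomic_unchecked_t is the opt-out type for PaX REFCOUNT: plain atomic_t gets overflow detection, while counters whose wraparound is harmless, like this debug id, are converted to the unchecked variant so they never trip the overflow handler. A rough user-space sketch of the policy split (the kernel uses arch-level overflow traps, not this):

/* Sketch only: a checked counter that refuses to wrap versus an unchecked one
 * that is allowed to. Shows the intent, not the real mechanism. */
#include <limits.h>
#include <stdio.h>

static int checked_inc(int *v)
{
        if (*v == INT_MAX)      /* real code would trap or saturate here */
                return -1;
        return ++*v;
}

static unsigned int unchecked_inc(unsigned int *v)
{
        return ++*v;            /* wraparound is acceptable by design */
}

int main(void)
{
        int refcount = INT_MAX;
        unsigned int debug_id = UINT_MAX;

        printf("checked:   %d\n", checked_inc(&refcount));     /* -1: refused */
        printf("unchecked: %u\n", unchecked_inc(&debug_id));   /* 0: wrapped  */
        return 0;
}
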
60659 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60660 index 2a53f10..0187fdf 100644
60661 --- a/include/linux/fsnotify.h
60662 +++ b/include/linux/fsnotify.h
60663 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60664 */
60665 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60666 {
60667 - return kstrdup(name, GFP_KERNEL);
60668 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60669 }
60670
60671 /*
60672 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60673 index 91d0e0a3..035666b 100644
60674 --- a/include/linux/fsnotify_backend.h
60675 +++ b/include/linux/fsnotify_backend.h
60676 @@ -105,6 +105,7 @@ struct fsnotify_ops {
60677 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60678 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60679 };
60680 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60681
60682 /*
60683 * A group is a "thing" that wants to receive notification about filesystem
60684 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60685 index c3da42d..c70e0df 100644
60686 --- a/include/linux/ftrace_event.h
60687 +++ b/include/linux/ftrace_event.h
60688 @@ -97,7 +97,7 @@ struct trace_event_functions {
60689 trace_print_func raw;
60690 trace_print_func hex;
60691 trace_print_func binary;
60692 -};
60693 +} __no_const;
60694
60695 struct trace_event {
60696 struct hlist_node node;
60697 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60698 extern int trace_add_event_call(struct ftrace_event_call *call);
60699 extern void trace_remove_event_call(struct ftrace_event_call *call);
60700
60701 -#define is_signed_type(type) (((type)(-1)) < 0)
60702 +#define is_signed_type(type) (((type)(-1)) < (type)1)
60703
60704 int trace_set_clr_event(const char *system, const char *event, int set);
60705
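
The is_signed_type() tweak compares against (type)1 rather than 0, which keeps both operands in the same type; for ordinary integer types the answer is unchanged, and the cast form is friendlier to unsigned and boolean types under stricter warning settings. A quick check that the two forms agree:

/* Sketch: old vs. new is_signed_type() give the same result for common types.
 * The old form may draw an "always false" warning for unsigned types with -Wextra. */
#include <stdio.h>

#define is_signed_type_old(type) (((type)(-1)) < 0)
#define is_signed_type_new(type) (((type)(-1)) < (type)1)

int main(void)
{
        printf("int:          old=%d new=%d\n",
               is_signed_type_old(int), is_signed_type_new(int));
        printf("unsigned int: old=%d new=%d\n",
               is_signed_type_old(unsigned int), is_signed_type_new(unsigned int));
        printf("char:         old=%d new=%d\n",
               is_signed_type_old(char), is_signed_type_new(char));
        printf("_Bool:        old=%d new=%d\n",
               is_signed_type_old(_Bool), is_signed_type_new(_Bool));
        return 0;
}
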
60706 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60707 index e61d319..0da8505 100644
60708 --- a/include/linux/genhd.h
60709 +++ b/include/linux/genhd.h
60710 @@ -185,7 +185,7 @@ struct gendisk {
60711 struct kobject *slave_dir;
60712
60713 struct timer_rand_state *random;
60714 - atomic_t sync_io; /* RAID */
60715 + atomic_unchecked_t sync_io; /* RAID */
60716 struct disk_events *ev;
60717 #ifdef CONFIG_BLK_DEV_INTEGRITY
60718 struct blk_integrity *integrity;
60719 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60720 new file mode 100644
60721 index 0000000..8a130b6
60722 --- /dev/null
60723 +++ b/include/linux/gracl.h
60724 @@ -0,0 +1,319 @@
60725 +#ifndef GR_ACL_H
60726 +#define GR_ACL_H
60727 +
60728 +#include <linux/grdefs.h>
60729 +#include <linux/resource.h>
60730 +#include <linux/capability.h>
60731 +#include <linux/dcache.h>
60732 +#include <asm/resource.h>
60733 +
60734 +/* Major status information */
60735 +
60736 +#define GR_VERSION "grsecurity 2.9"
60737 +#define GRSECURITY_VERSION 0x2900
60738 +
60739 +enum {
60740 + GR_SHUTDOWN = 0,
60741 + GR_ENABLE = 1,
60742 + GR_SPROLE = 2,
60743 + GR_RELOAD = 3,
60744 + GR_SEGVMOD = 4,
60745 + GR_STATUS = 5,
60746 + GR_UNSPROLE = 6,
60747 + GR_PASSSET = 7,
60748 + GR_SPROLEPAM = 8,
60749 +};
60750 +
60751 +/* Password setup definitions
60752 + * kernel/grhash.c */
60753 +enum {
60754 + GR_PW_LEN = 128,
60755 + GR_SALT_LEN = 16,
60756 + GR_SHA_LEN = 32,
60757 +};
60758 +
60759 +enum {
60760 + GR_SPROLE_LEN = 64,
60761 +};
60762 +
60763 +enum {
60764 + GR_NO_GLOB = 0,
60765 + GR_REG_GLOB,
60766 + GR_CREATE_GLOB
60767 +};
60768 +
60769 +#define GR_NLIMITS 32
60770 +
60771 +/* Begin Data Structures */
60772 +
60773 +struct sprole_pw {
60774 + unsigned char *rolename;
60775 + unsigned char salt[GR_SALT_LEN];
60776 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60777 +};
60778 +
60779 +struct name_entry {
60780 + __u32 key;
60781 + ino_t inode;
60782 + dev_t device;
60783 + char *name;
60784 + __u16 len;
60785 + __u8 deleted;
60786 + struct name_entry *prev;
60787 + struct name_entry *next;
60788 +};
60789 +
60790 +struct inodev_entry {
60791 + struct name_entry *nentry;
60792 + struct inodev_entry *prev;
60793 + struct inodev_entry *next;
60794 +};
60795 +
60796 +struct acl_role_db {
60797 + struct acl_role_label **r_hash;
60798 + __u32 r_size;
60799 +};
60800 +
60801 +struct inodev_db {
60802 + struct inodev_entry **i_hash;
60803 + __u32 i_size;
60804 +};
60805 +
60806 +struct name_db {
60807 + struct name_entry **n_hash;
60808 + __u32 n_size;
60809 +};
60810 +
60811 +struct crash_uid {
60812 + uid_t uid;
60813 + unsigned long expires;
60814 +};
60815 +
60816 +struct gr_hash_struct {
60817 + void **table;
60818 + void **nametable;
60819 + void *first;
60820 + __u32 table_size;
60821 + __u32 used_size;
60822 + int type;
60823 +};
60824 +
60825 +/* Userspace Grsecurity ACL data structures */
60826 +
60827 +struct acl_subject_label {
60828 + char *filename;
60829 + ino_t inode;
60830 + dev_t device;
60831 + __u32 mode;
60832 + kernel_cap_t cap_mask;
60833 + kernel_cap_t cap_lower;
60834 + kernel_cap_t cap_invert_audit;
60835 +
60836 + struct rlimit res[GR_NLIMITS];
60837 + __u32 resmask;
60838 +
60839 + __u8 user_trans_type;
60840 + __u8 group_trans_type;
60841 + uid_t *user_transitions;
60842 + gid_t *group_transitions;
60843 + __u16 user_trans_num;
60844 + __u16 group_trans_num;
60845 +
60846 + __u32 sock_families[2];
60847 + __u32 ip_proto[8];
60848 + __u32 ip_type;
60849 + struct acl_ip_label **ips;
60850 + __u32 ip_num;
60851 + __u32 inaddr_any_override;
60852 +
60853 + __u32 crashes;
60854 + unsigned long expires;
60855 +
60856 + struct acl_subject_label *parent_subject;
60857 + struct gr_hash_struct *hash;
60858 + struct acl_subject_label *prev;
60859 + struct acl_subject_label *next;
60860 +
60861 + struct acl_object_label **obj_hash;
60862 + __u32 obj_hash_size;
60863 + __u16 pax_flags;
60864 +};
60865 +
60866 +struct role_allowed_ip {
60867 + __u32 addr;
60868 + __u32 netmask;
60869 +
60870 + struct role_allowed_ip *prev;
60871 + struct role_allowed_ip *next;
60872 +};
60873 +
60874 +struct role_transition {
60875 + char *rolename;
60876 +
60877 + struct role_transition *prev;
60878 + struct role_transition *next;
60879 +};
60880 +
60881 +struct acl_role_label {
60882 + char *rolename;
60883 + uid_t uidgid;
60884 + __u16 roletype;
60885 +
60886 + __u16 auth_attempts;
60887 + unsigned long expires;
60888 +
60889 + struct acl_subject_label *root_label;
60890 + struct gr_hash_struct *hash;
60891 +
60892 + struct acl_role_label *prev;
60893 + struct acl_role_label *next;
60894 +
60895 + struct role_transition *transitions;
60896 + struct role_allowed_ip *allowed_ips;
60897 + uid_t *domain_children;
60898 + __u16 domain_child_num;
60899 +
60900 + umode_t umask;
60901 +
60902 + struct acl_subject_label **subj_hash;
60903 + __u32 subj_hash_size;
60904 +};
60905 +
60906 +struct user_acl_role_db {
60907 + struct acl_role_label **r_table;
60908 + __u32 num_pointers; /* Number of allocations to track */
60909 + __u32 num_roles; /* Number of roles */
60910 + __u32 num_domain_children; /* Number of domain children */
60911 + __u32 num_subjects; /* Number of subjects */
60912 + __u32 num_objects; /* Number of objects */
60913 +};
60914 +
60915 +struct acl_object_label {
60916 + char *filename;
60917 + ino_t inode;
60918 + dev_t device;
60919 + __u32 mode;
60920 +
60921 + struct acl_subject_label *nested;
60922 + struct acl_object_label *globbed;
60923 +
60924 + /* next two structures not used */
60925 +
60926 + struct acl_object_label *prev;
60927 + struct acl_object_label *next;
60928 +};
60929 +
60930 +struct acl_ip_label {
60931 + char *iface;
60932 + __u32 addr;
60933 + __u32 netmask;
60934 + __u16 low, high;
60935 + __u8 mode;
60936 + __u32 type;
60937 + __u32 proto[8];
60938 +
60939 + /* next two structures not used */
60940 +
60941 + struct acl_ip_label *prev;
60942 + struct acl_ip_label *next;
60943 +};
60944 +
60945 +struct gr_arg {
60946 + struct user_acl_role_db role_db;
60947 + unsigned char pw[GR_PW_LEN];
60948 + unsigned char salt[GR_SALT_LEN];
60949 + unsigned char sum[GR_SHA_LEN];
60950 + unsigned char sp_role[GR_SPROLE_LEN];
60951 + struct sprole_pw *sprole_pws;
60952 + dev_t segv_device;
60953 + ino_t segv_inode;
60954 + uid_t segv_uid;
60955 + __u16 num_sprole_pws;
60956 + __u16 mode;
60957 +};
60958 +
60959 +struct gr_arg_wrapper {
60960 + struct gr_arg *arg;
60961 + __u32 version;
60962 + __u32 size;
60963 +};
60964 +
60965 +struct subject_map {
60966 + struct acl_subject_label *user;
60967 + struct acl_subject_label *kernel;
60968 + struct subject_map *prev;
60969 + struct subject_map *next;
60970 +};
60971 +
60972 +struct acl_subj_map_db {
60973 + struct subject_map **s_hash;
60974 + __u32 s_size;
60975 +};
60976 +
60977 +/* End Data Structures Section */
60978 +
60979 +/* Hash functions generated by empirical testing by Brad Spengler
60980 + Makes good use of the low bits of the inode. Generally 0-1 times
60981 + in loop for successful match. 0-3 for unsuccessful match.
60982 + Shift/add algorithm with modulus of table size and an XOR*/
60983 +
60984 +static __inline__ unsigned int
60985 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60986 +{
60987 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
60988 +}
60989 +
60990 + static __inline__ unsigned int
60991 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60992 +{
60993 + return ((const unsigned long)userp % sz);
60994 +}
60995 +
60996 +static __inline__ unsigned int
60997 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60998 +{
60999 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
61000 +}
61001 +
61002 +static __inline__ unsigned int
61003 +nhash(const char *name, const __u16 len, const unsigned int sz)
61004 +{
61005 + return full_name_hash((const unsigned char *)name, len) % sz;
61006 +}
61007 +
61008 +#define FOR_EACH_ROLE_START(role) \
61009 + role = role_list; \
61010 + while (role) {
61011 +
61012 +#define FOR_EACH_ROLE_END(role) \
61013 + role = role->prev; \
61014 + }
61015 +
61016 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61017 + subj = NULL; \
61018 + iter = 0; \
61019 + while (iter < role->subj_hash_size) { \
61020 + if (subj == NULL) \
61021 + subj = role->subj_hash[iter]; \
61022 + if (subj == NULL) { \
61023 + iter++; \
61024 + continue; \
61025 + }
61026 +
61027 +#define FOR_EACH_SUBJECT_END(subj,iter) \
61028 + subj = subj->next; \
61029 + if (subj == NULL) \
61030 + iter++; \
61031 + }
61032 +
61033 +
61034 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61035 + subj = role->hash->first; \
61036 + while (subj != NULL) {
61037 +
61038 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61039 + subj = subj->next; \
61040 + }
61041 +
61042 +#endif
61043 +
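
The inline hash helpers and FOR_EACH_* macros above are what the RBAC core uses to walk its chained hash tables; per the comment, a successful fhash() lookup usually touches zero or one chain entries. A reduced stand-alone sketch of that lookup pattern (only the fields the walk needs, stand-in typedefs, not the kernel code):

/* Sketch: an fhash()-indexed, chained object lookup in the style of the
 * obj_hash tables declared above. */
#include <stdio.h>

typedef unsigned long my_ino_t;         /* stand-ins for the kernel typedefs */
typedef unsigned int  my_dev_t;

struct obj {
        my_ino_t inode;
        my_dev_t device;
        const char *filename;
        struct obj *next;               /* chain within one hash bucket */
};

static unsigned int fhash(my_ino_t ino, my_dev_t dev, unsigned int sz)
{
        return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

#define TABLE_SIZE 8
static struct obj *obj_hash[TABLE_SIZE];

static void insert(struct obj *o)
{
        unsigned int i = fhash(o->inode, o->device, TABLE_SIZE);
        o->next = obj_hash[i];
        obj_hash[i] = o;
}

static struct obj *lookup(my_ino_t ino, my_dev_t dev)
{
        struct obj *o = obj_hash[fhash(ino, dev, TABLE_SIZE)];

        for (; o; o = o->next)
                if (o->inode == ino && o->device == dev)
                        return o;
        return NULL;
}

int main(void)
{
        static struct obj etc = { 1234, 8, "/etc/passwd", NULL };

        insert(&etc);
        struct obj *hit = lookup(1234, 8);
        printf("%s\n", hit ? hit->filename : "(none)");
        return 0;
}
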
61044 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61045 new file mode 100644
61046 index 0000000..323ecf2
61047 --- /dev/null
61048 +++ b/include/linux/gralloc.h
61049 @@ -0,0 +1,9 @@
61050 +#ifndef __GRALLOC_H
61051 +#define __GRALLOC_H
61052 +
61053 +void acl_free_all(void);
61054 +int acl_alloc_stack_init(unsigned long size);
61055 +void *acl_alloc(unsigned long len);
61056 +void *acl_alloc_num(unsigned long num, unsigned long len);
61057 +
61058 +#endif
61059 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61060 new file mode 100644
61061 index 0000000..b30e9bc
61062 --- /dev/null
61063 +++ b/include/linux/grdefs.h
61064 @@ -0,0 +1,140 @@
61065 +#ifndef GRDEFS_H
61066 +#define GRDEFS_H
61067 +
61068 +/* Begin grsecurity status declarations */
61069 +
61070 +enum {
61071 + GR_READY = 0x01,
61072 + GR_STATUS_INIT = 0x00 // disabled state
61073 +};
61074 +
61075 +/* Begin ACL declarations */
61076 +
61077 +/* Role flags */
61078 +
61079 +enum {
61080 + GR_ROLE_USER = 0x0001,
61081 + GR_ROLE_GROUP = 0x0002,
61082 + GR_ROLE_DEFAULT = 0x0004,
61083 + GR_ROLE_SPECIAL = 0x0008,
61084 + GR_ROLE_AUTH = 0x0010,
61085 + GR_ROLE_NOPW = 0x0020,
61086 + GR_ROLE_GOD = 0x0040,
61087 + GR_ROLE_LEARN = 0x0080,
61088 + GR_ROLE_TPE = 0x0100,
61089 + GR_ROLE_DOMAIN = 0x0200,
61090 + GR_ROLE_PAM = 0x0400,
61091 + GR_ROLE_PERSIST = 0x0800
61092 +};
61093 +
61094 +/* ACL Subject and Object mode flags */
61095 +enum {
61096 + GR_DELETED = 0x80000000
61097 +};
61098 +
61099 +/* ACL Object-only mode flags */
61100 +enum {
61101 + GR_READ = 0x00000001,
61102 + GR_APPEND = 0x00000002,
61103 + GR_WRITE = 0x00000004,
61104 + GR_EXEC = 0x00000008,
61105 + GR_FIND = 0x00000010,
61106 + GR_INHERIT = 0x00000020,
61107 + GR_SETID = 0x00000040,
61108 + GR_CREATE = 0x00000080,
61109 + GR_DELETE = 0x00000100,
61110 + GR_LINK = 0x00000200,
61111 + GR_AUDIT_READ = 0x00000400,
61112 + GR_AUDIT_APPEND = 0x00000800,
61113 + GR_AUDIT_WRITE = 0x00001000,
61114 + GR_AUDIT_EXEC = 0x00002000,
61115 + GR_AUDIT_FIND = 0x00004000,
61116 + GR_AUDIT_INHERIT= 0x00008000,
61117 + GR_AUDIT_SETID = 0x00010000,
61118 + GR_AUDIT_CREATE = 0x00020000,
61119 + GR_AUDIT_DELETE = 0x00040000,
61120 + GR_AUDIT_LINK = 0x00080000,
61121 + GR_PTRACERD = 0x00100000,
61122 + GR_NOPTRACE = 0x00200000,
61123 + GR_SUPPRESS = 0x00400000,
61124 + GR_NOLEARN = 0x00800000,
61125 + GR_INIT_TRANSFER= 0x01000000
61126 +};
61127 +
61128 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61129 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61130 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61131 +
61132 +/* ACL subject-only mode flags */
61133 +enum {
61134 + GR_KILL = 0x00000001,
61135 + GR_VIEW = 0x00000002,
61136 + GR_PROTECTED = 0x00000004,
61137 + GR_LEARN = 0x00000008,
61138 + GR_OVERRIDE = 0x00000010,
61139 + /* just a placeholder, this mode is only used in userspace */
61140 + GR_DUMMY = 0x00000020,
61141 + GR_PROTSHM = 0x00000040,
61142 + GR_KILLPROC = 0x00000080,
61143 + GR_KILLIPPROC = 0x00000100,
61144 + /* just a placeholder, this mode is only used in userspace */
61145 + GR_NOTROJAN = 0x00000200,
61146 + GR_PROTPROCFD = 0x00000400,
61147 + GR_PROCACCT = 0x00000800,
61148 + GR_RELAXPTRACE = 0x00001000,
61149 + GR_NESTED = 0x00002000,
61150 + GR_INHERITLEARN = 0x00004000,
61151 + GR_PROCFIND = 0x00008000,
61152 + GR_POVERRIDE = 0x00010000,
61153 + GR_KERNELAUTH = 0x00020000,
61154 + GR_ATSECURE = 0x00040000,
61155 + GR_SHMEXEC = 0x00080000
61156 +};
61157 +
61158 +enum {
61159 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61160 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61161 + GR_PAX_ENABLE_MPROTECT = 0x0004,
61162 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
61163 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61164 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61165 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61166 + GR_PAX_DISABLE_MPROTECT = 0x0400,
61167 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
61168 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61169 +};
61170 +
61171 +enum {
61172 + GR_ID_USER = 0x01,
61173 + GR_ID_GROUP = 0x02,
61174 +};
61175 +
61176 +enum {
61177 + GR_ID_ALLOW = 0x01,
61178 + GR_ID_DENY = 0x02,
61179 +};
61180 +
61181 +#define GR_CRASH_RES 31
61182 +#define GR_UIDTABLE_MAX 500
61183 +
61184 +/* begin resource learning section */
61185 +enum {
61186 + GR_RLIM_CPU_BUMP = 60,
61187 + GR_RLIM_FSIZE_BUMP = 50000,
61188 + GR_RLIM_DATA_BUMP = 10000,
61189 + GR_RLIM_STACK_BUMP = 1000,
61190 + GR_RLIM_CORE_BUMP = 10000,
61191 + GR_RLIM_RSS_BUMP = 500000,
61192 + GR_RLIM_NPROC_BUMP = 1,
61193 + GR_RLIM_NOFILE_BUMP = 5,
61194 + GR_RLIM_MEMLOCK_BUMP = 50000,
61195 + GR_RLIM_AS_BUMP = 500000,
61196 + GR_RLIM_LOCKS_BUMP = 2,
61197 + GR_RLIM_SIGPENDING_BUMP = 5,
61198 + GR_RLIM_MSGQUEUE_BUMP = 10000,
61199 + GR_RLIM_NICE_BUMP = 1,
61200 + GR_RLIM_RTPRIO_BUMP = 1,
61201 + GR_RLIM_RTTIME_BUMP = 1000000
61202 +};
61203 +
61204 +#endif
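
The GR_PAX_ENABLE_*/GR_PAX_DISABLE_* pairs mirror the PF_*/PF_NO* pairs added to elf.h earlier in the patch: a subject can force a PaX feature on, force it off, or say nothing and inherit the system default. A sketch of the three-way decision one bit pair encodes (the disable-wins ordering here is my assumption, not taken from the patch):

/* Sketch: resolve one PaX feature from an enable/disable flag pair, falling
 * back to the system default when neither bit is set. Illustrative only. */
#include <stdio.h>

#define GR_PAX_ENABLE_MPROTECT  0x0004
#define GR_PAX_DISABLE_MPROTECT 0x0400

static int resolve_mprotect(unsigned short pax_flags, int system_default)
{
        if (pax_flags & GR_PAX_DISABLE_MPROTECT)
                return 0;
        if (pax_flags & GR_PAX_ENABLE_MPROTECT)
                return 1;
        return system_default;
}

int main(void)
{
        printf("%d\n", resolve_mprotect(GR_PAX_ENABLE_MPROTECT, 0));    /* 1 */
        printf("%d\n", resolve_mprotect(GR_PAX_DISABLE_MPROTECT, 1));   /* 0 */
        printf("%d\n", resolve_mprotect(0, 1));                         /* 1 */
        return 0;
}
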
61205 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61206 new file mode 100644
61207 index 0000000..da390f1
61208 --- /dev/null
61209 +++ b/include/linux/grinternal.h
61210 @@ -0,0 +1,221 @@
61211 +#ifndef __GRINTERNAL_H
61212 +#define __GRINTERNAL_H
61213 +
61214 +#ifdef CONFIG_GRKERNSEC
61215 +
61216 +#include <linux/fs.h>
61217 +#include <linux/mnt_namespace.h>
61218 +#include <linux/nsproxy.h>
61219 +#include <linux/gracl.h>
61220 +#include <linux/grdefs.h>
61221 +#include <linux/grmsg.h>
61222 +
61223 +void gr_add_learn_entry(const char *fmt, ...)
61224 + __attribute__ ((format (printf, 1, 2)));
61225 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61226 + const struct vfsmount *mnt);
61227 +__u32 gr_check_create(const struct dentry *new_dentry,
61228 + const struct dentry *parent,
61229 + const struct vfsmount *mnt, const __u32 mode);
61230 +int gr_check_protected_task(const struct task_struct *task);
61231 +__u32 to_gr_audit(const __u32 reqmode);
61232 +int gr_set_acls(const int type);
61233 +int gr_apply_subject_to_task(struct task_struct *task);
61234 +int gr_acl_is_enabled(void);
61235 +char gr_roletype_to_char(void);
61236 +
61237 +void gr_handle_alertkill(struct task_struct *task);
61238 +char *gr_to_filename(const struct dentry *dentry,
61239 + const struct vfsmount *mnt);
61240 +char *gr_to_filename1(const struct dentry *dentry,
61241 + const struct vfsmount *mnt);
61242 +char *gr_to_filename2(const struct dentry *dentry,
61243 + const struct vfsmount *mnt);
61244 +char *gr_to_filename3(const struct dentry *dentry,
61245 + const struct vfsmount *mnt);
61246 +
61247 +extern int grsec_enable_ptrace_readexec;
61248 +extern int grsec_enable_harden_ptrace;
61249 +extern int grsec_enable_link;
61250 +extern int grsec_enable_fifo;
61251 +extern int grsec_enable_execve;
61252 +extern int grsec_enable_shm;
61253 +extern int grsec_enable_execlog;
61254 +extern int grsec_enable_signal;
61255 +extern int grsec_enable_audit_ptrace;
61256 +extern int grsec_enable_forkfail;
61257 +extern int grsec_enable_time;
61258 +extern int grsec_enable_rofs;
61259 +extern int grsec_enable_chroot_shmat;
61260 +extern int grsec_enable_chroot_mount;
61261 +extern int grsec_enable_chroot_double;
61262 +extern int grsec_enable_chroot_pivot;
61263 +extern int grsec_enable_chroot_chdir;
61264 +extern int grsec_enable_chroot_chmod;
61265 +extern int grsec_enable_chroot_mknod;
61266 +extern int grsec_enable_chroot_fchdir;
61267 +extern int grsec_enable_chroot_nice;
61268 +extern int grsec_enable_chroot_execlog;
61269 +extern int grsec_enable_chroot_caps;
61270 +extern int grsec_enable_chroot_sysctl;
61271 +extern int grsec_enable_chroot_unix;
61272 +extern int grsec_enable_tpe;
61273 +extern int grsec_tpe_gid;
61274 +extern int grsec_enable_tpe_all;
61275 +extern int grsec_enable_tpe_invert;
61276 +extern int grsec_enable_socket_all;
61277 +extern int grsec_socket_all_gid;
61278 +extern int grsec_enable_socket_client;
61279 +extern int grsec_socket_client_gid;
61280 +extern int grsec_enable_socket_server;
61281 +extern int grsec_socket_server_gid;
61282 +extern int grsec_audit_gid;
61283 +extern int grsec_enable_group;
61284 +extern int grsec_enable_audit_textrel;
61285 +extern int grsec_enable_log_rwxmaps;
61286 +extern int grsec_enable_mount;
61287 +extern int grsec_enable_chdir;
61288 +extern int grsec_resource_logging;
61289 +extern int grsec_enable_blackhole;
61290 +extern int grsec_lastack_retries;
61291 +extern int grsec_enable_brute;
61292 +extern int grsec_lock;
61293 +
61294 +extern spinlock_t grsec_alert_lock;
61295 +extern unsigned long grsec_alert_wtime;
61296 +extern unsigned long grsec_alert_fyet;
61297 +
61298 +extern spinlock_t grsec_audit_lock;
61299 +
61300 +extern rwlock_t grsec_exec_file_lock;
61301 +
61302 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61303 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61304 + (tsk)->exec_file->f_vfsmnt) : "/")
61305 +
61306 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61307 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61308 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61309 +
61310 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61311 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
61312 + (tsk)->exec_file->f_vfsmnt) : "/")
61313 +
61314 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61315 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61316 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61317 +
61318 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
61319 +
61320 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
61321 +
61322 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61323 + (task)->pid, (cred)->uid, \
61324 + (cred)->euid, (cred)->gid, (cred)->egid, \
61325 + gr_parent_task_fullpath(task), \
61326 + (task)->real_parent->comm, (task)->real_parent->pid, \
61327 + (pcred)->uid, (pcred)->euid, \
61328 + (pcred)->gid, (pcred)->egid
61329 +
61330 +#define GR_CHROOT_CAPS {{ \
61331 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61332 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61333 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61334 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61335 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
61336 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61337 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
61338 +
61339 +#define security_learn(normal_msg,args...) \
61340 +({ \
61341 + read_lock(&grsec_exec_file_lock); \
61342 + gr_add_learn_entry(normal_msg "\n", ## args); \
61343 + read_unlock(&grsec_exec_file_lock); \
61344 +})
61345 +
61346 +enum {
61347 + GR_DO_AUDIT,
61348 + GR_DONT_AUDIT,
61349 + /* used for non-audit messages that we shouldn't kill the task on */
61350 + GR_DONT_AUDIT_GOOD
61351 +};
61352 +
61353 +enum {
61354 + GR_TTYSNIFF,
61355 + GR_RBAC,
61356 + GR_RBAC_STR,
61357 + GR_STR_RBAC,
61358 + GR_RBAC_MODE2,
61359 + GR_RBAC_MODE3,
61360 + GR_FILENAME,
61361 + GR_SYSCTL_HIDDEN,
61362 + GR_NOARGS,
61363 + GR_ONE_INT,
61364 + GR_ONE_INT_TWO_STR,
61365 + GR_ONE_STR,
61366 + GR_STR_INT,
61367 + GR_TWO_STR_INT,
61368 + GR_TWO_INT,
61369 + GR_TWO_U64,
61370 + GR_THREE_INT,
61371 + GR_FIVE_INT_TWO_STR,
61372 + GR_TWO_STR,
61373 + GR_THREE_STR,
61374 + GR_FOUR_STR,
61375 + GR_STR_FILENAME,
61376 + GR_FILENAME_STR,
61377 + GR_FILENAME_TWO_INT,
61378 + GR_FILENAME_TWO_INT_STR,
61379 + GR_TEXTREL,
61380 + GR_PTRACE,
61381 + GR_RESOURCE,
61382 + GR_CAP,
61383 + GR_SIG,
61384 + GR_SIG2,
61385 + GR_CRASH1,
61386 + GR_CRASH2,
61387 + GR_PSACCT,
61388 + GR_RWXMAP
61389 +};
61390 +
61391 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61392 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61393 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61394 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61395 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61396 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61397 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61398 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61399 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61400 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61401 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61402 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61403 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61404 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
61405 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
61406 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61407 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61408 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
61409 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
61410 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61411 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61412 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61413 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61414 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61415 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61416 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61417 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61418 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61419 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61420 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61421 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61422 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61423 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61424 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
61425 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
61426 +
61427 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61428 +
61429 +#endif
61430 +
61431 +#endif
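
All of the gr_log_* wrappers above funnel into gr_log_varargs(), passing one of the argtypes enum values so a single variadic routine knows how to pull and format each argument list. A compact stand-alone sketch of that dispatch pattern (not the kernel implementation, and only a few argument shapes):

/* Sketch: one variadic logger driven by an argument-type enum, in the style
 * of gr_log_varargs() and its wrapper macros. */
#include <stdarg.h>
#include <stdio.h>

enum { SK_NOARGS, SK_ONE_INT, SK_STR_INT };

static void log_varargs_sketch(const char *msg, int argtypes, ...)
{
        va_list ap;

        va_start(ap, argtypes);
        fputs("grsec: ", stdout);
        switch (argtypes) {
        case SK_NOARGS:
                fputs(msg, stdout);
                break;
        case SK_ONE_INT:
                printf(msg, va_arg(ap, int));
                break;
        case SK_STR_INT: {
                const char *s = va_arg(ap, const char *);
                int n = va_arg(ap, int);
                printf(msg, s, n);
                break;
        }
        }
        putchar('\n');
        va_end(ap);
}

#define log_noargs(msg)        log_varargs_sketch(msg, SK_NOARGS)
#define log_int(msg, num)      log_varargs_sketch(msg, SK_ONE_INT, num)
#define log_str_int(msg, s, n) log_varargs_sketch(msg, SK_STR_INT, s, n)

int main(void)
{
        log_noargs("time set by ");
        log_int("failed fork with errno %d by ", 12);
        log_str_int("denied writing FIFO %s of uid %d by ", "/tmp/fifo", 1000);
        return 0;
}
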
61432 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61433 new file mode 100644
61434 index 0000000..ae576a1
61435 --- /dev/null
61436 +++ b/include/linux/grmsg.h
61437 @@ -0,0 +1,109 @@
61438 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
61439 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
61440 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61441 +#define GR_STOPMOD_MSG "denied modification of module state by "
61442 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61443 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
61444 +#define GR_IOPERM_MSG "denied use of ioperm() by "
61445 +#define GR_IOPL_MSG "denied use of iopl() by "
61446 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
61447 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
61448 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
61449 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
61450 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
61451 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
61452 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
61453 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
61454 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
61455 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
61456 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
61457 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
61458 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
61459 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
61460 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
61461 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
61462 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
61463 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
61464 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
61465 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
61466 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
61467 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
61468 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
61469 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
61470 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
61471 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
61472 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
61473 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
61474 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
61475 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
61476 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
61477 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
61478 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
61479 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
61480 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
61481 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
61482 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
61483 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
61484 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
61485 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
61486 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
61487 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
61488 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
61489 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
61490 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
61491 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
61492 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
61493 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
61494 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61495 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61496 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61497 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61498 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61499 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61500 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61501 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61502 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61503 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61504 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61505 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61506 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
61507 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61508 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
61509 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
61510 +#define GR_NICE_CHROOT_MSG "denied priority change by "
61511 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61512 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61513 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61514 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61515 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61516 +#define GR_TIME_MSG "time set by "
61517 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61518 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61519 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61520 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
61521 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
61522 +#define GR_BIND_MSG "denied bind() by "
61523 +#define GR_CONNECT_MSG "denied connect() by "
61524 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61525 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61526 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
61527 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61528 +#define GR_CAP_ACL_MSG "use of %s denied for "
61529 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
61530 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
61531 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61532 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61533 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61534 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61535 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61536 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61537 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61538 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
61539 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61540 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
61541 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
61542 +#define GR_VM86_MSG "denied use of vm86 by "
61543 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
61544 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
61545 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
61546 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
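
Nearly every message above caps its string arguments with a precision, %.950s, %.256s, %.16s and so on, so a hostile path or task name cannot blow a fixed-size log buffer. What the precision does, in two lines:

/* Sketch: printf precision on %s truncates the argument, which keeps the
 * grsec log lines bounded no matter how long the incoming string is. */
#include <stdio.h>

int main(void)
{
        const char *comm = "averyveryverylongtaskname";

        printf("[%.16s:%d]\n", comm, 1234);     /* prints at most 16 chars of comm */
        return 0;
}
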
61547 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61548 new file mode 100644
61549 index 0000000..acd05db
61550 --- /dev/null
61551 +++ b/include/linux/grsecurity.h
61552 @@ -0,0 +1,232 @@
61553 +#ifndef GR_SECURITY_H
61554 +#define GR_SECURITY_H
61555 +#include <linux/fs.h>
61556 +#include <linux/fs_struct.h>
61557 +#include <linux/binfmts.h>
61558 +#include <linux/gracl.h>
61559 +
61560 +/* notify of brain-dead configs */
61561 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61562 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61563 +#endif
61564 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61565 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61566 +#endif
61567 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61568 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61569 +#endif
61570 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61571 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
61572 +#endif
61573 +
61574 +#include <linux/compat.h>
61575 +
61576 +struct user_arg_ptr {
61577 +#ifdef CONFIG_COMPAT
61578 + bool is_compat;
61579 +#endif
61580 + union {
61581 + const char __user *const __user *native;
61582 +#ifdef CONFIG_COMPAT
61583 + compat_uptr_t __user *compat;
61584 +#endif
61585 + } ptr;
61586 +};
61587 +
61588 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
61589 +void gr_handle_brute_check(void);
61590 +void gr_handle_kernel_exploit(void);
61591 +int gr_process_user_ban(void);
61592 +
61593 +char gr_roletype_to_char(void);
61594 +
61595 +int gr_acl_enable_at_secure(void);
61596 +
61597 +int gr_check_user_change(int real, int effective, int fs);
61598 +int gr_check_group_change(int real, int effective, int fs);
61599 +
61600 +void gr_del_task_from_ip_table(struct task_struct *p);
61601 +
61602 +int gr_pid_is_chrooted(struct task_struct *p);
61603 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
61604 +int gr_handle_chroot_nice(void);
61605 +int gr_handle_chroot_sysctl(const int op);
61606 +int gr_handle_chroot_setpriority(struct task_struct *p,
61607 + const int niceval);
61608 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61609 +int gr_handle_chroot_chroot(const struct dentry *dentry,
61610 + const struct vfsmount *mnt);
61611 +void gr_handle_chroot_chdir(struct path *path);
61612 +int gr_handle_chroot_chmod(const struct dentry *dentry,
61613 + const struct vfsmount *mnt, const int mode);
61614 +int gr_handle_chroot_mknod(const struct dentry *dentry,
61615 + const struct vfsmount *mnt, const int mode);
61616 +int gr_handle_chroot_mount(const struct dentry *dentry,
61617 + const struct vfsmount *mnt,
61618 + const char *dev_name);
61619 +int gr_handle_chroot_pivot(void);
61620 +int gr_handle_chroot_unix(const pid_t pid);
61621 +
61622 +int gr_handle_rawio(const struct inode *inode);
61623 +
61624 +void gr_handle_ioperm(void);
61625 +void gr_handle_iopl(void);
61626 +
61627 +umode_t gr_acl_umask(void);
61628 +
61629 +int gr_tpe_allow(const struct file *file);
61630 +
61631 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61632 +void gr_clear_chroot_entries(struct task_struct *task);
61633 +
61634 +void gr_log_forkfail(const int retval);
61635 +void gr_log_timechange(void);
61636 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61637 +void gr_log_chdir(const struct dentry *dentry,
61638 + const struct vfsmount *mnt);
61639 +void gr_log_chroot_exec(const struct dentry *dentry,
61640 + const struct vfsmount *mnt);
61641 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61642 +void gr_log_remount(const char *devname, const int retval);
61643 +void gr_log_unmount(const char *devname, const int retval);
61644 +void gr_log_mount(const char *from, const char *to, const int retval);
61645 +void gr_log_textrel(struct vm_area_struct *vma);
61646 +void gr_log_rwxmmap(struct file *file);
61647 +void gr_log_rwxmprotect(struct file *file);
61648 +
61649 +int gr_handle_follow_link(const struct inode *parent,
61650 + const struct inode *inode,
61651 + const struct dentry *dentry,
61652 + const struct vfsmount *mnt);
61653 +int gr_handle_fifo(const struct dentry *dentry,
61654 + const struct vfsmount *mnt,
61655 + const struct dentry *dir, const int flag,
61656 + const int acc_mode);
61657 +int gr_handle_hardlink(const struct dentry *dentry,
61658 + const struct vfsmount *mnt,
61659 + struct inode *inode,
61660 + const int mode, const char *to);
61661 +
61662 +int gr_is_capable(const int cap);
61663 +int gr_is_capable_nolog(const int cap);
61664 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61665 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61666 +
61667 +void gr_learn_resource(const struct task_struct *task, const int limit,
61668 + const unsigned long wanted, const int gt);
61669 +void gr_copy_label(struct task_struct *tsk);
61670 +void gr_handle_crash(struct task_struct *task, const int sig);
61671 +int gr_handle_signal(const struct task_struct *p, const int sig);
61672 +int gr_check_crash_uid(const uid_t uid);
61673 +int gr_check_protected_task(const struct task_struct *task);
61674 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61675 +int gr_acl_handle_mmap(const struct file *file,
61676 + const unsigned long prot);
61677 +int gr_acl_handle_mprotect(const struct file *file,
61678 + const unsigned long prot);
61679 +int gr_check_hidden_task(const struct task_struct *tsk);
61680 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61681 + const struct vfsmount *mnt);
61682 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
61683 + const struct vfsmount *mnt);
61684 +__u32 gr_acl_handle_access(const struct dentry *dentry,
61685 + const struct vfsmount *mnt, const int fmode);
61686 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61687 + const struct vfsmount *mnt, umode_t *mode);
61688 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
61689 + const struct vfsmount *mnt);
61690 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61691 + const struct vfsmount *mnt);
61692 +int gr_handle_ptrace(struct task_struct *task, const long request);
61693 +int gr_handle_proc_ptrace(struct task_struct *task);
61694 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
61695 + const struct vfsmount *mnt);
61696 +int gr_check_crash_exec(const struct file *filp);
61697 +int gr_acl_is_enabled(void);
61698 +void gr_set_kernel_label(struct task_struct *task);
61699 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
61700 + const gid_t gid);
61701 +int gr_set_proc_label(const struct dentry *dentry,
61702 + const struct vfsmount *mnt,
61703 + const int unsafe_flags);
61704 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61705 + const struct vfsmount *mnt);
61706 +__u32 gr_acl_handle_open(const struct dentry *dentry,
61707 + const struct vfsmount *mnt, int acc_mode);
61708 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
61709 + const struct dentry *p_dentry,
61710 + const struct vfsmount *p_mnt,
61711 + int open_flags, int acc_mode, const int imode);
61712 +void gr_handle_create(const struct dentry *dentry,
61713 + const struct vfsmount *mnt);
61714 +void gr_handle_proc_create(const struct dentry *dentry,
61715 + const struct inode *inode);
61716 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61717 + const struct dentry *parent_dentry,
61718 + const struct vfsmount *parent_mnt,
61719 + const int mode);
61720 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61721 + const struct dentry *parent_dentry,
61722 + const struct vfsmount *parent_mnt);
61723 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61724 + const struct vfsmount *mnt);
61725 +void gr_handle_delete(const ino_t ino, const dev_t dev);
61726 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61727 + const struct vfsmount *mnt);
61728 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61729 + const struct dentry *parent_dentry,
61730 + const struct vfsmount *parent_mnt,
61731 + const char *from);
61732 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61733 + const struct dentry *parent_dentry,
61734 + const struct vfsmount *parent_mnt,
61735 + const struct dentry *old_dentry,
61736 + const struct vfsmount *old_mnt, const char *to);
61737 +int gr_acl_handle_rename(struct dentry *new_dentry,
61738 + struct dentry *parent_dentry,
61739 + const struct vfsmount *parent_mnt,
61740 + struct dentry *old_dentry,
61741 + struct inode *old_parent_inode,
61742 + struct vfsmount *old_mnt, const char *newname);
61743 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61744 + struct dentry *old_dentry,
61745 + struct dentry *new_dentry,
61746 + struct vfsmount *mnt, const __u8 replace);
61747 +__u32 gr_check_link(const struct dentry *new_dentry,
61748 + const struct dentry *parent_dentry,
61749 + const struct vfsmount *parent_mnt,
61750 + const struct dentry *old_dentry,
61751 + const struct vfsmount *old_mnt);
61752 +int gr_acl_handle_filldir(const struct file *file, const char *name,
61753 + const unsigned int namelen, const ino_t ino);
61754 +
61755 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
61756 + const struct vfsmount *mnt);
61757 +void gr_acl_handle_exit(void);
61758 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
61759 +int gr_acl_handle_procpidmem(const struct task_struct *task);
61760 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61761 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61762 +void gr_audit_ptrace(struct task_struct *task);
61763 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61764 +
61765 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61766 +
61767 +#ifdef CONFIG_GRKERNSEC
61768 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61769 +void gr_handle_vm86(void);
61770 +void gr_handle_mem_readwrite(u64 from, u64 to);
61771 +
61772 +void gr_log_badprocpid(const char *entry);
61773 +
61774 +extern int grsec_enable_dmesg;
61775 +extern int grsec_disable_privio;
61776 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61777 +extern int grsec_enable_chroot_findtask;
61778 +#endif
61779 +#ifdef CONFIG_GRKERNSEC_SETXID
61780 +extern int grsec_enable_setxid;
61781 +#endif
61782 +#endif
61783 +
61784 +#endif
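The prototypes above form the RBAC decision hooks that the rest of the patch wires into the VFS. As a rough, hedged guide to the calling convention (the real callers live elsewhere in the patch), the __u32 handlers appear to return 0 when the policy denies an operation, so a caller looks roughly like the sketch below; the helper name, the error handling and the header name are assumptions.

/* illustrative caller; assumes the prototypes above live in <linux/grsecurity.h> */
#include <linux/grsecurity.h>
#include <linux/fs.h>
#include <linux/errno.h>

static int example_unlink_check(struct dentry *dentry, struct vfsmount *mnt)
{
	/* a zero return from the ACL handler is taken to mean "denied" */
	if (!gr_acl_handle_unlink(dentry, mnt))
		return -EACCES;
	return 0;
}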
61785 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61786 new file mode 100644
61787 index 0000000..e7ffaaf
61788 --- /dev/null
61789 +++ b/include/linux/grsock.h
61790 @@ -0,0 +1,19 @@
61791 +#ifndef __GRSOCK_H
61792 +#define __GRSOCK_H
61793 +
61794 +extern void gr_attach_curr_ip(const struct sock *sk);
61795 +extern int gr_handle_sock_all(const int family, const int type,
61796 + const int protocol);
61797 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61798 +extern int gr_handle_sock_server_other(const struct sock *sck);
61799 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61800 +extern int gr_search_connect(struct socket * sock,
61801 + struct sockaddr_in * addr);
61802 +extern int gr_search_bind(struct socket * sock,
61803 + struct sockaddr_in * addr);
61804 +extern int gr_search_listen(struct socket * sock);
61805 +extern int gr_search_accept(struct socket * sock);
61806 +extern int gr_search_socket(const int domain, const int type,
61807 + const int protocol);
61808 +
61809 +#endif
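grsock.h groups the socket-side checks. The sketch below is purely illustrative (the actual wiring is in net/socket.c elsewhere in the patch) and assumes that a zero return from gr_search_socket() means the socket is denied:

/* hypothetical gate at socket creation time */
#include <linux/grsock.h>
#include <linux/errno.h>

static int example_socket_gate(int family, int type, int protocol)
{
	if (!gr_search_socket(family, type, protocol))
		return -EACCES;
	return 0;
}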
61810 diff --git a/include/linux/hid.h b/include/linux/hid.h
61811 index 3a95da6..51986f1 100644
61812 --- a/include/linux/hid.h
61813 +++ b/include/linux/hid.h
61814 @@ -696,7 +696,7 @@ struct hid_ll_driver {
61815 unsigned int code, int value);
61816
61817 int (*parse)(struct hid_device *hdev);
61818 -};
61819 +} __no_const;
61820
61821 #define PM_HINT_FULLON 1<<5
61822 #define PM_HINT_NORMAL 1<<1
61823 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61824 index 3a93f73..b19d0b3 100644
61825 --- a/include/linux/highmem.h
61826 +++ b/include/linux/highmem.h
61827 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
61828 kunmap_atomic(kaddr, KM_USER0);
61829 }
61830
61831 +static inline void sanitize_highpage(struct page *page)
61832 +{
61833 + void *kaddr;
61834 + unsigned long flags;
61835 +
61836 + local_irq_save(flags);
61837 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
61838 + clear_page(kaddr);
61839 + kunmap_atomic(kaddr, KM_CLEARPAGE);
61840 + local_irq_restore(flags);
61841 +}
61842 +
61843 static inline void zero_user_segments(struct page *page,
61844 unsigned start1, unsigned end1,
61845 unsigned start2, unsigned end2)
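sanitize_highpage() zeroes a page through a short-lived atomic mapping with interrupts disabled, so it can be called from the page-free path, which is where the PaX memory-sanitize feature uses it in the rest of the patch. A minimal sketch of such a caller; the helper and its wiring are hypothetical:

/* hypothetical scrub-on-free helper built on the new primitive */
#include <linux/highmem.h>

static void example_scrub_on_free(struct page *page, int numpages)
{
	int i;

	for (i = 0; i < numpages; i++)
		sanitize_highpage(page + i);	/* clears each page via kmap_atomic */
}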
61846 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61847 index 8e25a91..551b161 100644
61848 --- a/include/linux/i2c.h
61849 +++ b/include/linux/i2c.h
61850 @@ -364,6 +364,7 @@ struct i2c_algorithm {
61851 /* To determine what the adapter supports */
61852 u32 (*functionality) (struct i2c_adapter *);
61853 };
61854 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61855
61856 /*
61857 * i2c_adapter is the structure used to identify a physical i2c bus along
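The i2c_algorithm_no_const typedef is the first of a pattern repeated below (if_team.h, abx500.h, netdevice.h, rfkill.h, seq_file.h): ops structures are treated as read-only by default under the constification changes, and the *_no_const alias marks the rare embedded instance that still has to be written at run time. A sketch of how a driver would use the writable copy; the driver structure and functions are invented for illustration:

/* illustrative driver-private state using the writable alias */
#include <linux/i2c.h>

struct example_adapter_state {
	i2c_algorithm_no_const algo;	/* writable: filled in at probe time */
	struct i2c_adapter adap;
};

static u32 example_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static void example_probe_setup(struct example_adapter_state *st)
{
	st->algo.functionality = example_functionality;	/* only legal on the no_const copy */
	st->adap.algo = &st->algo;
}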
61858 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61859 index a6deef4..c56a7f2 100644
61860 --- a/include/linux/i2o.h
61861 +++ b/include/linux/i2o.h
61862 @@ -564,7 +564,7 @@ struct i2o_controller {
61863 struct i2o_device *exec; /* Executive */
61864 #if BITS_PER_LONG == 64
61865 spinlock_t context_list_lock; /* lock for context_list */
61866 - atomic_t context_list_counter; /* needed for unique contexts */
61867 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61868 struct list_head context_list; /* list of context id's
61869 and pointers */
61870 #endif
61871 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
61872 index 58404b0..439ed95 100644
61873 --- a/include/linux/if_team.h
61874 +++ b/include/linux/if_team.h
61875 @@ -64,6 +64,7 @@ struct team_mode_ops {
61876 void (*port_leave)(struct team *team, struct team_port *port);
61877 void (*port_change_mac)(struct team *team, struct team_port *port);
61878 };
61879 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
61880
61881 enum team_option_type {
61882 TEAM_OPTION_TYPE_U32,
61883 @@ -112,7 +113,7 @@ struct team {
61884 struct list_head option_list;
61885
61886 const struct team_mode *mode;
61887 - struct team_mode_ops ops;
61888 + team_mode_ops_no_const ops;
61889 long mode_priv[TEAM_MODE_PRIV_LONGS];
61890 };
61891
61892 diff --git a/include/linux/init.h b/include/linux/init.h
61893 index 6b95109..4aca62c 100644
61894 --- a/include/linux/init.h
61895 +++ b/include/linux/init.h
61896 @@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
61897
61898 /* Each module must use one module_init(). */
61899 #define module_init(initfn) \
61900 - static inline initcall_t __inittest(void) \
61901 + static inline __used initcall_t __inittest(void) \
61902 { return initfn; } \
61903 int init_module(void) __attribute__((alias(#initfn)));
61904
61905 /* This is only required if you want to be unloadable. */
61906 #define module_exit(exitfn) \
61907 - static inline exitcall_t __exittest(void) \
61908 + static inline __used exitcall_t __exittest(void) \
61909 { return exitfn; } \
61910 void cleanup_module(void) __attribute__((alias(#exitfn)));
61911
61912 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61913 index 9c66b1a..a3fdded 100644
61914 --- a/include/linux/init_task.h
61915 +++ b/include/linux/init_task.h
61916 @@ -127,6 +127,12 @@ extern struct cred init_cred;
61917
61918 #define INIT_TASK_COMM "swapper"
61919
61920 +#ifdef CONFIG_X86
61921 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61922 +#else
61923 +#define INIT_TASK_THREAD_INFO
61924 +#endif
61925 +
61926 /*
61927 * INIT_TASK is used to set up the first task table, touch at
61928 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61929 @@ -165,6 +171,7 @@ extern struct cred init_cred;
61930 RCU_INIT_POINTER(.cred, &init_cred), \
61931 .comm = INIT_TASK_COMM, \
61932 .thread = INIT_THREAD, \
61933 + INIT_TASK_THREAD_INFO \
61934 .fs = &init_fs, \
61935 .files = &init_files, \
61936 .signal = &init_signals, \
61937 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61938 index e6ca56d..8583707 100644
61939 --- a/include/linux/intel-iommu.h
61940 +++ b/include/linux/intel-iommu.h
61941 @@ -296,7 +296,7 @@ struct iommu_flush {
61942 u8 fm, u64 type);
61943 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61944 unsigned int size_order, u64 type);
61945 -};
61946 +} __no_const;
61947
61948 enum {
61949 SR_DMAR_FECTL_REG,
61950 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61951 index a64b00e..464d8bc 100644
61952 --- a/include/linux/interrupt.h
61953 +++ b/include/linux/interrupt.h
61954 @@ -441,7 +441,7 @@ enum
61955 /* map softirq index to softirq name. update 'softirq_to_name' in
61956 * kernel/softirq.c when adding a new softirq.
61957 */
61958 -extern char *softirq_to_name[NR_SOFTIRQS];
61959 +extern const char * const softirq_to_name[NR_SOFTIRQS];
61960
61961 /* softirq mask and active fields moved to irq_cpustat_t in
61962 * asm/hardirq.h to get better cache usage. KAO
61963 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61964
61965 struct softirq_action
61966 {
61967 - void (*action)(struct softirq_action *);
61968 + void (*action)(void);
61969 };
61970
61971 asmlinkage void do_softirq(void);
61972 asmlinkage void __do_softirq(void);
61973 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61974 +extern void open_softirq(int nr, void (*action)(void));
61975 extern void softirq_init(void);
61976 static inline void __raise_softirq_irqoff(unsigned int nr)
61977 {
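Dropping the unused struct softirq_action * argument changes both the handler signatures and the open_softirq() registrations. Under the patched prototypes a handler and its registration look like the sketch below; the handler is invented and an existing softirq slot is reused purely for illustration:

/* handler shape under the patched open_softirq() prototype */
#include <linux/interrupt.h>

static void example_softirq_action(void)	/* was: (struct softirq_action *a) */
{
	/* per-softirq work goes here */
}

static void example_softirq_setup(void)
{
	open_softirq(TASKLET_SOFTIRQ, example_softirq_action);
}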
61978 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61979 index 3875719..4cd454c 100644
61980 --- a/include/linux/kallsyms.h
61981 +++ b/include/linux/kallsyms.h
61982 @@ -15,7 +15,8 @@
61983
61984 struct module;
61985
61986 -#ifdef CONFIG_KALLSYMS
61987 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61988 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61989 /* Lookup the address for a symbol. Returns 0 if not found. */
61990 unsigned long kallsyms_lookup_name(const char *name);
61991
61992 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61993 /* Stupid that this does nothing, but I didn't create this mess. */
61994 #define __print_symbol(fmt, addr)
61995 #endif /*CONFIG_KALLSYMS*/
61996 +#else /* when included by kallsyms.c, vsnprintf.c, or
61997 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61998 +extern void __print_symbol(const char *fmt, unsigned long address);
61999 +extern int sprint_backtrace(char *buffer, unsigned long address);
62000 +extern int sprint_symbol(char *buffer, unsigned long address);
62001 +const char *kallsyms_lookup(unsigned long addr,
62002 + unsigned long *symbolsize,
62003 + unsigned long *offset,
62004 + char **modname, char *namebuf);
62005 +#endif
62006
62007 /* This macro allows us to keep printk typechecking */
62008 static __printf(1, 2)
62009 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
62010 index c4d2fc1..5df9c19 100644
62011 --- a/include/linux/kgdb.h
62012 +++ b/include/linux/kgdb.h
62013 @@ -53,7 +53,7 @@ extern int kgdb_connected;
62014 extern int kgdb_io_module_registered;
62015
62016 extern atomic_t kgdb_setting_breakpoint;
62017 -extern atomic_t kgdb_cpu_doing_single_step;
62018 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62019
62020 extern struct task_struct *kgdb_usethread;
62021 extern struct task_struct *kgdb_contthread;
62022 @@ -252,7 +252,7 @@ struct kgdb_arch {
62023 void (*disable_hw_break)(struct pt_regs *regs);
62024 void (*remove_all_hw_break)(void);
62025 void (*correct_hw_break)(void);
62026 -};
62027 +} __do_const;
62028
62029 /**
62030 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
62031 @@ -277,7 +277,7 @@ struct kgdb_io {
62032 void (*pre_exception) (void);
62033 void (*post_exception) (void);
62034 int is_console;
62035 -};
62036 +} __do_const;
62037
62038 extern struct kgdb_arch arch_kgdb_ops;
62039
62040 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
62041 index 0fb48ef..1b680b2 100644
62042 --- a/include/linux/kmod.h
62043 +++ b/include/linux/kmod.h
62044 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
62045 * usually useless though. */
62046 extern __printf(2, 3)
62047 int __request_module(bool wait, const char *name, ...);
62048 +extern __printf(3, 4)
62049 +int ___request_module(bool wait, char *param_name, const char *name, ...);
62050 #define request_module(mod...) __request_module(true, mod)
62051 #define request_module_nowait(mod...) __request_module(false, mod)
62052 #define try_then_request_module(x, mod...) \
62053 diff --git a/include/linux/kref.h b/include/linux/kref.h
62054 index 9c07dce..a92fa71 100644
62055 --- a/include/linux/kref.h
62056 +++ b/include/linux/kref.h
62057 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
62058 static inline int kref_sub(struct kref *kref, unsigned int count,
62059 void (*release)(struct kref *kref))
62060 {
62061 - WARN_ON(release == NULL);
62062 + BUG_ON(release == NULL);
62063
62064 if (atomic_sub_and_test((int) count, &kref->refcount)) {
62065 release(kref);
62066 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
62067 index 4c4e83d..695674f 100644
62068 --- a/include/linux/kvm_host.h
62069 +++ b/include/linux/kvm_host.h
62070 @@ -326,7 +326,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
62071 void vcpu_load(struct kvm_vcpu *vcpu);
62072 void vcpu_put(struct kvm_vcpu *vcpu);
62073
62074 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62075 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62076 struct module *module);
62077 void kvm_exit(void);
62078
62079 @@ -485,7 +485,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
62080 struct kvm_guest_debug *dbg);
62081 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
62082
62083 -int kvm_arch_init(void *opaque);
62084 +int kvm_arch_init(const void *opaque);
62085 void kvm_arch_exit(void);
62086
62087 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
62088 diff --git a/include/linux/libata.h b/include/linux/libata.h
62089 index cafc09a..d7e7829 100644
62090 --- a/include/linux/libata.h
62091 +++ b/include/linux/libata.h
62092 @@ -909,7 +909,7 @@ struct ata_port_operations {
62093 * fields must be pointers.
62094 */
62095 const struct ata_port_operations *inherits;
62096 -};
62097 +} __do_const;
62098
62099 struct ata_port_info {
62100 unsigned long flags;
62101 diff --git a/include/linux/mca.h b/include/linux/mca.h
62102 index 3797270..7765ede 100644
62103 --- a/include/linux/mca.h
62104 +++ b/include/linux/mca.h
62105 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
62106 int region);
62107 void * (*mca_transform_memory)(struct mca_device *,
62108 void *memory);
62109 -};
62110 +} __no_const;
62111
62112 struct mca_bus {
62113 u64 default_dma_mask;
62114 diff --git a/include/linux/memory.h b/include/linux/memory.h
62115 index 1ac7f6e..a5794d0 100644
62116 --- a/include/linux/memory.h
62117 +++ b/include/linux/memory.h
62118 @@ -143,7 +143,7 @@ struct memory_accessor {
62119 size_t count);
62120 ssize_t (*write)(struct memory_accessor *, const char *buf,
62121 off_t offset, size_t count);
62122 -};
62123 +} __no_const;
62124
62125 /*
62126 * Kernel text modification mutex, used for code patching. Users of this lock
62127 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
62128 index 9970337..9444122 100644
62129 --- a/include/linux/mfd/abx500.h
62130 +++ b/include/linux/mfd/abx500.h
62131 @@ -188,6 +188,7 @@ struct abx500_ops {
62132 int (*event_registers_startup_state_get) (struct device *, u8 *);
62133 int (*startup_irq_enabled) (struct device *, unsigned int);
62134 };
62135 +typedef struct abx500_ops __no_const abx500_ops_no_const;
62136
62137 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
62138 void abx500_remove_ops(struct device *dev);
62139 diff --git a/include/linux/mm.h b/include/linux/mm.h
62140 index 17b27cd..baea141 100644
62141 --- a/include/linux/mm.h
62142 +++ b/include/linux/mm.h
62143 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
62144
62145 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62146 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62147 +
62148 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62149 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62150 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62151 +#else
62152 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
62153 +#endif
62154 +
62155 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62156 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62157
62158 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
62159 int set_page_dirty_lock(struct page *page);
62160 int clear_page_dirty_for_io(struct page *page);
62161
62162 -/* Is the vma a continuation of the stack vma above it? */
62163 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
62164 -{
62165 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62166 -}
62167 -
62168 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
62169 - unsigned long addr)
62170 -{
62171 - return (vma->vm_flags & VM_GROWSDOWN) &&
62172 - (vma->vm_start == addr) &&
62173 - !vma_growsdown(vma->vm_prev, addr);
62174 -}
62175 -
62176 -/* Is the vma a continuation of the stack vma below it? */
62177 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
62178 -{
62179 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
62180 -}
62181 -
62182 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
62183 - unsigned long addr)
62184 -{
62185 - return (vma->vm_flags & VM_GROWSUP) &&
62186 - (vma->vm_end == addr) &&
62187 - !vma_growsup(vma->vm_next, addr);
62188 -}
62189 -
62190 extern unsigned long move_page_tables(struct vm_area_struct *vma,
62191 unsigned long old_addr, struct vm_area_struct *new_vma,
62192 unsigned long new_addr, unsigned long len);
62193 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
62194 }
62195 #endif
62196
62197 +#ifdef CONFIG_MMU
62198 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
62199 +#else
62200 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
62201 +{
62202 + return __pgprot(0);
62203 +}
62204 +#endif
62205 +
62206 int vma_wants_writenotify(struct vm_area_struct *vma);
62207
62208 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
62209 @@ -1152,8 +1140,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
62210 {
62211 return 0;
62212 }
62213 +
62214 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
62215 + unsigned long address)
62216 +{
62217 + return 0;
62218 +}
62219 #else
62220 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62221 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62222 #endif
62223
62224 #ifdef __PAGETABLE_PMD_FOLDED
62225 @@ -1162,8 +1157,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
62226 {
62227 return 0;
62228 }
62229 +
62230 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
62231 + unsigned long address)
62232 +{
62233 + return 0;
62234 +}
62235 #else
62236 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
62237 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
62238 #endif
62239
62240 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
62241 @@ -1181,11 +1183,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
62242 NULL: pud_offset(pgd, address);
62243 }
62244
62245 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
62246 +{
62247 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
62248 + NULL: pud_offset(pgd, address);
62249 +}
62250 +
62251 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
62252 {
62253 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
62254 NULL: pmd_offset(pud, address);
62255 }
62256 +
62257 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
62258 +{
62259 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
62260 + NULL: pmd_offset(pud, address);
62261 +}
62262 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
62263
62264 #if USE_SPLIT_PTLOCKS
62265 @@ -1409,6 +1423,7 @@ out:
62266 }
62267
62268 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62269 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62270
62271 extern unsigned long do_brk(unsigned long, unsigned long);
62272
62273 @@ -1466,6 +1481,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
62274 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62275 struct vm_area_struct **pprev);
62276
62277 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
62278 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
62279 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62280 +
62281 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62282 NULL if none. Assume start_addr < end_addr. */
62283 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
62284 @@ -1494,15 +1513,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
62285 return vma;
62286 }
62287
62288 -#ifdef CONFIG_MMU
62289 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
62290 -#else
62291 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62292 -{
62293 - return __pgprot(0);
62294 -}
62295 -#endif
62296 -
62297 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62298 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62299 unsigned long pfn, unsigned long size, pgprot_t);
62300 @@ -1606,7 +1616,7 @@ extern int unpoison_memory(unsigned long pfn);
62301 extern int sysctl_memory_failure_early_kill;
62302 extern int sysctl_memory_failure_recovery;
62303 extern void shake_page(struct page *p, int access);
62304 -extern atomic_long_t mce_bad_pages;
62305 +extern atomic_long_unchecked_t mce_bad_pages;
62306 extern int soft_offline_page(struct page *page, int flags);
62307
62308 extern void dump_page(struct page *page);
62309 @@ -1637,5 +1647,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
62310 static inline bool page_is_guard(struct page *page) { return false; }
62311 #endif /* CONFIG_DEBUG_PAGEALLOC */
62312
62313 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62314 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62315 +#else
62316 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62317 +#endif
62318 +
62319 #endif /* __KERNEL__ */
62320 #endif /* _LINUX_MM_H */
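Besides moving vm_get_page_prot() and adding the PaX mirror and track_exec_limit() hooks, the mm.h hunk introduces _kernel variants of the intermediate page-table allocators, which the KERNEXEC side of the patch uses when it builds kernel-side mappings. A hedged sketch of a walk that uses them; the helper itself is not part of the patch:

/* illustrative kernel page-table walk with the new allocators */
#include <linux/mm.h>
#include <asm/pgtable.h>

static pte_t *example_walk_kernel(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_alloc_kernel(mm, pgd, addr);
	pmd_t *pmd;

	if (!pud)
		return NULL;
	pmd = pmd_alloc_kernel(mm, pud, addr);
	if (!pmd)
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* no pte-level _kernel variant is added */
}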
62321 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62322 index 3cc3062..efeaeb7 100644
62323 --- a/include/linux/mm_types.h
62324 +++ b/include/linux/mm_types.h
62325 @@ -252,6 +252,8 @@ struct vm_area_struct {
62326 #ifdef CONFIG_NUMA
62327 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62328 #endif
62329 +
62330 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62331 };
62332
62333 struct core_thread {
62334 @@ -326,7 +328,7 @@ struct mm_struct {
62335 unsigned long def_flags;
62336 unsigned long nr_ptes; /* Page table pages */
62337 unsigned long start_code, end_code, start_data, end_data;
62338 - unsigned long start_brk, brk, start_stack;
62339 + unsigned long brk_gap, start_brk, brk, start_stack;
62340 unsigned long arg_start, arg_end, env_start, env_end;
62341
62342 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
62343 @@ -388,6 +390,24 @@ struct mm_struct {
62344 #ifdef CONFIG_CPUMASK_OFFSTACK
62345 struct cpumask cpumask_allocation;
62346 #endif
62347 +
62348 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62349 + unsigned long pax_flags;
62350 +#endif
62351 +
62352 +#ifdef CONFIG_PAX_DLRESOLVE
62353 + unsigned long call_dl_resolve;
62354 +#endif
62355 +
62356 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62357 + unsigned long call_syscall;
62358 +#endif
62359 +
62360 +#ifdef CONFIG_PAX_ASLR
62361 + unsigned long delta_mmap; /* randomized offset */
62362 + unsigned long delta_stack; /* randomized offset */
62363 +#endif
62364 +
62365 };
62366
62367 static inline void mm_init_cpumask(struct mm_struct *mm)
62368 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62369 index 1d1b1e1..2a13c78 100644
62370 --- a/include/linux/mmu_notifier.h
62371 +++ b/include/linux/mmu_notifier.h
62372 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
62373 */
62374 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62375 ({ \
62376 - pte_t __pte; \
62377 + pte_t ___pte; \
62378 struct vm_area_struct *___vma = __vma; \
62379 unsigned long ___address = __address; \
62380 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62381 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62382 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62383 - __pte; \
62384 + ___pte; \
62385 })
62386
62387 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
62388 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62389 index 650ba2f..af0a58c 100644
62390 --- a/include/linux/mmzone.h
62391 +++ b/include/linux/mmzone.h
62392 @@ -379,7 +379,7 @@ struct zone {
62393 unsigned long flags; /* zone flags, see below */
62394
62395 /* Zone statistics */
62396 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62397 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62398
62399 /*
62400 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
62401 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
62402 index 83ac071..2656e0e 100644
62403 --- a/include/linux/mod_devicetable.h
62404 +++ b/include/linux/mod_devicetable.h
62405 @@ -12,7 +12,7 @@
62406 typedef unsigned long kernel_ulong_t;
62407 #endif
62408
62409 -#define PCI_ANY_ID (~0)
62410 +#define PCI_ANY_ID ((__u16)~0)
62411
62412 struct pci_device_id {
62413 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
62414 @@ -131,7 +131,7 @@ struct usb_device_id {
62415 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
62416 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
62417
62418 -#define HID_ANY_ID (~0)
62419 +#define HID_ANY_ID (~0U)
62420
62421 struct hid_device_id {
62422 __u16 bus;
62423 diff --git a/include/linux/module.h b/include/linux/module.h
62424 index 4598bf0..e069d7f 100644
62425 --- a/include/linux/module.h
62426 +++ b/include/linux/module.h
62427 @@ -17,6 +17,7 @@
62428 #include <linux/moduleparam.h>
62429 #include <linux/tracepoint.h>
62430 #include <linux/export.h>
62431 +#include <linux/fs.h>
62432
62433 #include <linux/percpu.h>
62434 #include <asm/module.h>
62435 @@ -275,19 +276,16 @@ struct module
62436 int (*init)(void);
62437
62438 /* If this is non-NULL, vfree after init() returns */
62439 - void *module_init;
62440 + void *module_init_rx, *module_init_rw;
62441
62442 /* Here is the actual code + data, vfree'd on unload. */
62443 - void *module_core;
62444 + void *module_core_rx, *module_core_rw;
62445
62446 /* Here are the sizes of the init and core sections */
62447 - unsigned int init_size, core_size;
62448 + unsigned int init_size_rw, core_size_rw;
62449
62450 /* The size of the executable code in each section. */
62451 - unsigned int init_text_size, core_text_size;
62452 -
62453 - /* Size of RO sections of the module (text+rodata) */
62454 - unsigned int init_ro_size, core_ro_size;
62455 + unsigned int init_size_rx, core_size_rx;
62456
62457 /* Arch-specific module values */
62458 struct mod_arch_specific arch;
62459 @@ -343,6 +341,10 @@ struct module
62460 #ifdef CONFIG_EVENT_TRACING
62461 struct ftrace_event_call **trace_events;
62462 unsigned int num_trace_events;
62463 + struct file_operations trace_id;
62464 + struct file_operations trace_enable;
62465 + struct file_operations trace_format;
62466 + struct file_operations trace_filter;
62467 #endif
62468 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
62469 unsigned int num_ftrace_callsites;
62470 @@ -390,16 +392,46 @@ bool is_module_address(unsigned long addr);
62471 bool is_module_percpu_address(unsigned long addr);
62472 bool is_module_text_address(unsigned long addr);
62473
62474 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
62475 +{
62476 +
62477 +#ifdef CONFIG_PAX_KERNEXEC
62478 + if (ktla_ktva(addr) >= (unsigned long)start &&
62479 + ktla_ktva(addr) < (unsigned long)start + size)
62480 + return 1;
62481 +#endif
62482 +
62483 + return ((void *)addr >= start && (void *)addr < start + size);
62484 +}
62485 +
62486 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62487 +{
62488 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62489 +}
62490 +
62491 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62492 +{
62493 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62494 +}
62495 +
62496 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62497 +{
62498 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62499 +}
62500 +
62501 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62502 +{
62503 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62504 +}
62505 +
62506 static inline int within_module_core(unsigned long addr, struct module *mod)
62507 {
62508 - return (unsigned long)mod->module_core <= addr &&
62509 - addr < (unsigned long)mod->module_core + mod->core_size;
62510 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62511 }
62512
62513 static inline int within_module_init(unsigned long addr, struct module *mod)
62514 {
62515 - return (unsigned long)mod->module_init <= addr &&
62516 - addr < (unsigned long)mod->module_init + mod->init_size;
62517 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62518 }
62519
62520 /* Search for module by name: must hold module_mutex. */
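With the module image split into rx and rw halves, within_module_core() and within_module_init() are rebuilt on top of the per-region helpers, so existing callers keep their semantics. Code that specifically cares about executable addresses can use the rx variants directly, as in this illustrative helper:

/* illustrative text-only check built on the new helpers */
#include <linux/module.h>

static int example_addr_is_module_text(unsigned long addr, struct module *mod)
{
	return within_module_core_rx(addr, mod) || within_module_init_rx(addr, mod);
}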
62521 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
62522 index b2be02e..0a61daa 100644
62523 --- a/include/linux/moduleloader.h
62524 +++ b/include/linux/moduleloader.h
62525 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
62526 sections. Returns NULL on failure. */
62527 void *module_alloc(unsigned long size);
62528
62529 +#ifdef CONFIG_PAX_KERNEXEC
62530 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
62531 +#else
62532 +#define module_alloc_exec(x) module_alloc(x)
62533 +#endif
62534 +
62535 /* Free memory returned from module_alloc. */
62536 void module_free(struct module *mod, void *module_region);
62537
62538 +#ifdef CONFIG_PAX_KERNEXEC
62539 +void module_free_exec(struct module *mod, void *module_region);
62540 +#else
62541 +#define module_free_exec(x, y) module_free((x), (y))
62542 +#endif
62543 +
62544 /* Apply the given relocation to the (simplified) ELF. Return -error
62545 or 0. */
62546 int apply_relocate(Elf_Shdr *sechdrs,
62547 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62548 index c47f4d6..23f9bdb 100644
62549 --- a/include/linux/moduleparam.h
62550 +++ b/include/linux/moduleparam.h
62551 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
62552 * @len is usually just sizeof(string).
62553 */
62554 #define module_param_string(name, string, len, perm) \
62555 - static const struct kparam_string __param_string_##name \
62556 + static const struct kparam_string __param_string_##name __used \
62557 = { len, string }; \
62558 __module_param_call(MODULE_PARAM_PREFIX, name, \
62559 &param_ops_string, \
62560 @@ -396,7 +396,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
62561 */
62562 #define module_param_array_named(name, array, type, nump, perm) \
62563 param_check_##type(name, &(array)[0]); \
62564 - static const struct kparam_array __param_arr_##name \
62565 + static const struct kparam_array __param_arr_##name __used \
62566 = { .max = ARRAY_SIZE(array), .num = nump, \
62567 .ops = &param_ops_##type, \
62568 .elemsize = sizeof(array[0]), .elem = array }; \
62569 diff --git a/include/linux/namei.h b/include/linux/namei.h
62570 index ffc0213..2c1f2cb 100644
62571 --- a/include/linux/namei.h
62572 +++ b/include/linux/namei.h
62573 @@ -24,7 +24,7 @@ struct nameidata {
62574 unsigned seq;
62575 int last_type;
62576 unsigned depth;
62577 - char *saved_names[MAX_NESTED_LINKS + 1];
62578 + const char *saved_names[MAX_NESTED_LINKS + 1];
62579
62580 /* Intent data */
62581 union {
62582 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
62583 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62584 extern void unlock_rename(struct dentry *, struct dentry *);
62585
62586 -static inline void nd_set_link(struct nameidata *nd, char *path)
62587 +static inline void nd_set_link(struct nameidata *nd, const char *path)
62588 {
62589 nd->saved_names[nd->depth] = path;
62590 }
62591
62592 -static inline char *nd_get_link(struct nameidata *nd)
62593 +static inline const char *nd_get_link(const struct nameidata *nd)
62594 {
62595 return nd->saved_names[nd->depth];
62596 }
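Making saved_names a const char * array lets symlink targets be stored without casting away const. Under the patched nd_set_link() a ->follow_link implementation can hand over read-only data directly; the inode operation below is invented for illustration:

/* illustrative ->follow_link under the const-correct nd_set_link() */
#include <linux/namei.h>

static void *example_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, "example-target");	/* const char * is accepted as-is */
	return NULL;
}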
62597 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62598 index 7e472b7..212d381 100644
62599 --- a/include/linux/netdevice.h
62600 +++ b/include/linux/netdevice.h
62601 @@ -1002,6 +1002,7 @@ struct net_device_ops {
62602 int (*ndo_neigh_construct)(struct neighbour *n);
62603 void (*ndo_neigh_destroy)(struct neighbour *n);
62604 };
62605 +typedef struct net_device_ops __no_const net_device_ops_no_const;
62606
62607 /*
62608 * The DEVICE structure.
62609 @@ -1063,7 +1064,7 @@ struct net_device {
62610 int iflink;
62611
62612 struct net_device_stats stats;
62613 - atomic_long_t rx_dropped; /* dropped packets by core network
62614 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
62615 * Do not use this in drivers.
62616 */
62617
62618 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62619 new file mode 100644
62620 index 0000000..33f4af8
62621 --- /dev/null
62622 +++ b/include/linux/netfilter/xt_gradm.h
62623 @@ -0,0 +1,9 @@
62624 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
62625 +#define _LINUX_NETFILTER_XT_GRADM_H 1
62626 +
62627 +struct xt_gradm_mtinfo {
62628 + __u16 flags;
62629 + __u16 invflags;
62630 +};
62631 +
62632 +#endif
62633 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62634 index c65a18a..0c05f3a 100644
62635 --- a/include/linux/of_pdt.h
62636 +++ b/include/linux/of_pdt.h
62637 @@ -32,7 +32,7 @@ struct of_pdt_ops {
62638
62639 /* return 0 on success; fill in 'len' with number of bytes in path */
62640 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62641 -};
62642 +} __no_const;
62643
62644 extern void *prom_early_alloc(unsigned long size);
62645
62646 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62647 index a4c5624..79d6d88 100644
62648 --- a/include/linux/oprofile.h
62649 +++ b/include/linux/oprofile.h
62650 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62651 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62652 char const * name, ulong * val);
62653
62654 -/** Create a file for read-only access to an atomic_t. */
62655 +/** Create a file for read-only access to an atomic_unchecked_t. */
62656 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62657 - char const * name, atomic_t * val);
62658 + char const * name, atomic_unchecked_t * val);
62659
62660 /** create a directory */
62661 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62662 diff --git a/include/linux/padata.h b/include/linux/padata.h
62663 index 4633b2f..988bc08 100644
62664 --- a/include/linux/padata.h
62665 +++ b/include/linux/padata.h
62666 @@ -129,7 +129,7 @@ struct parallel_data {
62667 struct padata_instance *pinst;
62668 struct padata_parallel_queue __percpu *pqueue;
62669 struct padata_serial_queue __percpu *squeue;
62670 - atomic_t seq_nr;
62671 + atomic_unchecked_t seq_nr;
62672 atomic_t reorder_objects;
62673 atomic_t refcnt;
62674 unsigned int max_seq_nr;
62675 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62676 index abb2776..d8b8e15 100644
62677 --- a/include/linux/perf_event.h
62678 +++ b/include/linux/perf_event.h
62679 @@ -750,8 +750,8 @@ struct perf_event {
62680
62681 enum perf_event_active_state state;
62682 unsigned int attach_state;
62683 - local64_t count;
62684 - atomic64_t child_count;
62685 + local64_t count; /* PaX: fix it one day */
62686 + atomic64_unchecked_t child_count;
62687
62688 /*
62689 * These are the total time in nanoseconds that the event
62690 @@ -802,8 +802,8 @@ struct perf_event {
62691 * These accumulate total time (in nanoseconds) that children
62692 * events have been enabled and running, respectively.
62693 */
62694 - atomic64_t child_total_time_enabled;
62695 - atomic64_t child_total_time_running;
62696 + atomic64_unchecked_t child_total_time_enabled;
62697 + atomic64_unchecked_t child_total_time_running;
62698
62699 /*
62700 * Protect attach/detach and child_list:
62701 diff --git a/include/linux/personality.h b/include/linux/personality.h
62702 index 8fc7dd1a..c19d89e 100644
62703 --- a/include/linux/personality.h
62704 +++ b/include/linux/personality.h
62705 @@ -44,6 +44,7 @@ enum {
62706 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62707 ADDR_NO_RANDOMIZE | \
62708 ADDR_COMPAT_LAYOUT | \
62709 + ADDR_LIMIT_3GB | \
62710 MMAP_PAGE_ZERO)
62711
62712 /*
62713 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62714 index 0072a53..c5dcca5 100644
62715 --- a/include/linux/pipe_fs_i.h
62716 +++ b/include/linux/pipe_fs_i.h
62717 @@ -47,9 +47,9 @@ struct pipe_buffer {
62718 struct pipe_inode_info {
62719 wait_queue_head_t wait;
62720 unsigned int nrbufs, curbuf, buffers;
62721 - unsigned int readers;
62722 - unsigned int writers;
62723 - unsigned int waiting_writers;
62724 + atomic_t readers;
62725 + atomic_t writers;
62726 + atomic_t waiting_writers;
62727 unsigned int r_counter;
62728 unsigned int w_counter;
62729 struct page *tmp_page;
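Turning readers, writers and waiting_writers into atomic_t means every plain increment, decrement and test in fs/pipe.c (patched elsewhere) moves to the atomic API. The shape of that conversion, with the surrounding functions invented:

/* illustrative before/after of the counter conversion */
#include <linux/pipe_fs_i.h>
#include <linux/atomic.h>

static void example_pipe_reader_open(struct pipe_inode_info *pipe)
{
	atomic_inc(&pipe->readers);		/* was: pipe->readers++; */
}

static int example_pipe_has_writers(struct pipe_inode_info *pipe)
{
	return atomic_read(&pipe->writers) > 0;	/* was: pipe->writers > 0 */
}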
62730 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62731 index 609daae..5392427 100644
62732 --- a/include/linux/pm_runtime.h
62733 +++ b/include/linux/pm_runtime.h
62734 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62735
62736 static inline void pm_runtime_mark_last_busy(struct device *dev)
62737 {
62738 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
62739 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62740 }
62741
62742 #else /* !CONFIG_PM_RUNTIME */
62743 diff --git a/include/linux/poison.h b/include/linux/poison.h
62744 index 2110a81..13a11bb 100644
62745 --- a/include/linux/poison.h
62746 +++ b/include/linux/poison.h
62747 @@ -19,8 +19,8 @@
62748 * under normal circumstances, used to verify that nobody uses
62749 * non-initialized list entries.
62750 */
62751 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62752 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62753 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62754 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62755
62756 /********** include/linux/timer.h **********/
62757 /*
62758 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62759 index 58969b2..ead129b 100644
62760 --- a/include/linux/preempt.h
62761 +++ b/include/linux/preempt.h
62762 @@ -123,7 +123,7 @@ struct preempt_ops {
62763 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62764 void (*sched_out)(struct preempt_notifier *notifier,
62765 struct task_struct *next);
62766 -};
62767 +} __no_const;
62768
62769 /**
62770 * preempt_notifier - key for installing preemption notifiers
62771 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62772 index 85c5073..51fac8b 100644
62773 --- a/include/linux/proc_fs.h
62774 +++ b/include/linux/proc_fs.h
62775 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
62776 return proc_create_data(name, mode, parent, proc_fops, NULL);
62777 }
62778
62779 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
62780 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62781 +{
62782 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62783 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62784 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62785 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62786 +#else
62787 + return proc_create_data(name, mode, parent, proc_fops, NULL);
62788 +#endif
62789 +}
62790 +
62791 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62792 umode_t mode, struct proc_dir_entry *base,
62793 read_proc_t *read_proc, void * data)
62794 @@ -258,7 +270,7 @@ union proc_op {
62795 int (*proc_show)(struct seq_file *m,
62796 struct pid_namespace *ns, struct pid *pid,
62797 struct task_struct *task);
62798 -};
62799 +} __no_const;
62800
62801 struct ctl_table_header;
62802 struct ctl_table;
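proc_create_grsec() is a drop-in replacement for proc_create() that tightens the entry's mode when GRKERNSEC_PROC_USER or GRKERNSEC_PROC_USERGROUP is enabled; otherwise it passes the requested mode through. It is called exactly like proc_create(); the entry name and file_operations below are hypothetical:

/* hypothetical registration through the mode-tightening wrapper */
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/errno.h>

static const struct file_operations example_fops;	/* assume a normal seq_file fops */

static int __init example_proc_init(void)
{
	if (!proc_create_grsec("example_stats", 0444, NULL, &example_fops))
		return -ENOMEM;
	return 0;
}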
62803 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
62804 index c2f1f6a..6fdb196 100644
62805 --- a/include/linux/ptrace.h
62806 +++ b/include/linux/ptrace.h
62807 @@ -199,9 +199,10 @@ static inline void ptrace_event(int event, unsigned long message)
62808 if (unlikely(ptrace_event_enabled(current, event))) {
62809 current->ptrace_message = message;
62810 ptrace_notify((event << 8) | SIGTRAP);
62811 - } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
62812 + } else if (event == PTRACE_EVENT_EXEC) {
62813 /* legacy EXEC report via SIGTRAP */
62814 - send_sig(SIGTRAP, current, 0);
62815 + if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
62816 + send_sig(SIGTRAP, current, 0);
62817 }
62818 }
62819
62820 diff --git a/include/linux/random.h b/include/linux/random.h
62821 index 8f74538..02a1012 100644
62822 --- a/include/linux/random.h
62823 +++ b/include/linux/random.h
62824 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
62825
62826 u32 prandom32(struct rnd_state *);
62827
62828 +static inline unsigned long pax_get_random_long(void)
62829 +{
62830 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62831 +}
62832 +
62833 /*
62834 * Handle minimum values for seeds
62835 */
62836 static inline u32 __seed(u32 x, u32 m)
62837 {
62838 - return (x < m) ? x + m : x;
62839 + return (x <= m) ? x + m + 1 : x;
62840 }
62841
62842 /**
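pax_get_random_long() builds an unsigned long from one random32() result, or two of them on 64-bit targets, and the tightened __seed() now always returns a value strictly greater than the minimum m. PaX ASLR masks and shifts such a random value into the delta_mmap and delta_stack fields added to mm_struct above. The sketch below shows the shape of that derivation with placeholder bit widths, not the values the patch actually uses:

/* illustrative ASLR delta derivation; bit widths are placeholders */
#include <linux/random.h>
#include <linux/mm_types.h>
#include <asm/page.h>

static void example_randomize_mm(struct mm_struct *mm)
{
#ifdef CONFIG_PAX_ASLR
	mm->delta_mmap  = (pax_get_random_long() & ((1UL << 16) - 1)) << PAGE_SHIFT;
	mm->delta_stack = (pax_get_random_long() & ((1UL <<  8) - 1)) << PAGE_SHIFT;
#endif
}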
62843 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62844 index e0879a7..a12f962 100644
62845 --- a/include/linux/reboot.h
62846 +++ b/include/linux/reboot.h
62847 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62848 * Architecture-specific implementations of sys_reboot commands.
62849 */
62850
62851 -extern void machine_restart(char *cmd);
62852 -extern void machine_halt(void);
62853 -extern void machine_power_off(void);
62854 +extern void machine_restart(char *cmd) __noreturn;
62855 +extern void machine_halt(void) __noreturn;
62856 +extern void machine_power_off(void) __noreturn;
62857
62858 extern void machine_shutdown(void);
62859 struct pt_regs;
62860 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62861 */
62862
62863 extern void kernel_restart_prepare(char *cmd);
62864 -extern void kernel_restart(char *cmd);
62865 -extern void kernel_halt(void);
62866 -extern void kernel_power_off(void);
62867 +extern void kernel_restart(char *cmd) __noreturn;
62868 +extern void kernel_halt(void) __noreturn;
62869 +extern void kernel_power_off(void) __noreturn;
62870
62871 extern int C_A_D; /* for sysctl */
62872 void ctrl_alt_del(void);
62873 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62874 * Emergency restart, callable from an interrupt handler.
62875 */
62876
62877 -extern void emergency_restart(void);
62878 +extern void emergency_restart(void) __noreturn;
62879 #include <asm/emergency-restart.h>
62880
62881 #endif
62882 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
62883 index 2213ddc..650212a 100644
62884 --- a/include/linux/reiserfs_fs.h
62885 +++ b/include/linux/reiserfs_fs.h
62886 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
62887 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
62888
62889 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
62890 -#define get_generation(s) atomic_read (&fs_generation(s))
62891 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
62892 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
62893 #define __fs_changed(gen,s) (gen != get_generation (s))
62894 #define fs_changed(gen,s) \
62895 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
62896 index 8c9e85c..1698e9a 100644
62897 --- a/include/linux/reiserfs_fs_sb.h
62898 +++ b/include/linux/reiserfs_fs_sb.h
62899 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
62900 /* Comment? -Hans */
62901 wait_queue_head_t s_wait;
62902 /* To be obsoleted soon by per buffer seals.. -Hans */
62903 - atomic_t s_generation_counter; // increased by one every time the
62904 + atomic_unchecked_t s_generation_counter; // increased by one every time the
62905 // tree gets re-balanced
62906 unsigned long s_properties; /* File system properties. Currently holds
62907 on-disk FS format */
62908 diff --git a/include/linux/relay.h b/include/linux/relay.h
62909 index a822fd7..62b70f6 100644
62910 --- a/include/linux/relay.h
62911 +++ b/include/linux/relay.h
62912 @@ -159,7 +159,7 @@ struct rchan_callbacks
62913 * The callback should return 0 if successful, negative if not.
62914 */
62915 int (*remove_buf_file)(struct dentry *dentry);
62916 -};
62917 +} __no_const;
62918
62919 /*
62920 * CONFIG_RELAY kernel API, kernel/relay.c
62921 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62922 index c6c6084..5bf1212 100644
62923 --- a/include/linux/rfkill.h
62924 +++ b/include/linux/rfkill.h
62925 @@ -147,6 +147,7 @@ struct rfkill_ops {
62926 void (*query)(struct rfkill *rfkill, void *data);
62927 int (*set_block)(void *data, bool blocked);
62928 };
62929 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62930
62931 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62932 /**
62933 diff --git a/include/linux/rio.h b/include/linux/rio.h
62934 index 4d50611..c6858a2 100644
62935 --- a/include/linux/rio.h
62936 +++ b/include/linux/rio.h
62937 @@ -315,7 +315,7 @@ struct rio_ops {
62938 int mbox, void *buffer, size_t len);
62939 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
62940 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
62941 -};
62942 +} __no_const;
62943
62944 #define RIO_RESOURCE_MEM 0x00000100
62945 #define RIO_RESOURCE_DOORBELL 0x00000200
62946 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62947 index 1cdd62a..e399f0d 100644
62948 --- a/include/linux/rmap.h
62949 +++ b/include/linux/rmap.h
62950 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62951 void anon_vma_init(void); /* create anon_vma_cachep */
62952 int anon_vma_prepare(struct vm_area_struct *);
62953 void unlink_anon_vmas(struct vm_area_struct *);
62954 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62955 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62956 void anon_vma_moveto_tail(struct vm_area_struct *);
62957 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62958 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62959 void __anon_vma_link(struct vm_area_struct *);
62960
62961 static inline void anon_vma_merge(struct vm_area_struct *vma,
62962 diff --git a/include/linux/sched.h b/include/linux/sched.h
62963 index 0657368..765f70f 100644
62964 --- a/include/linux/sched.h
62965 +++ b/include/linux/sched.h
62966 @@ -101,6 +101,7 @@ struct bio_list;
62967 struct fs_struct;
62968 struct perf_event_context;
62969 struct blk_plug;
62970 +struct linux_binprm;
62971
62972 /*
62973 * List of flags we want to share for kernel threads,
62974 @@ -382,10 +383,13 @@ struct user_namespace;
62975 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62976
62977 extern int sysctl_max_map_count;
62978 +extern unsigned long sysctl_heap_stack_gap;
62979
62980 #include <linux/aio.h>
62981
62982 #ifdef CONFIG_MMU
62983 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62984 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62985 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62986 extern unsigned long
62987 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62988 @@ -631,6 +635,17 @@ struct signal_struct {
62989 #ifdef CONFIG_TASKSTATS
62990 struct taskstats *stats;
62991 #endif
62992 +
62993 +#ifdef CONFIG_GRKERNSEC
62994 + u32 curr_ip;
62995 + u32 saved_ip;
62996 + u32 gr_saddr;
62997 + u32 gr_daddr;
62998 + u16 gr_sport;
62999 + u16 gr_dport;
63000 + u8 used_accept:1;
63001 +#endif
63002 +
63003 #ifdef CONFIG_AUDIT
63004 unsigned audit_tty;
63005 struct tty_audit_buf *tty_audit_buf;
63006 @@ -714,6 +729,11 @@ struct user_struct {
63007 struct key *session_keyring; /* UID's default session keyring */
63008 #endif
63009
63010 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63011 + unsigned int banned;
63012 + unsigned long ban_expires;
63013 +#endif
63014 +
63015 /* Hash table maintenance information */
63016 struct hlist_node uidhash_node;
63017 uid_t uid;
63018 @@ -1354,8 +1374,8 @@ struct task_struct {
63019 struct list_head thread_group;
63020
63021 struct completion *vfork_done; /* for vfork() */
63022 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
63023 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63024 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
63025 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63026
63027 cputime_t utime, stime, utimescaled, stimescaled;
63028 cputime_t gtime;
63029 @@ -1371,13 +1391,6 @@ struct task_struct {
63030 struct task_cputime cputime_expires;
63031 struct list_head cpu_timers[3];
63032
63033 -/* process credentials */
63034 - const struct cred __rcu *real_cred; /* objective and real subjective task
63035 - * credentials (COW) */
63036 - const struct cred __rcu *cred; /* effective (overridable) subjective task
63037 - * credentials (COW) */
63038 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63039 -
63040 char comm[TASK_COMM_LEN]; /* executable name excluding path
63041 - access with [gs]et_task_comm (which lock
63042 it with task_lock())
63043 @@ -1394,8 +1407,16 @@ struct task_struct {
63044 #endif
63045 /* CPU-specific state of this task */
63046 struct thread_struct thread;
63047 +/* thread_info moved to task_struct */
63048 +#ifdef CONFIG_X86
63049 + struct thread_info tinfo;
63050 +#endif
63051 /* filesystem information */
63052 struct fs_struct *fs;
63053 +
63054 + const struct cred __rcu *cred; /* effective (overridable) subjective task
63055 + * credentials (COW) */
63056 +
63057 /* open file information */
63058 struct files_struct *files;
63059 /* namespaces */
63060 @@ -1442,6 +1463,11 @@ struct task_struct {
63061 struct rt_mutex_waiter *pi_blocked_on;
63062 #endif
63063
63064 +/* process credentials */
63065 + const struct cred __rcu *real_cred; /* objective and real subjective task
63066 + * credentials (COW) */
63067 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63068 +
63069 #ifdef CONFIG_DEBUG_MUTEXES
63070 /* mutex deadlock detection */
63071 struct mutex_waiter *blocked_on;
63072 @@ -1558,6 +1584,27 @@ struct task_struct {
63073 unsigned long default_timer_slack_ns;
63074
63075 struct list_head *scm_work_list;
63076 +
63077 +#ifdef CONFIG_GRKERNSEC
63078 + /* grsecurity */
63079 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63080 + u64 exec_id;
63081 +#endif
63082 +#ifdef CONFIG_GRKERNSEC_SETXID
63083 + const struct cred *delayed_cred;
63084 +#endif
63085 + struct dentry *gr_chroot_dentry;
63086 + struct acl_subject_label *acl;
63087 + struct acl_role_label *role;
63088 + struct file *exec_file;
63089 + u16 acl_role_id;
63090 + /* is this the task that authenticated to the special role */
63091 + u8 acl_sp_role;
63092 + u8 is_writable;
63093 + u8 brute;
63094 + u8 gr_is_chrooted;
63095 +#endif
63096 +
63097 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
63098 /* Index of current stored address in ret_stack */
63099 int curr_ret_stack;
63100 @@ -1592,6 +1639,51 @@ struct task_struct {
63101 #endif
63102 };
63103
63104 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
63105 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
63106 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
63107 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
63108 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
63109 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
63110 +
63111 +#ifdef CONFIG_PAX_SOFTMODE
63112 +extern int pax_softmode;
63113 +#endif
63114 +
63115 +extern int pax_check_flags(unsigned long *);
63116 +
63117 +/* if tsk != current then task_lock must be held on it */
63118 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63119 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
63120 +{
63121 + if (likely(tsk->mm))
63122 + return tsk->mm->pax_flags;
63123 + else
63124 + return 0UL;
63125 +}
63126 +
63127 +/* if tsk != current then task_lock must be held on it */
63128 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
63129 +{
63130 + if (likely(tsk->mm)) {
63131 + tsk->mm->pax_flags = flags;
63132 + return 0;
63133 + }
63134 + return -EINVAL;
63135 +}
63136 +#endif
63137 +
63138 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63139 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
63140 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63141 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
63142 +#endif
63143 +
63144 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
63145 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
63146 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
63147 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
63148 +
63149 /* Future-safe accessor for struct task_struct's cpus_allowed. */
63150 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
63151
63152 @@ -2104,7 +2196,9 @@ void yield(void);
63153 extern struct exec_domain default_exec_domain;
63154
63155 union thread_union {
63156 +#ifndef CONFIG_X86
63157 struct thread_info thread_info;
63158 +#endif
63159 unsigned long stack[THREAD_SIZE/sizeof(long)];
63160 };
63161
63162 @@ -2137,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
63163 */
63164
63165 extern struct task_struct *find_task_by_vpid(pid_t nr);
63166 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
63167 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
63168 struct pid_namespace *ns);
63169
63170 @@ -2280,7 +2375,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
63171 extern void exit_itimers(struct signal_struct *);
63172 extern void flush_itimer_signals(void);
63173
63174 -extern void do_group_exit(int);
63175 +extern __noreturn void do_group_exit(int);
63176
63177 extern void daemonize(const char *, ...);
63178 extern int allow_signal(int);
63179 @@ -2478,13 +2573,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
63180
63181 #endif
63182
63183 -static inline int object_is_on_stack(void *obj)
63184 +static inline int object_starts_on_stack(void *obj)
63185 {
63186 - void *stack = task_stack_page(current);
63187 + const void *stack = task_stack_page(current);
63188
63189 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
63190 }
63191
63192 +#ifdef CONFIG_PAX_USERCOPY
63193 +extern int object_is_on_stack(const void *obj, unsigned long len);
63194 +#endif
63195 +
63196 extern void thread_info_cache_init(void);
63197
63198 #ifdef CONFIG_DEBUG_STACK_USAGE
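The MF_PAX_* bits live in mm->pax_flags and are read and written through pax_get_flags() and pax_set_flags(), which require the task lock when operating on another task. A small illustrative query helper, not part of the patch, assuming CONFIG_PAX_NOEXEC or CONFIG_PAX_ASLR is enabled so the accessors exist:

/* illustrative query of the per-mm PaX flags */
#include <linux/sched.h>

static int example_task_wants_mprotect_restrict(struct task_struct *tsk)
{
	unsigned long flags;

	task_lock(tsk);			/* needed whenever tsk != current */
	flags = pax_get_flags(tsk);
	task_unlock(tsk);

	return (flags & MF_PAX_MPROTECT) != 0;
}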
63199 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
63200 index 899fbb4..1cb4138 100644
63201 --- a/include/linux/screen_info.h
63202 +++ b/include/linux/screen_info.h
63203 @@ -43,7 +43,8 @@ struct screen_info {
63204 __u16 pages; /* 0x32 */
63205 __u16 vesa_attributes; /* 0x34 */
63206 __u32 capabilities; /* 0x36 */
63207 - __u8 _reserved[6]; /* 0x3a */
63208 + __u16 vesapm_size; /* 0x3a */
63209 + __u8 _reserved[4]; /* 0x3c */
63210 } __attribute__((packed));
63211
63212 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
63213 diff --git a/include/linux/security.h b/include/linux/security.h
63214 index 83c18e8..2d98860 100644
63215 --- a/include/linux/security.h
63216 +++ b/include/linux/security.h
63217 @@ -37,6 +37,7 @@
63218 #include <linux/xfrm.h>
63219 #include <linux/slab.h>
63220 #include <linux/xattr.h>
63221 +#include <linux/grsecurity.h>
63222 #include <net/flow.h>
63223
63224 /* Maximum number of letters for an LSM name string */
63225 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
63226 index 44f1514..2bbf6c1 100644
63227 --- a/include/linux/seq_file.h
63228 +++ b/include/linux/seq_file.h
63229 @@ -24,6 +24,9 @@ struct seq_file {
63230 struct mutex lock;
63231 const struct seq_operations *op;
63232 int poll_event;
63233 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63234 + u64 exec_id;
63235 +#endif
63236 void *private;
63237 };
63238
63239 @@ -33,6 +36,7 @@ struct seq_operations {
63240 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63241 int (*show) (struct seq_file *m, void *v);
63242 };
63243 +typedef struct seq_operations __no_const seq_operations_no_const;
63244
63245 #define SEQ_SKIP 1
63246
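The seq_operations_no_const typedef introduced above provides an explicitly writable alias of struct seq_operations: with the constify plugin, ops structures are made const by default, and the __no_const form is used where a copy of the structure is embedded in another object and still has to be modified at runtime (the tcp_seq_afinfo and udp_seq_afinfo hunks later in this patch use it for exactly that). A rough sketch of the pattern, with stand-in names and without the plugin itself:

/* Stand-in names; the real opt-out is the __no_const attribute consumed by
 * the constify GCC plugin, modelled here by a plain typedef. */
#include <stdio.h>

struct demo_ops {
	int (*show)(void *v);
};
typedef struct demo_ops demo_ops_writable;	/* plays the role of seq_operations_no_const */

struct holder {
	demo_ops_writable ops;			/* embedded copy that must stay writable */
};

static int demo_show(void *v) { (void)v; return 42; }

int main(void)
{
	struct holder h = { { 0 } };
	h.ops.show = demo_show;			/* legal: the member was not constified */
	printf("%d\n", h.ops.show(NULL));
	return 0;
}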
63247 diff --git a/include/linux/shm.h b/include/linux/shm.h
63248 index 92808b8..c28cac4 100644
63249 --- a/include/linux/shm.h
63250 +++ b/include/linux/shm.h
63251 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
63252
63253 /* The task created the shm object. NULL if the task is dead. */
63254 struct task_struct *shm_creator;
63255 +#ifdef CONFIG_GRKERNSEC
63256 + time_t shm_createtime;
63257 + pid_t shm_lapid;
63258 +#endif
63259 };
63260
63261 /* shm_mode upper byte flags */
63262 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
63263 index 42854ce..3b7d3c8 100644
63264 --- a/include/linux/skbuff.h
63265 +++ b/include/linux/skbuff.h
63266 @@ -655,7 +655,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
63267 */
63268 static inline int skb_queue_empty(const struct sk_buff_head *list)
63269 {
63270 - return list->next == (struct sk_buff *)list;
63271 + return list->next == (const struct sk_buff *)list;
63272 }
63273
63274 /**
63275 @@ -668,7 +668,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
63276 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63277 const struct sk_buff *skb)
63278 {
63279 - return skb->next == (struct sk_buff *)list;
63280 + return skb->next == (const struct sk_buff *)list;
63281 }
63282
63283 /**
63284 @@ -681,7 +681,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63285 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63286 const struct sk_buff *skb)
63287 {
63288 - return skb->prev == (struct sk_buff *)list;
63289 + return skb->prev == (const struct sk_buff *)list;
63290 }
63291
63292 /**
63293 @@ -1558,7 +1558,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
63294 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63295 */
63296 #ifndef NET_SKB_PAD
63297 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
63298 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
63299 #endif
63300
63301 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
63302 diff --git a/include/linux/slab.h b/include/linux/slab.h
63303 index 573c809..eaaf6ea 100644
63304 --- a/include/linux/slab.h
63305 +++ b/include/linux/slab.h
63306 @@ -11,12 +11,20 @@
63307
63308 #include <linux/gfp.h>
63309 #include <linux/types.h>
63310 +#include <linux/err.h>
63311
63312 /*
63313 * Flags to pass to kmem_cache_create().
63314 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63315 */
63316 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63317 +
63318 +#ifdef CONFIG_PAX_USERCOPY
63319 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63320 +#else
63321 +#define SLAB_USERCOPY 0x00000000UL
63322 +#endif
63323 +
63324 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63325 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63326 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63327 @@ -87,10 +95,13 @@
63328 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63329 * Both make kfree a no-op.
63330 */
63331 -#define ZERO_SIZE_PTR ((void *)16)
63332 +#define ZERO_SIZE_PTR \
63333 +({ \
63334 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63335 + (void *)(-MAX_ERRNO-1L); \
63336 +})
63337
63338 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63339 - (unsigned long)ZERO_SIZE_PTR)
63340 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
63341
63342 /*
63343 * struct kmem_cache related prototypes
63344 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
63345 void kfree(const void *);
63346 void kzfree(const void *);
63347 size_t ksize(const void *);
63348 +void check_object_size(const void *ptr, unsigned long n, bool to);
63349
63350 /*
63351 * Allocator specific definitions. These are mainly used to establish optimized
63352 @@ -287,7 +299,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
63353 */
63354 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63355 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63356 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63357 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
63358 #define kmalloc_track_caller(size, flags) \
63359 __kmalloc_track_caller(size, flags, _RET_IP_)
63360 #else
63361 @@ -306,7 +318,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63362 */
63363 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63364 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63365 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
63366 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
63367 #define kmalloc_node_track_caller(size, flags, node) \
63368 __kmalloc_node_track_caller(size, flags, node, \
63369 _RET_IP_)
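The slab.h hunk above moves ZERO_SIZE_PTR from (void *)16 to just below the reserved errno range and rewrites ZERO_OR_NULL_PTR() as a single unsigned comparison: subtracting 1 makes NULL wrap to ULONG_MAX, so one test covers both NULL and everything at or above ZERO_SIZE_PTR. A userspace check of that arithmetic (MAX_ERRNO is 4095 as in the kernel; the BUILD_BUG_ON from the real macro is dropped here):

/* Userspace sketch of the wrap-around range test used above. */
#include <assert.h>
#include <stddef.h>

#define MAX_ERRNO	4095L
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	assert(ZERO_OR_NULL_PTR(NULL));			/* 0 - 1 wraps to ULONG_MAX */
	assert(ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));	/* start of the reserved range */
	assert(!ZERO_OR_NULL_PTR((void *)0x1000));	/* an ordinary pointer passes */
	return 0;
}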
63370 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63371 index fbd1117..d4d8ef8 100644
63372 --- a/include/linux/slab_def.h
63373 +++ b/include/linux/slab_def.h
63374 @@ -66,10 +66,10 @@ struct kmem_cache {
63375 unsigned long node_allocs;
63376 unsigned long node_frees;
63377 unsigned long node_overflow;
63378 - atomic_t allochit;
63379 - atomic_t allocmiss;
63380 - atomic_t freehit;
63381 - atomic_t freemiss;
63382 + atomic_unchecked_t allochit;
63383 + atomic_unchecked_t allocmiss;
63384 + atomic_unchecked_t freehit;
63385 + atomic_unchecked_t freemiss;
63386
63387 /*
63388 * If debugging is enabled, then the allocator can add additional
63389 @@ -107,7 +107,7 @@ struct cache_sizes {
63390 extern struct cache_sizes malloc_sizes[];
63391
63392 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63393 -void *__kmalloc(size_t size, gfp_t flags);
63394 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63395
63396 #ifdef CONFIG_TRACING
63397 extern void *kmem_cache_alloc_trace(size_t size,
63398 @@ -160,7 +160,7 @@ found:
63399 }
63400
63401 #ifdef CONFIG_NUMA
63402 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
63403 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63404 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63405
63406 #ifdef CONFIG_TRACING
63407 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
63408 index 0ec00b3..39cb7fc 100644
63409 --- a/include/linux/slob_def.h
63410 +++ b/include/linux/slob_def.h
63411 @@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
63412 return kmem_cache_alloc_node(cachep, flags, -1);
63413 }
63414
63415 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
63416 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63417
63418 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63419 {
63420 @@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
63421 return __kmalloc_node(size, flags, -1);
63422 }
63423
63424 +static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63425 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
63426 {
63427 return kmalloc(size, flags);
63428 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
63429 index a32bcfd..a80ed70 100644
63430 --- a/include/linux/slub_def.h
63431 +++ b/include/linux/slub_def.h
63432 @@ -89,7 +89,7 @@ struct kmem_cache {
63433 struct kmem_cache_order_objects max;
63434 struct kmem_cache_order_objects min;
63435 gfp_t allocflags; /* gfp flags to use on each alloc */
63436 - int refcount; /* Refcount for slab cache destroy */
63437 + atomic_t refcount; /* Refcount for slab cache destroy */
63438 void (*ctor)(void *);
63439 int inuse; /* Offset to metadata */
63440 int align; /* Alignment */
63441 @@ -150,6 +150,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
63442 * Sorry that the following has to be that ugly but some versions of GCC
63443 * have trouble with constant propagation and loops.
63444 */
63445 +static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
63446 static __always_inline int kmalloc_index(size_t size)
63447 {
63448 if (!size)
63449 @@ -215,7 +216,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
63450 }
63451
63452 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63453 -void *__kmalloc(size_t size, gfp_t flags);
63454 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
63455
63456 static __always_inline void *
63457 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
63458 @@ -256,6 +257,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
63459 }
63460 #endif
63461
63462 +static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
63463 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
63464 {
63465 unsigned int order = get_order(size);
63466 @@ -281,7 +283,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
63467 }
63468
63469 #ifdef CONFIG_NUMA
63470 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
63471 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63472 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63473
63474 #ifdef CONFIG_TRACING
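The __size_overflow(1) annotations added to __kmalloc, __kmalloc_node, kmalloc_index and kmalloc_large above mark the size argument for the size_overflow GCC plugin, which instruments the expressions feeding that argument so a wrapped integer is caught before it reaches the allocator. A plain-C sketch of the failure mode the instrumentation targets, with an explicit guard standing in for the plugin's generated check (calc_size and the numbers are illustrative):

/* Sketch of the overflow the annotation is aimed at: a multiplication that
 * wraps before it ever reaches the allocator. */
#include <stdint.h>
#include <stdio.h>

static size_t calc_size(size_t nmemb, size_t elem)
{
	if (elem != 0 && nmemb > SIZE_MAX / elem)
		return 0;				/* would wrap: reject instead */
	return nmemb * elem;
}

int main(void)
{
	printf("%zu\n", calc_size(SIZE_MAX / 2 + 2, 2));	/* 0, not a tiny wrapped value */
	return 0;
}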
63475 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
63476 index de8832d..0147b46 100644
63477 --- a/include/linux/sonet.h
63478 +++ b/include/linux/sonet.h
63479 @@ -61,7 +61,7 @@ struct sonet_stats {
63480 #include <linux/atomic.h>
63481
63482 struct k_sonet_stats {
63483 -#define __HANDLE_ITEM(i) atomic_t i
63484 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
63485 __SONET_ITEMS
63486 #undef __HANDLE_ITEM
63487 };
63488 diff --git a/include/linux/stddef.h b/include/linux/stddef.h
63489 index 6a40c76..1747b67 100644
63490 --- a/include/linux/stddef.h
63491 +++ b/include/linux/stddef.h
63492 @@ -3,14 +3,10 @@
63493
63494 #include <linux/compiler.h>
63495
63496 +#ifdef __KERNEL__
63497 +
63498 #undef NULL
63499 -#if defined(__cplusplus)
63500 -#define NULL 0
63501 -#else
63502 #define NULL ((void *)0)
63503 -#endif
63504 -
63505 -#ifdef __KERNEL__
63506
63507 enum {
63508 false = 0,
63509 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
63510 index 2c5993a..b0e79f0 100644
63511 --- a/include/linux/sunrpc/clnt.h
63512 +++ b/include/linux/sunrpc/clnt.h
63513 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
63514 {
63515 switch (sap->sa_family) {
63516 case AF_INET:
63517 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
63518 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
63519 case AF_INET6:
63520 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
63521 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
63522 }
63523 return 0;
63524 }
63525 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
63526 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63527 const struct sockaddr *src)
63528 {
63529 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63530 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63531 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63532
63533 dsin->sin_family = ssin->sin_family;
63534 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
63535 if (sa->sa_family != AF_INET6)
63536 return 0;
63537
63538 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63539 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63540 }
63541
63542 #endif /* __KERNEL__ */
63543 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
63544 index e775689..9e206d9 100644
63545 --- a/include/linux/sunrpc/sched.h
63546 +++ b/include/linux/sunrpc/sched.h
63547 @@ -105,6 +105,7 @@ struct rpc_call_ops {
63548 void (*rpc_call_done)(struct rpc_task *, void *);
63549 void (*rpc_release)(void *);
63550 };
63551 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63552
63553 struct rpc_task_setup {
63554 struct rpc_task *task;
63555 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
63556 index c14fe86..393245e 100644
63557 --- a/include/linux/sunrpc/svc_rdma.h
63558 +++ b/include/linux/sunrpc/svc_rdma.h
63559 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63560 extern unsigned int svcrdma_max_requests;
63561 extern unsigned int svcrdma_max_req_size;
63562
63563 -extern atomic_t rdma_stat_recv;
63564 -extern atomic_t rdma_stat_read;
63565 -extern atomic_t rdma_stat_write;
63566 -extern atomic_t rdma_stat_sq_starve;
63567 -extern atomic_t rdma_stat_rq_starve;
63568 -extern atomic_t rdma_stat_rq_poll;
63569 -extern atomic_t rdma_stat_rq_prod;
63570 -extern atomic_t rdma_stat_sq_poll;
63571 -extern atomic_t rdma_stat_sq_prod;
63572 +extern atomic_unchecked_t rdma_stat_recv;
63573 +extern atomic_unchecked_t rdma_stat_read;
63574 +extern atomic_unchecked_t rdma_stat_write;
63575 +extern atomic_unchecked_t rdma_stat_sq_starve;
63576 +extern atomic_unchecked_t rdma_stat_rq_starve;
63577 +extern atomic_unchecked_t rdma_stat_rq_poll;
63578 +extern atomic_unchecked_t rdma_stat_rq_prod;
63579 +extern atomic_unchecked_t rdma_stat_sq_poll;
63580 +extern atomic_unchecked_t rdma_stat_sq_prod;
63581
63582 #define RPCRDMA_VERSION 1
63583
63584 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
63585 index bb9127d..34ab358 100644
63586 --- a/include/linux/sysctl.h
63587 +++ b/include/linux/sysctl.h
63588 @@ -155,7 +155,11 @@ enum
63589 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63590 };
63591
63592 -
63593 +#ifdef CONFIG_PAX_SOFTMODE
63594 +enum {
63595 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63596 +};
63597 +#endif
63598
63599 /* CTL_VM names: */
63600 enum
63601 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
63602
63603 extern int proc_dostring(struct ctl_table *, int,
63604 void __user *, size_t *, loff_t *);
63605 +extern int proc_dostring_modpriv(struct ctl_table *, int,
63606 + void __user *, size_t *, loff_t *);
63607 extern int proc_dointvec(struct ctl_table *, int,
63608 void __user *, size_t *, loff_t *);
63609 extern int proc_dointvec_minmax(struct ctl_table *, int,
63610 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
63611 index a71a292..51bd91d 100644
63612 --- a/include/linux/tracehook.h
63613 +++ b/include/linux/tracehook.h
63614 @@ -54,12 +54,12 @@ struct linux_binprm;
63615 /*
63616 * ptrace report for syscall entry and exit looks identical.
63617 */
63618 -static inline void ptrace_report_syscall(struct pt_regs *regs)
63619 +static inline int ptrace_report_syscall(struct pt_regs *regs)
63620 {
63621 int ptrace = current->ptrace;
63622
63623 if (!(ptrace & PT_PTRACED))
63624 - return;
63625 + return 0;
63626
63627 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
63628
63629 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
63630 send_sig(current->exit_code, current, 1);
63631 current->exit_code = 0;
63632 }
63633 +
63634 + return fatal_signal_pending(current);
63635 }
63636
63637 /**
63638 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
63639 static inline __must_check int tracehook_report_syscall_entry(
63640 struct pt_regs *regs)
63641 {
63642 - ptrace_report_syscall(regs);
63643 - return 0;
63644 + return ptrace_report_syscall(regs);
63645 }
63646
63647 /**
63648 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63649 index ff7dc08..893e1bd 100644
63650 --- a/include/linux/tty_ldisc.h
63651 +++ b/include/linux/tty_ldisc.h
63652 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
63653
63654 struct module *owner;
63655
63656 - int refcount;
63657 + atomic_t refcount;
63658 };
63659
63660 struct tty_ldisc {
63661 diff --git a/include/linux/types.h b/include/linux/types.h
63662 index e5fa503..df6e8a4 100644
63663 --- a/include/linux/types.h
63664 +++ b/include/linux/types.h
63665 @@ -214,10 +214,26 @@ typedef struct {
63666 int counter;
63667 } atomic_t;
63668
63669 +#ifdef CONFIG_PAX_REFCOUNT
63670 +typedef struct {
63671 + int counter;
63672 +} atomic_unchecked_t;
63673 +#else
63674 +typedef atomic_t atomic_unchecked_t;
63675 +#endif
63676 +
63677 #ifdef CONFIG_64BIT
63678 typedef struct {
63679 long counter;
63680 } atomic64_t;
63681 +
63682 +#ifdef CONFIG_PAX_REFCOUNT
63683 +typedef struct {
63684 + long counter;
63685 +} atomic64_unchecked_t;
63686 +#else
63687 +typedef atomic64_t atomic64_unchecked_t;
63688 +#endif
63689 #endif
63690
63691 struct list_head {
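atomic_unchecked_t, defined above, has the same layout as atomic_t; the difference only matters under CONFIG_PAX_REFCOUNT, where the ordinary atomic operations detect overflow and the _unchecked variants keep plain wrapping semantics for counters whose overflow is harmless (statistics, sequence numbers, IDs). That is why so many stats fields elsewhere in this patch are converted. A userspace contrast between a checked and a wrap-tolerant increment (illustrative only; the kernel performs the check inside the atomic ops themselves):

#include <limits.h>
#include <stdio.h>

static int checked_inc(int *v)
{
	if (*v == INT_MAX)
		return -1;		/* would overflow: report instead of wrapping */
	(*v)++;
	return 0;
}

int main(void)
{
	int refs = INT_MAX;
	unsigned int stats = UINT_MAX;

	printf("checked: %d\n", checked_inc(&refs));	/* -1: refused */
	stats++;					/* wraps to 0, harmless for a statistic */
	printf("stats: %u\n", stats);
	return 0;
}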
63692 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63693 index 5ca0951..ab496a5 100644
63694 --- a/include/linux/uaccess.h
63695 +++ b/include/linux/uaccess.h
63696 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63697 long ret; \
63698 mm_segment_t old_fs = get_fs(); \
63699 \
63700 - set_fs(KERNEL_DS); \
63701 pagefault_disable(); \
63702 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63703 - pagefault_enable(); \
63704 + set_fs(KERNEL_DS); \
63705 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63706 set_fs(old_fs); \
63707 + pagefault_enable(); \
63708 ret; \
63709 })
63710
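The uaccess.h reorder above keeps the widened address limit strictly inside the pagefault-disabled window: set_fs(KERNEL_DS) now happens after pagefault_disable() and is undone before pagefault_enable(), so the two save/restore pairs nest LIFO. A compilable stand-in for that nesting (the helpers below are placeholders, not kernel APIs):

/* Only the LIFO nesting enforced by the reordered hunk is modelled here:
 * widen last, restore first. */
#include <stdio.h>

static int faults_disabled;
static int addr_limit;			/* 0 = USER, 1 = KERNEL, purely illustrative */

static void pf_disable(void) { faults_disabled = 1; }
static void pf_enable(void)  { faults_disabled = 0; }
static int  widen_limit(void) { int old = addr_limit; addr_limit = 1; return old; }
static void restore_limit(int old) { addr_limit = old; }

int main(void)
{
	int old;

	pf_disable();
	old = widen_limit();
	/* ...the single guarded copy would go here... */
	restore_limit(old);
	pf_enable();

	printf("limit=%d faults_disabled=%d\n", addr_limit, faults_disabled);
	return 0;
}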
63711 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63712 index 99c1b4d..bb94261 100644
63713 --- a/include/linux/unaligned/access_ok.h
63714 +++ b/include/linux/unaligned/access_ok.h
63715 @@ -6,32 +6,32 @@
63716
63717 static inline u16 get_unaligned_le16(const void *p)
63718 {
63719 - return le16_to_cpup((__le16 *)p);
63720 + return le16_to_cpup((const __le16 *)p);
63721 }
63722
63723 static inline u32 get_unaligned_le32(const void *p)
63724 {
63725 - return le32_to_cpup((__le32 *)p);
63726 + return le32_to_cpup((const __le32 *)p);
63727 }
63728
63729 static inline u64 get_unaligned_le64(const void *p)
63730 {
63731 - return le64_to_cpup((__le64 *)p);
63732 + return le64_to_cpup((const __le64 *)p);
63733 }
63734
63735 static inline u16 get_unaligned_be16(const void *p)
63736 {
63737 - return be16_to_cpup((__be16 *)p);
63738 + return be16_to_cpup((const __be16 *)p);
63739 }
63740
63741 static inline u32 get_unaligned_be32(const void *p)
63742 {
63743 - return be32_to_cpup((__be32 *)p);
63744 + return be32_to_cpup((const __be32 *)p);
63745 }
63746
63747 static inline u64 get_unaligned_be64(const void *p)
63748 {
63749 - return be64_to_cpup((__be64 *)p);
63750 + return be64_to_cpup((const __be64 *)p);
63751 }
63752
63753 static inline void put_unaligned_le16(u16 val, void *p)
63754 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
63755 index 0d3f988..000f101 100644
63756 --- a/include/linux/usb/renesas_usbhs.h
63757 +++ b/include/linux/usb/renesas_usbhs.h
63758 @@ -39,7 +39,7 @@ enum {
63759 */
63760 struct renesas_usbhs_driver_callback {
63761 int (*notify_hotplug)(struct platform_device *pdev);
63762 -};
63763 +} __no_const;
63764
63765 /*
63766 * callback functions for platform
63767 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
63768 * VBUS control is needed for Host
63769 */
63770 int (*set_vbus)(struct platform_device *pdev, int enable);
63771 -};
63772 +} __no_const;
63773
63774 /*
63775 * parameters for renesas usbhs
63776 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63777 index 6f8fbcf..8259001 100644
63778 --- a/include/linux/vermagic.h
63779 +++ b/include/linux/vermagic.h
63780 @@ -25,9 +25,35 @@
63781 #define MODULE_ARCH_VERMAGIC ""
63782 #endif
63783
63784 +#ifdef CONFIG_PAX_REFCOUNT
63785 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
63786 +#else
63787 +#define MODULE_PAX_REFCOUNT ""
63788 +#endif
63789 +
63790 +#ifdef CONSTIFY_PLUGIN
63791 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63792 +#else
63793 +#define MODULE_CONSTIFY_PLUGIN ""
63794 +#endif
63795 +
63796 +#ifdef STACKLEAK_PLUGIN
63797 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63798 +#else
63799 +#define MODULE_STACKLEAK_PLUGIN ""
63800 +#endif
63801 +
63802 +#ifdef CONFIG_GRKERNSEC
63803 +#define MODULE_GRSEC "GRSEC "
63804 +#else
63805 +#define MODULE_GRSEC ""
63806 +#endif
63807 +
63808 #define VERMAGIC_STRING \
63809 UTS_RELEASE " " \
63810 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63811 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63812 - MODULE_ARCH_VERMAGIC
63813 + MODULE_ARCH_VERMAGIC \
63814 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63815 + MODULE_GRSEC
63816
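The extra tokens above are appended to VERMAGIC_STRING by ordinary compile-time string-literal concatenation, so a module built without the same REFCOUNT/CONSTIFY/STACKLEAK/GRSEC options carries a different magic string and is refused at load time. A userspace illustration of the concatenation (the release string is a placeholder, and only a subset of the tokens from the hunk is shown):

#include <stdio.h>

#define UTS_RELEASE_DEMO	"3.3.7-grsec"	/* placeholder release string */
#define MODULE_PAX_REFCOUNT	"REFCOUNT "
#define MODULE_CONSTIFY_PLUGIN	"CONSTIFY_PLUGIN "
#define MODULE_GRSEC		"GRSEC "

#define VERMAGIC_DEMO \
	UTS_RELEASE_DEMO " " \
	MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_GRSEC

int main(void)
{
	puts(VERMAGIC_DEMO);	/* "3.3.7-grsec REFCOUNT CONSTIFY_PLUGIN GRSEC " */
	return 0;
}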
63817 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63818 index dcdfc2b..ec79ab5 100644
63819 --- a/include/linux/vmalloc.h
63820 +++ b/include/linux/vmalloc.h
63821 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63822 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63823 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63824 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63825 +
63826 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63827 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63828 +#endif
63829 +
63830 /* bits [20..32] reserved for arch specific ioremap internals */
63831
63832 /*
63833 @@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
63834 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
63835 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
63836 unsigned long start, unsigned long end, gfp_t gfp_mask,
63837 - pgprot_t prot, int node, void *caller);
63838 + pgprot_t prot, int node, void *caller) __size_overflow(1);
63839 extern void vfree(const void *addr);
63840
63841 extern void *vmap(struct page **pages, unsigned int count,
63842 @@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
63843 extern void free_vm_area(struct vm_struct *area);
63844
63845 /* for /dev/kmem */
63846 -extern long vread(char *buf, char *addr, unsigned long count);
63847 -extern long vwrite(char *buf, char *addr, unsigned long count);
63848 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
63849 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
63850
63851 /*
63852 * Internals. Dont't use..
63853 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63854 index 65efb92..137adbb 100644
63855 --- a/include/linux/vmstat.h
63856 +++ b/include/linux/vmstat.h
63857 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63858 /*
63859 * Zone based page accounting with per cpu differentials.
63860 */
63861 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63862 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63863
63864 static inline void zone_page_state_add(long x, struct zone *zone,
63865 enum zone_stat_item item)
63866 {
63867 - atomic_long_add(x, &zone->vm_stat[item]);
63868 - atomic_long_add(x, &vm_stat[item]);
63869 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63870 + atomic_long_add_unchecked(x, &vm_stat[item]);
63871 }
63872
63873 static inline unsigned long global_page_state(enum zone_stat_item item)
63874 {
63875 - long x = atomic_long_read(&vm_stat[item]);
63876 + long x = atomic_long_read_unchecked(&vm_stat[item]);
63877 #ifdef CONFIG_SMP
63878 if (x < 0)
63879 x = 0;
63880 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63881 static inline unsigned long zone_page_state(struct zone *zone,
63882 enum zone_stat_item item)
63883 {
63884 - long x = atomic_long_read(&zone->vm_stat[item]);
63885 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63886 #ifdef CONFIG_SMP
63887 if (x < 0)
63888 x = 0;
63889 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63890 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63891 enum zone_stat_item item)
63892 {
63893 - long x = atomic_long_read(&zone->vm_stat[item]);
63894 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63895
63896 #ifdef CONFIG_SMP
63897 int cpu;
63898 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63899
63900 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63901 {
63902 - atomic_long_inc(&zone->vm_stat[item]);
63903 - atomic_long_inc(&vm_stat[item]);
63904 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
63905 + atomic_long_inc_unchecked(&vm_stat[item]);
63906 }
63907
63908 static inline void __inc_zone_page_state(struct page *page,
63909 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63910
63911 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63912 {
63913 - atomic_long_dec(&zone->vm_stat[item]);
63914 - atomic_long_dec(&vm_stat[item]);
63915 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
63916 + atomic_long_dec_unchecked(&vm_stat[item]);
63917 }
63918
63919 static inline void __dec_zone_page_state(struct page *page,
63920 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63921 index e5d1220..ef6e406 100644
63922 --- a/include/linux/xattr.h
63923 +++ b/include/linux/xattr.h
63924 @@ -57,6 +57,11 @@
63925 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63926 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63927
63928 +/* User namespace */
63929 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63930 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
63931 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63932 +
63933 #ifdef __KERNEL__
63934
63935 #include <linux/types.h>
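With XATTR_USER_PREFIX being "user.", the new XATTR_NAME_PAX_FLAGS above expands to "user.pax.flags", so per-binary PaX flags can travel with the file as a user-namespace extended attribute. A sketch of reading and writing it with the standard xattr syscalls; the flag string "m" (soft-disable MPROTECT) follows the convention of userspace PaX tools and is illustrative rather than authoritative:

#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "./some_binary";

	if (setxattr(path, "user.pax.flags", "m", 1, 0) != 0)
		perror("setxattr");

	char buf[16] = "";
	ssize_t n = getxattr(path, "user.pax.flags", buf, sizeof(buf) - 1);
	if (n >= 0)
		printf("user.pax.flags = %.*s\n", (int)n, buf);
	else
		perror("getxattr");
	return 0;
}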
63936 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63937 index 4aeff96..b378cdc 100644
63938 --- a/include/media/saa7146_vv.h
63939 +++ b/include/media/saa7146_vv.h
63940 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
63941 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63942
63943 /* the extension can override this */
63944 - struct v4l2_ioctl_ops ops;
63945 + v4l2_ioctl_ops_no_const ops;
63946 /* pointer to the saa7146 core ops */
63947 const struct v4l2_ioctl_ops *core_ops;
63948
63949 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63950 index c7c40f1..4f01585 100644
63951 --- a/include/media/v4l2-dev.h
63952 +++ b/include/media/v4l2-dev.h
63953 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63954
63955
63956 struct v4l2_file_operations {
63957 - struct module *owner;
63958 + struct module * const owner;
63959 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63960 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63961 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63962 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
63963 int (*open) (struct file *);
63964 int (*release) (struct file *);
63965 };
63966 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63967
63968 /*
63969 * Newer version of video_device, handled by videodev2.c
63970 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63971 index 3f5d60f..44210ed 100644
63972 --- a/include/media/v4l2-ioctl.h
63973 +++ b/include/media/v4l2-ioctl.h
63974 @@ -278,7 +278,7 @@ struct v4l2_ioctl_ops {
63975 long (*vidioc_default) (struct file *file, void *fh,
63976 bool valid_prio, int cmd, void *arg);
63977 };
63978 -
63979 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63980
63981 /* v4l debugging and diagnostics */
63982
63983 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63984 index 8d55251..dfe5b0a 100644
63985 --- a/include/net/caif/caif_hsi.h
63986 +++ b/include/net/caif/caif_hsi.h
63987 @@ -98,7 +98,7 @@ struct cfhsi_drv {
63988 void (*rx_done_cb) (struct cfhsi_drv *drv);
63989 void (*wake_up_cb) (struct cfhsi_drv *drv);
63990 void (*wake_down_cb) (struct cfhsi_drv *drv);
63991 -};
63992 +} __no_const;
63993
63994 /* Structure implemented by HSI device. */
63995 struct cfhsi_dev {
63996 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63997 index 9e5425b..8136ffc 100644
63998 --- a/include/net/caif/cfctrl.h
63999 +++ b/include/net/caif/cfctrl.h
64000 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
64001 void (*radioset_rsp)(void);
64002 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
64003 struct cflayer *client_layer);
64004 -};
64005 +} __no_const;
64006
64007 /* Link Setup Parameters for CAIF-Links. */
64008 struct cfctrl_link_param {
64009 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
64010 struct cfctrl {
64011 struct cfsrvl serv;
64012 struct cfctrl_rsp res;
64013 - atomic_t req_seq_no;
64014 - atomic_t rsp_seq_no;
64015 + atomic_unchecked_t req_seq_no;
64016 + atomic_unchecked_t rsp_seq_no;
64017 struct list_head list;
64018 /* Protects from simultaneous access to first_req list */
64019 spinlock_t info_list_lock;
64020 diff --git a/include/net/flow.h b/include/net/flow.h
64021 index 6c469db..7743b8e 100644
64022 --- a/include/net/flow.h
64023 +++ b/include/net/flow.h
64024 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
64025
64026 extern void flow_cache_flush(void);
64027 extern void flow_cache_flush_deferred(void);
64028 -extern atomic_t flow_cache_genid;
64029 +extern atomic_unchecked_t flow_cache_genid;
64030
64031 #endif
64032 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
64033 index b94765e..053f68b 100644
64034 --- a/include/net/inetpeer.h
64035 +++ b/include/net/inetpeer.h
64036 @@ -48,8 +48,8 @@ struct inet_peer {
64037 */
64038 union {
64039 struct {
64040 - atomic_t rid; /* Frag reception counter */
64041 - atomic_t ip_id_count; /* IP ID for the next packet */
64042 + atomic_unchecked_t rid; /* Frag reception counter */
64043 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
64044 __u32 tcp_ts;
64045 __u32 tcp_ts_stamp;
64046 };
64047 @@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
64048 more++;
64049 inet_peer_refcheck(p);
64050 do {
64051 - old = atomic_read(&p->ip_id_count);
64052 + old = atomic_read_unchecked(&p->ip_id_count);
64053 new = old + more;
64054 if (!new)
64055 new = 1;
64056 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
64057 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
64058 return new;
64059 }
64060
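inet_getid() above is a standard compare-and-swap retry loop, now using the unchecked cmpxchg because the IP ID is allowed to wrap. The same retry pattern in portable C11 atomics (the skip-zero rule mirrors the kernel loop; everything else is illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ip_id = 0;

static int getid(int more)
{
	int old, new;

	do {
		old = atomic_load(&ip_id);
		new = old + more;
		if (!new)
			new = 1;	/* skip 0, as the kernel loop does */
	} while (!atomic_compare_exchange_weak(&ip_id, &old, new));
	return new;
}

int main(void)
{
	printf("%d %d\n", getid(1), getid(2));
	return 0;
}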
64061 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
64062 index 10422ef..662570f 100644
64063 --- a/include/net/ip_fib.h
64064 +++ b/include/net/ip_fib.h
64065 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
64066
64067 #define FIB_RES_SADDR(net, res) \
64068 ((FIB_RES_NH(res).nh_saddr_genid == \
64069 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
64070 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
64071 FIB_RES_NH(res).nh_saddr : \
64072 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
64073 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
64074 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
64075 index ebe517f..1bd286b 100644
64076 --- a/include/net/ip_vs.h
64077 +++ b/include/net/ip_vs.h
64078 @@ -509,7 +509,7 @@ struct ip_vs_conn {
64079 struct ip_vs_conn *control; /* Master control connection */
64080 atomic_t n_control; /* Number of controlled ones */
64081 struct ip_vs_dest *dest; /* real server */
64082 - atomic_t in_pkts; /* incoming packet counter */
64083 + atomic_unchecked_t in_pkts; /* incoming packet counter */
64084
64085 /* packet transmitter for different forwarding methods. If it
64086 mangles the packet, it must return NF_DROP or better NF_STOLEN,
64087 @@ -647,7 +647,7 @@ struct ip_vs_dest {
64088 __be16 port; /* port number of the server */
64089 union nf_inet_addr addr; /* IP address of the server */
64090 volatile unsigned flags; /* dest status flags */
64091 - atomic_t conn_flags; /* flags to copy to conn */
64092 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
64093 atomic_t weight; /* server weight */
64094
64095 atomic_t refcnt; /* reference counter */
64096 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
64097 index 69b610a..fe3962c 100644
64098 --- a/include/net/irda/ircomm_core.h
64099 +++ b/include/net/irda/ircomm_core.h
64100 @@ -51,7 +51,7 @@ typedef struct {
64101 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
64102 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
64103 struct ircomm_info *);
64104 -} call_t;
64105 +} __no_const call_t;
64106
64107 struct ircomm_cb {
64108 irda_queue_t queue;
64109 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
64110 index 59ba38bc..d515662 100644
64111 --- a/include/net/irda/ircomm_tty.h
64112 +++ b/include/net/irda/ircomm_tty.h
64113 @@ -35,6 +35,7 @@
64114 #include <linux/termios.h>
64115 #include <linux/timer.h>
64116 #include <linux/tty.h> /* struct tty_struct */
64117 +#include <asm/local.h>
64118
64119 #include <net/irda/irias_object.h>
64120 #include <net/irda/ircomm_core.h>
64121 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
64122 unsigned short close_delay;
64123 unsigned short closing_wait; /* time to wait before closing */
64124
64125 - int open_count;
64126 - int blocked_open; /* # of blocked opens */
64127 + local_t open_count;
64128 + local_t blocked_open; /* # of blocked opens */
64129
64130 /* Protect concurent access to :
64131 * o self->open_count
64132 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
64133 index 0954ec9..7413562 100644
64134 --- a/include/net/iucv/af_iucv.h
64135 +++ b/include/net/iucv/af_iucv.h
64136 @@ -138,7 +138,7 @@ struct iucv_sock {
64137 struct iucv_sock_list {
64138 struct hlist_head head;
64139 rwlock_t lock;
64140 - atomic_t autobind_name;
64141 + atomic_unchecked_t autobind_name;
64142 };
64143
64144 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
64145 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
64146 index 34c996f..bb3b4d4 100644
64147 --- a/include/net/neighbour.h
64148 +++ b/include/net/neighbour.h
64149 @@ -123,7 +123,7 @@ struct neigh_ops {
64150 void (*error_report)(struct neighbour *, struct sk_buff *);
64151 int (*output)(struct neighbour *, struct sk_buff *);
64152 int (*connected_output)(struct neighbour *, struct sk_buff *);
64153 -};
64154 +} __do_const;
64155
64156 struct pneigh_entry {
64157 struct pneigh_entry *next;
64158 diff --git a/include/net/netlink.h b/include/net/netlink.h
64159 index cb1f350..3279d2c 100644
64160 --- a/include/net/netlink.h
64161 +++ b/include/net/netlink.h
64162 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
64163 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
64164 {
64165 if (mark)
64166 - skb_trim(skb, (unsigned char *) mark - skb->data);
64167 + skb_trim(skb, (const unsigned char *) mark - skb->data);
64168 }
64169
64170 /**
64171 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
64172 index bbd023a..97c6d0d 100644
64173 --- a/include/net/netns/ipv4.h
64174 +++ b/include/net/netns/ipv4.h
64175 @@ -57,8 +57,8 @@ struct netns_ipv4 {
64176 unsigned int sysctl_ping_group_range[2];
64177 long sysctl_tcp_mem[3];
64178
64179 - atomic_t rt_genid;
64180 - atomic_t dev_addr_genid;
64181 + atomic_unchecked_t rt_genid;
64182 + atomic_unchecked_t dev_addr_genid;
64183
64184 #ifdef CONFIG_IP_MROUTE
64185 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
64186 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
64187 index d368561..96aaa17 100644
64188 --- a/include/net/sctp/sctp.h
64189 +++ b/include/net/sctp/sctp.h
64190 @@ -318,9 +318,9 @@ do { \
64191
64192 #else /* SCTP_DEBUG */
64193
64194 -#define SCTP_DEBUG_PRINTK(whatever...)
64195 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
64196 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
64197 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
64198 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
64199 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
64200 #define SCTP_ENABLE_DEBUG
64201 #define SCTP_DISABLE_DEBUG
64202 #define SCTP_ASSERT(expr, str, func)
64203 diff --git a/include/net/sock.h b/include/net/sock.h
64204 index 91c1c8b..15ae923 100644
64205 --- a/include/net/sock.h
64206 +++ b/include/net/sock.h
64207 @@ -299,7 +299,7 @@ struct sock {
64208 #ifdef CONFIG_RPS
64209 __u32 sk_rxhash;
64210 #endif
64211 - atomic_t sk_drops;
64212 + atomic_unchecked_t sk_drops;
64213 int sk_rcvbuf;
64214
64215 struct sk_filter __rcu *sk_filter;
64216 @@ -1660,7 +1660,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
64217 }
64218
64219 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
64220 - char __user *from, char *to,
64221 + char __user *from, unsigned char *to,
64222 int copy, int offset)
64223 {
64224 if (skb->ip_summed == CHECKSUM_NONE) {
64225 diff --git a/include/net/tcp.h b/include/net/tcp.h
64226 index 2d80c29..aa07caf 100644
64227 --- a/include/net/tcp.h
64228 +++ b/include/net/tcp.h
64229 @@ -1426,7 +1426,7 @@ struct tcp_seq_afinfo {
64230 char *name;
64231 sa_family_t family;
64232 const struct file_operations *seq_fops;
64233 - struct seq_operations seq_ops;
64234 + seq_operations_no_const seq_ops;
64235 };
64236
64237 struct tcp_iter_state {
64238 diff --git a/include/net/udp.h b/include/net/udp.h
64239 index e39592f..fef9680 100644
64240 --- a/include/net/udp.h
64241 +++ b/include/net/udp.h
64242 @@ -243,7 +243,7 @@ struct udp_seq_afinfo {
64243 sa_family_t family;
64244 struct udp_table *udp_table;
64245 const struct file_operations *seq_fops;
64246 - struct seq_operations seq_ops;
64247 + seq_operations_no_const seq_ops;
64248 };
64249
64250 struct udp_iter_state {
64251 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
64252 index 89174e2..1f82598 100644
64253 --- a/include/net/xfrm.h
64254 +++ b/include/net/xfrm.h
64255 @@ -505,7 +505,7 @@ struct xfrm_policy {
64256 struct timer_list timer;
64257
64258 struct flow_cache_object flo;
64259 - atomic_t genid;
64260 + atomic_unchecked_t genid;
64261 u32 priority;
64262 u32 index;
64263 struct xfrm_mark mark;
64264 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
64265 index 1a046b1..ee0bef0 100644
64266 --- a/include/rdma/iw_cm.h
64267 +++ b/include/rdma/iw_cm.h
64268 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
64269 int backlog);
64270
64271 int (*destroy_listen)(struct iw_cm_id *cm_id);
64272 -};
64273 +} __no_const;
64274
64275 /**
64276 * iw_create_cm_id - Create an IW CM identifier.
64277 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
64278 index 6a3922f..0b73022 100644
64279 --- a/include/scsi/libfc.h
64280 +++ b/include/scsi/libfc.h
64281 @@ -748,6 +748,7 @@ struct libfc_function_template {
64282 */
64283 void (*disc_stop_final) (struct fc_lport *);
64284 };
64285 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64286
64287 /**
64288 * struct fc_disc - Discovery context
64289 @@ -851,7 +852,7 @@ struct fc_lport {
64290 struct fc_vport *vport;
64291
64292 /* Operational Information */
64293 - struct libfc_function_template tt;
64294 + libfc_function_template_no_const tt;
64295 u8 link_up;
64296 u8 qfull;
64297 enum fc_lport_state state;
64298 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
64299 index 77273f2..dd4031f 100644
64300 --- a/include/scsi/scsi_device.h
64301 +++ b/include/scsi/scsi_device.h
64302 @@ -161,9 +161,9 @@ struct scsi_device {
64303 unsigned int max_device_blocked; /* what device_blocked counts down from */
64304 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64305
64306 - atomic_t iorequest_cnt;
64307 - atomic_t iodone_cnt;
64308 - atomic_t ioerr_cnt;
64309 + atomic_unchecked_t iorequest_cnt;
64310 + atomic_unchecked_t iodone_cnt;
64311 + atomic_unchecked_t ioerr_cnt;
64312
64313 struct device sdev_gendev,
64314 sdev_dev;
64315 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
64316 index 2a65167..91e01f8 100644
64317 --- a/include/scsi/scsi_transport_fc.h
64318 +++ b/include/scsi/scsi_transport_fc.h
64319 @@ -711,7 +711,7 @@ struct fc_function_template {
64320 unsigned long show_host_system_hostname:1;
64321
64322 unsigned long disable_target_scan:1;
64323 -};
64324 +} __do_const;
64325
64326
64327 /**
64328 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
64329 index 030b87c..98a6954 100644
64330 --- a/include/sound/ak4xxx-adda.h
64331 +++ b/include/sound/ak4xxx-adda.h
64332 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
64333 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
64334 unsigned char val);
64335 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
64336 -};
64337 +} __no_const;
64338
64339 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
64340
64341 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
64342 index 8c05e47..2b5df97 100644
64343 --- a/include/sound/hwdep.h
64344 +++ b/include/sound/hwdep.h
64345 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
64346 struct snd_hwdep_dsp_status *status);
64347 int (*dsp_load)(struct snd_hwdep *hw,
64348 struct snd_hwdep_dsp_image *image);
64349 -};
64350 +} __no_const;
64351
64352 struct snd_hwdep {
64353 struct snd_card *card;
64354 diff --git a/include/sound/info.h b/include/sound/info.h
64355 index 9ca1a49..aba1728 100644
64356 --- a/include/sound/info.h
64357 +++ b/include/sound/info.h
64358 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
64359 struct snd_info_buffer *buffer);
64360 void (*write)(struct snd_info_entry *entry,
64361 struct snd_info_buffer *buffer);
64362 -};
64363 +} __no_const;
64364
64365 struct snd_info_entry_ops {
64366 int (*open)(struct snd_info_entry *entry,
64367 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
64368 index 0cf91b2..b70cae4 100644
64369 --- a/include/sound/pcm.h
64370 +++ b/include/sound/pcm.h
64371 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
64372 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
64373 int (*ack)(struct snd_pcm_substream *substream);
64374 };
64375 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
64376
64377 /*
64378 *
64379 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
64380 index af1b49e..a5d55a5 100644
64381 --- a/include/sound/sb16_csp.h
64382 +++ b/include/sound/sb16_csp.h
64383 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
64384 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
64385 int (*csp_stop) (struct snd_sb_csp * p);
64386 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
64387 -};
64388 +} __no_const;
64389
64390 /*
64391 * CSP private data
64392 diff --git a/include/sound/soc.h b/include/sound/soc.h
64393 index 0992dff..bb366fe 100644
64394 --- a/include/sound/soc.h
64395 +++ b/include/sound/soc.h
64396 @@ -682,7 +682,7 @@ struct snd_soc_platform_driver {
64397 /* platform IO - used for platform DAPM */
64398 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64399 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
64400 -};
64401 +} __do_const;
64402
64403 struct snd_soc_platform {
64404 const char *name;
64405 @@ -852,7 +852,7 @@ struct snd_soc_pcm_runtime {
64406 struct snd_soc_dai_link *dai_link;
64407 struct mutex pcm_mutex;
64408 enum snd_soc_pcm_subclass pcm_subclass;
64409 - struct snd_pcm_ops ops;
64410 + snd_pcm_ops_no_const ops;
64411
64412 unsigned int complete:1;
64413 unsigned int dev_registered:1;
64414 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
64415 index 444cd6b..3327cc5 100644
64416 --- a/include/sound/ymfpci.h
64417 +++ b/include/sound/ymfpci.h
64418 @@ -358,7 +358,7 @@ struct snd_ymfpci {
64419 spinlock_t reg_lock;
64420 spinlock_t voice_lock;
64421 wait_queue_head_t interrupt_sleep;
64422 - atomic_t interrupt_sleep_count;
64423 + atomic_unchecked_t interrupt_sleep_count;
64424 struct snd_info_entry *proc_entry;
64425 const struct firmware *dsp_microcode;
64426 const struct firmware *controller_microcode;
64427 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
64428 index fe73eb8..56388b1 100644
64429 --- a/include/target/target_core_base.h
64430 +++ b/include/target/target_core_base.h
64431 @@ -443,7 +443,7 @@ struct t10_reservation_ops {
64432 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64433 int (*t10_pr_register)(struct se_cmd *);
64434 int (*t10_pr_clear)(struct se_cmd *);
64435 -};
64436 +} __no_const;
64437
64438 struct t10_reservation {
64439 /* Reservation effects all target ports */
64440 @@ -561,8 +561,8 @@ struct se_cmd {
64441 atomic_t t_se_count;
64442 atomic_t t_task_cdbs_left;
64443 atomic_t t_task_cdbs_ex_left;
64444 - atomic_t t_task_cdbs_sent;
64445 - atomic_t t_transport_aborted;
64446 + atomic_unchecked_t t_task_cdbs_sent;
64447 + atomic_unchecked_t t_transport_aborted;
64448 atomic_t t_transport_active;
64449 atomic_t t_transport_complete;
64450 atomic_t t_transport_queue_active;
64451 @@ -799,7 +799,7 @@ struct se_device {
64452 spinlock_t stats_lock;
64453 /* Active commands on this virtual SE device */
64454 atomic_t simple_cmds;
64455 - atomic_t dev_ordered_id;
64456 + atomic_unchecked_t dev_ordered_id;
64457 atomic_t execute_tasks;
64458 atomic_t dev_ordered_sync;
64459 atomic_t dev_qf_count;
64460 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
64461 index 1c09820..7f5ec79 100644
64462 --- a/include/trace/events/irq.h
64463 +++ b/include/trace/events/irq.h
64464 @@ -36,7 +36,7 @@ struct softirq_action;
64465 */
64466 TRACE_EVENT(irq_handler_entry,
64467
64468 - TP_PROTO(int irq, struct irqaction *action),
64469 + TP_PROTO(int irq, const struct irqaction *action),
64470
64471 TP_ARGS(irq, action),
64472
64473 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
64474 */
64475 TRACE_EVENT(irq_handler_exit,
64476
64477 - TP_PROTO(int irq, struct irqaction *action, int ret),
64478 + TP_PROTO(int irq, const struct irqaction *action, int ret),
64479
64480 TP_ARGS(irq, action, ret),
64481
64482 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
64483 index c41f308..6918de3 100644
64484 --- a/include/video/udlfb.h
64485 +++ b/include/video/udlfb.h
64486 @@ -52,10 +52,10 @@ struct dlfb_data {
64487 u32 pseudo_palette[256];
64488 int blank_mode; /*one of FB_BLANK_ */
64489 /* blit-only rendering path metrics, exposed through sysfs */
64490 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64491 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
64492 - atomic_t bytes_sent; /* to usb, after compression including overhead */
64493 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
64494 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64495 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
64496 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
64497 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
64498 };
64499
64500 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
64501 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
64502 index 0993a22..32ba2fe 100644
64503 --- a/include/video/uvesafb.h
64504 +++ b/include/video/uvesafb.h
64505 @@ -177,6 +177,7 @@ struct uvesafb_par {
64506 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
64507 u8 pmi_setpal; /* PMI for palette changes */
64508 u16 *pmi_base; /* protected mode interface location */
64509 + u8 *pmi_code; /* protected mode code location */
64510 void *pmi_start;
64511 void *pmi_pal;
64512 u8 *vbe_state_orig; /*
64513 diff --git a/init/Kconfig b/init/Kconfig
64514 index 3f42cd6..613f41d 100644
64515 --- a/init/Kconfig
64516 +++ b/init/Kconfig
64517 @@ -799,6 +799,7 @@ endif # CGROUPS
64518
64519 config CHECKPOINT_RESTORE
64520 bool "Checkpoint/restore support" if EXPERT
64521 + depends on !GRKERNSEC
64522 default n
64523 help
64524 Enables additional kernel features in a sake of checkpoint/restore.
64525 @@ -1249,7 +1250,7 @@ config SLUB_DEBUG
64526
64527 config COMPAT_BRK
64528 bool "Disable heap randomization"
64529 - default y
64530 + default n
64531 help
64532 Randomizing heap placement makes heap exploits harder, but it
64533 also breaks ancient binaries (including anything libc5 based).
64534 diff --git a/init/do_mounts.c b/init/do_mounts.c
64535 index bf6edbf..4e5809c 100644
64536 --- a/init/do_mounts.c
64537 +++ b/init/do_mounts.c
64538 @@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
64539 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64540 {
64541 struct super_block *s;
64542 - int err = sys_mount(name, "/root", fs, flags, data);
64543 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
64544 if (err)
64545 return err;
64546
64547 - sys_chdir((const char __user __force *)"/root");
64548 + sys_chdir((const char __force_user *)"/root");
64549 s = current->fs->pwd.dentry->d_sb;
64550 ROOT_DEV = s->s_dev;
64551 printk(KERN_INFO
64552 @@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
64553 va_start(args, fmt);
64554 vsprintf(buf, fmt, args);
64555 va_end(args);
64556 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64557 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64558 if (fd >= 0) {
64559 sys_ioctl(fd, FDEJECT, 0);
64560 sys_close(fd);
64561 }
64562 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64563 - fd = sys_open("/dev/console", O_RDWR, 0);
64564 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
64565 if (fd >= 0) {
64566 sys_ioctl(fd, TCGETS, (long)&termios);
64567 termios.c_lflag &= ~ICANON;
64568 sys_ioctl(fd, TCSETSF, (long)&termios);
64569 - sys_read(fd, &c, 1);
64570 + sys_read(fd, (char __user *)&c, 1);
64571 termios.c_lflag |= ICANON;
64572 sys_ioctl(fd, TCSETSF, (long)&termios);
64573 sys_close(fd);
64574 @@ -555,6 +555,6 @@ void __init prepare_namespace(void)
64575 mount_root();
64576 out:
64577 devtmpfs_mount("dev");
64578 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64579 - sys_chroot((const char __user __force *)".");
64580 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64581 + sys_chroot((const char __force_user *)".");
64582 }
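The casts added throughout do_mounts.c above exist for sparse's address-space checking: early-boot kernel code calls syscall bodies such as sys_mount() and sys_chdir() directly with kernel pointers, while their prototypes take __user pointers, and in this patch __force_user acts as a combined __force __user annotation stating that the mismatch is intentional. A minimal sketch of the annotation mechanics (fake_sys_chdir is a stand-in; outside a sparse run the attributes expand to nothing):

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

static long fake_sys_chdir(const char __user *path)
{
	(void)path;		/* a real syscall body would copy the string from user space */
	return 0;
}

int main(void)
{
	const char *kernel_str = "/root";
	/* without the __force cast, sparse flags the address-space mismatch */
	return (int)fake_sys_chdir((const char __force __user *)kernel_str);
}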
64583 diff --git a/init/do_mounts.h b/init/do_mounts.h
64584 index f5b978a..69dbfe8 100644
64585 --- a/init/do_mounts.h
64586 +++ b/init/do_mounts.h
64587 @@ -15,15 +15,15 @@ extern int root_mountflags;
64588
64589 static inline int create_dev(char *name, dev_t dev)
64590 {
64591 - sys_unlink(name);
64592 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
64593 + sys_unlink((char __force_user *)name);
64594 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
64595 }
64596
64597 #if BITS_PER_LONG == 32
64598 static inline u32 bstat(char *name)
64599 {
64600 struct stat64 stat;
64601 - if (sys_stat64(name, &stat) != 0)
64602 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64603 return 0;
64604 if (!S_ISBLK(stat.st_mode))
64605 return 0;
64606 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64607 static inline u32 bstat(char *name)
64608 {
64609 struct stat stat;
64610 - if (sys_newstat(name, &stat) != 0)
64611 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
64612 return 0;
64613 if (!S_ISBLK(stat.st_mode))
64614 return 0;
64615 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64616 index 3098a38..253064e 100644
64617 --- a/init/do_mounts_initrd.c
64618 +++ b/init/do_mounts_initrd.c
64619 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
64620 create_dev("/dev/root.old", Root_RAM0);
64621 /* mount initrd on rootfs' /root */
64622 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64623 - sys_mkdir("/old", 0700);
64624 - root_fd = sys_open("/", 0, 0);
64625 - old_fd = sys_open("/old", 0, 0);
64626 + sys_mkdir((const char __force_user *)"/old", 0700);
64627 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
64628 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64629 /* move initrd over / and chdir/chroot in initrd root */
64630 - sys_chdir("/root");
64631 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64632 - sys_chroot(".");
64633 + sys_chdir((const char __force_user *)"/root");
64634 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64635 + sys_chroot((const char __force_user *)".");
64636
64637 /*
64638 * In case that a resume from disk is carried out by linuxrc or one of
64639 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
64640
64641 /* move initrd to rootfs' /old */
64642 sys_fchdir(old_fd);
64643 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
64644 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64645 /* switch root and cwd back to / of rootfs */
64646 sys_fchdir(root_fd);
64647 - sys_chroot(".");
64648 + sys_chroot((const char __force_user *)".");
64649 sys_close(old_fd);
64650 sys_close(root_fd);
64651
64652 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64653 - sys_chdir("/old");
64654 + sys_chdir((const char __force_user *)"/old");
64655 return;
64656 }
64657
64658 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
64659 mount_root();
64660
64661 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64662 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64663 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64664 if (!error)
64665 printk("okay\n");
64666 else {
64667 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
64668 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64669 if (error == -ENOENT)
64670 printk("/initrd does not exist. Ignored.\n");
64671 else
64672 printk("failed\n");
64673 printk(KERN_NOTICE "Unmounting old root\n");
64674 - sys_umount("/old", MNT_DETACH);
64675 + sys_umount((char __force_user *)"/old", MNT_DETACH);
64676 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64677 if (fd < 0) {
64678 error = fd;
64679 @@ -116,11 +116,11 @@ int __init initrd_load(void)
64680 * mounted in the normal path.
64681 */
64682 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64683 - sys_unlink("/initrd.image");
64684 + sys_unlink((const char __force_user *)"/initrd.image");
64685 handle_initrd();
64686 return 1;
64687 }
64688 }
64689 - sys_unlink("/initrd.image");
64690 + sys_unlink((const char __force_user *)"/initrd.image");
64691 return 0;
64692 }
64693 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64694 index 32c4799..c27ee74 100644
64695 --- a/init/do_mounts_md.c
64696 +++ b/init/do_mounts_md.c
64697 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64698 partitioned ? "_d" : "", minor,
64699 md_setup_args[ent].device_names);
64700
64701 - fd = sys_open(name, 0, 0);
64702 + fd = sys_open((char __force_user *)name, 0, 0);
64703 if (fd < 0) {
64704 printk(KERN_ERR "md: open failed - cannot start "
64705 "array %s\n", name);
64706 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64707 * array without it
64708 */
64709 sys_close(fd);
64710 - fd = sys_open(name, 0, 0);
64711 + fd = sys_open((char __force_user *)name, 0, 0);
64712 sys_ioctl(fd, BLKRRPART, 0);
64713 }
64714 sys_close(fd);
64715 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64716
64717 wait_for_device_probe();
64718
64719 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64720 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64721 if (fd >= 0) {
64722 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64723 sys_close(fd);
64724 diff --git a/init/initramfs.c b/init/initramfs.c
64725 index 8216c30..25e8e32 100644
64726 --- a/init/initramfs.c
64727 +++ b/init/initramfs.c
64728 @@ -74,7 +74,7 @@ static void __init free_hash(void)
64729 }
64730 }
64731
64732 -static long __init do_utime(char __user *filename, time_t mtime)
64733 +static long __init do_utime(__force char __user *filename, time_t mtime)
64734 {
64735 struct timespec t[2];
64736
64737 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
64738 struct dir_entry *de, *tmp;
64739 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64740 list_del(&de->list);
64741 - do_utime(de->name, de->mtime);
64742 + do_utime((char __force_user *)de->name, de->mtime);
64743 kfree(de->name);
64744 kfree(de);
64745 }
64746 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
64747 if (nlink >= 2) {
64748 char *old = find_link(major, minor, ino, mode, collected);
64749 if (old)
64750 - return (sys_link(old, collected) < 0) ? -1 : 1;
64751 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64752 }
64753 return 0;
64754 }
64755 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
64756 {
64757 struct stat st;
64758
64759 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64760 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64761 if (S_ISDIR(st.st_mode))
64762 - sys_rmdir(path);
64763 + sys_rmdir((char __force_user *)path);
64764 else
64765 - sys_unlink(path);
64766 + sys_unlink((char __force_user *)path);
64767 }
64768 }
64769
64770 @@ -305,7 +305,7 @@ static int __init do_name(void)
64771 int openflags = O_WRONLY|O_CREAT;
64772 if (ml != 1)
64773 openflags |= O_TRUNC;
64774 - wfd = sys_open(collected, openflags, mode);
64775 + wfd = sys_open((char __force_user *)collected, openflags, mode);
64776
64777 if (wfd >= 0) {
64778 sys_fchown(wfd, uid, gid);
64779 @@ -317,17 +317,17 @@ static int __init do_name(void)
64780 }
64781 }
64782 } else if (S_ISDIR(mode)) {
64783 - sys_mkdir(collected, mode);
64784 - sys_chown(collected, uid, gid);
64785 - sys_chmod(collected, mode);
64786 + sys_mkdir((char __force_user *)collected, mode);
64787 + sys_chown((char __force_user *)collected, uid, gid);
64788 + sys_chmod((char __force_user *)collected, mode);
64789 dir_add(collected, mtime);
64790 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64791 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64792 if (maybe_link() == 0) {
64793 - sys_mknod(collected, mode, rdev);
64794 - sys_chown(collected, uid, gid);
64795 - sys_chmod(collected, mode);
64796 - do_utime(collected, mtime);
64797 + sys_mknod((char __force_user *)collected, mode, rdev);
64798 + sys_chown((char __force_user *)collected, uid, gid);
64799 + sys_chmod((char __force_user *)collected, mode);
64800 + do_utime((char __force_user *)collected, mtime);
64801 }
64802 }
64803 return 0;
64804 @@ -336,15 +336,15 @@ static int __init do_name(void)
64805 static int __init do_copy(void)
64806 {
64807 if (count >= body_len) {
64808 - sys_write(wfd, victim, body_len);
64809 + sys_write(wfd, (char __force_user *)victim, body_len);
64810 sys_close(wfd);
64811 - do_utime(vcollected, mtime);
64812 + do_utime((char __force_user *)vcollected, mtime);
64813 kfree(vcollected);
64814 eat(body_len);
64815 state = SkipIt;
64816 return 0;
64817 } else {
64818 - sys_write(wfd, victim, count);
64819 + sys_write(wfd, (char __force_user *)victim, count);
64820 body_len -= count;
64821 eat(count);
64822 return 1;
64823 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
64824 {
64825 collected[N_ALIGN(name_len) + body_len] = '\0';
64826 clean_path(collected, 0);
64827 - sys_symlink(collected + N_ALIGN(name_len), collected);
64828 - sys_lchown(collected, uid, gid);
64829 - do_utime(collected, mtime);
64830 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64831 + sys_lchown((char __force_user *)collected, uid, gid);
64832 + do_utime((char __force_user *)collected, mtime);
64833 state = SkipIt;
64834 next_state = Reset;
64835 return 0;
64836 diff --git a/init/main.c b/init/main.c
64837 index ff49a6d..5fa0429 100644
64838 --- a/init/main.c
64839 +++ b/init/main.c
64840 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
64841 extern void tc_init(void);
64842 #endif
64843
64844 +extern void grsecurity_init(void);
64845 +
64846 /*
64847 * Debug helper: via this flag we know that we are in 'early bootup code'
64848 * where only the boot processor is running with IRQ disabled. This means
64849 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
64850
64851 __setup("reset_devices", set_reset_devices);
64852
64853 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64854 +extern char pax_enter_kernel_user[];
64855 +extern char pax_exit_kernel_user[];
64856 +extern pgdval_t clone_pgd_mask;
64857 +#endif
64858 +
64859 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64860 +static int __init setup_pax_nouderef(char *str)
64861 +{
64862 +#ifdef CONFIG_X86_32
64863 + unsigned int cpu;
64864 + struct desc_struct *gdt;
64865 +
64866 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64867 + gdt = get_cpu_gdt_table(cpu);
64868 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64869 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64870 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64871 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64872 + }
64873 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64874 +#else
64875 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64876 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64877 + clone_pgd_mask = ~(pgdval_t)0UL;
64878 +#endif
64879 +
64880 + return 0;
64881 +}
64882 +early_param("pax_nouderef", setup_pax_nouderef);
64883 +#endif
64884 +
64885 +#ifdef CONFIG_PAX_SOFTMODE
64886 +int pax_softmode;
64887 +
64888 +static int __init setup_pax_softmode(char *str)
64889 +{
64890 + get_option(&str, &pax_softmode);
64891 + return 1;
64892 +}
64893 +__setup("pax_softmode=", setup_pax_softmode);
64894 +#endif
64895 +
64896 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64897 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64898 static const char *panic_later, *panic_param;
64899 @@ -675,6 +720,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64900 {
64901 int count = preempt_count();
64902 int ret;
64903 + const char *msg1 = "", *msg2 = "";
64904
64905 if (initcall_debug)
64906 ret = do_one_initcall_debug(fn);
64907 @@ -687,15 +733,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64908 sprintf(msgbuf, "error code %d ", ret);
64909
64910 if (preempt_count() != count) {
64911 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64912 + msg1 = " preemption imbalance";
64913 preempt_count() = count;
64914 }
64915 if (irqs_disabled()) {
64916 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64917 + msg2 = " disabled interrupts";
64918 local_irq_enable();
64919 }
64920 - if (msgbuf[0]) {
64921 - printk("initcall %pF returned with %s\n", fn, msgbuf);
64922 + if (msgbuf[0] || *msg1 || *msg2) {
64923 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64924 }
64925
64926 return ret;
64927 @@ -814,7 +860,7 @@ static int __init kernel_init(void * unused)
64928 do_basic_setup();
64929
64930 /* Open the /dev/console on the rootfs, this should never fail */
64931 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64932 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64933 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64934
64935 (void) sys_dup(0);
64936 @@ -827,11 +873,13 @@ static int __init kernel_init(void * unused)
64937 if (!ramdisk_execute_command)
64938 ramdisk_execute_command = "/init";
64939
64940 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64941 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64942 ramdisk_execute_command = NULL;
64943 prepare_namespace();
64944 }
64945
64946 + grsecurity_init();
64947 +
64948 /*
64949 * Ok, we have completed the initial bootup, and
64950 * we're essentially up and running. Get rid of the
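
The init/main.c changes add two boot-time switches: pax_nouderef patches the UDEREF entry and exit paths (or the i386 GDT descriptors) so the feature is effectively disabled from the command line, and pax_softmode reads an integer flag with get_option(). The sketch below illustrates only the __setup()-style name=value parsing in plain userspace C; the parsing helpers and globals are illustrative stand-ins, not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int pax_softmode;	/* illustrative global, as in the hunk above */

static int setup_pax_softmode(const char *str)
{
	pax_softmode = atoi(str);	/* crude get_option() analogue */
	return 1;
}

static void parse_cmdline(const char *cmdline)
{
	static const char prefix[] = "pax_softmode=";
	const char *p = strstr(cmdline, prefix);

	if (p)
		setup_pax_softmode(p + sizeof(prefix) - 1);
}

int main(void)
{
	parse_cmdline("root=/dev/sda1 pax_softmode=1 quiet");
	printf("pax_softmode = %d\n", pax_softmode);
	return 0;
}
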
64951 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64952 index 86ee272..773d937 100644
64953 --- a/ipc/mqueue.c
64954 +++ b/ipc/mqueue.c
64955 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64956 mq_bytes = (mq_msg_tblsz +
64957 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64958
64959 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64960 spin_lock(&mq_lock);
64961 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64962 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
64963 diff --git a/ipc/msg.c b/ipc/msg.c
64964 index 7385de2..a8180e08 100644
64965 --- a/ipc/msg.c
64966 +++ b/ipc/msg.c
64967 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64968 return security_msg_queue_associate(msq, msgflg);
64969 }
64970
64971 +static struct ipc_ops msg_ops = {
64972 + .getnew = newque,
64973 + .associate = msg_security,
64974 + .more_checks = NULL
64975 +};
64976 +
64977 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64978 {
64979 struct ipc_namespace *ns;
64980 - struct ipc_ops msg_ops;
64981 struct ipc_params msg_params;
64982
64983 ns = current->nsproxy->ipc_ns;
64984
64985 - msg_ops.getnew = newque;
64986 - msg_ops.associate = msg_security;
64987 - msg_ops.more_checks = NULL;
64988 -
64989 msg_params.key = key;
64990 msg_params.flg = msgflg;
64991
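
For msgget() here, and identically for semget() and shmget() in the following hunks, the patch replaces an ipc_ops structure that was filled in on the stack on every call with one file-scope table built from designated initializers. A small standalone sketch of the before and after shape of that change, with invented types and callbacks:

#include <stdio.h>

struct demo_ipc_ops {
	int (*getnew)(int key);
	int (*associate)(int key, int flg);
	int (*more_checks)(int key, int flg);
};

static int demo_getnew(int key)             { return key + 1; }
static int demo_associate(int key, int flg) { return key ^ flg; }

/*
 * Before: the three members were assigned on the stack inside every
 * syscall invocation.  After: one shared, file-scope table.
 */
static struct demo_ipc_ops demo_msg_ops = {
	.getnew      = demo_getnew,
	.associate   = demo_associate,
	.more_checks = NULL,
};

int main(void)
{
	printf("getnew(41)      = %d\n", demo_msg_ops.getnew(41));
	printf("associate(3, 5) = %d\n", demo_msg_ops.associate(3, 5));
	return 0;
}
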
64992 diff --git a/ipc/sem.c b/ipc/sem.c
64993 index 5215a81..cfc0cac 100644
64994 --- a/ipc/sem.c
64995 +++ b/ipc/sem.c
64996 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64997 return 0;
64998 }
64999
65000 +static struct ipc_ops sem_ops = {
65001 + .getnew = newary,
65002 + .associate = sem_security,
65003 + .more_checks = sem_more_checks
65004 +};
65005 +
65006 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65007 {
65008 struct ipc_namespace *ns;
65009 - struct ipc_ops sem_ops;
65010 struct ipc_params sem_params;
65011
65012 ns = current->nsproxy->ipc_ns;
65013 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65014 if (nsems < 0 || nsems > ns->sc_semmsl)
65015 return -EINVAL;
65016
65017 - sem_ops.getnew = newary;
65018 - sem_ops.associate = sem_security;
65019 - sem_ops.more_checks = sem_more_checks;
65020 -
65021 sem_params.key = key;
65022 sem_params.flg = semflg;
65023 sem_params.u.nsems = nsems;
65024 diff --git a/ipc/shm.c b/ipc/shm.c
65025 index b76be5b..859e750 100644
65026 --- a/ipc/shm.c
65027 +++ b/ipc/shm.c
65028 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
65029 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
65030 #endif
65031
65032 +#ifdef CONFIG_GRKERNSEC
65033 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65034 + const time_t shm_createtime, const uid_t cuid,
65035 + const int shmid);
65036 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65037 + const time_t shm_createtime);
65038 +#endif
65039 +
65040 void shm_init_ns(struct ipc_namespace *ns)
65041 {
65042 ns->shm_ctlmax = SHMMAX;
65043 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
65044 shp->shm_lprid = 0;
65045 shp->shm_atim = shp->shm_dtim = 0;
65046 shp->shm_ctim = get_seconds();
65047 +#ifdef CONFIG_GRKERNSEC
65048 + {
65049 + struct timespec timeval;
65050 + do_posix_clock_monotonic_gettime(&timeval);
65051 +
65052 + shp->shm_createtime = timeval.tv_sec;
65053 + }
65054 +#endif
65055 shp->shm_segsz = size;
65056 shp->shm_nattch = 0;
65057 shp->shm_file = file;
65058 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
65059 return 0;
65060 }
65061
65062 +static struct ipc_ops shm_ops = {
65063 + .getnew = newseg,
65064 + .associate = shm_security,
65065 + .more_checks = shm_more_checks
65066 +};
65067 +
65068 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
65069 {
65070 struct ipc_namespace *ns;
65071 - struct ipc_ops shm_ops;
65072 struct ipc_params shm_params;
65073
65074 ns = current->nsproxy->ipc_ns;
65075
65076 - shm_ops.getnew = newseg;
65077 - shm_ops.associate = shm_security;
65078 - shm_ops.more_checks = shm_more_checks;
65079 -
65080 shm_params.key = key;
65081 shm_params.flg = shmflg;
65082 shm_params.u.size = size;
65083 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65084 f_mode = FMODE_READ | FMODE_WRITE;
65085 }
65086 if (shmflg & SHM_EXEC) {
65087 +
65088 +#ifdef CONFIG_PAX_MPROTECT
65089 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
65090 + goto out;
65091 +#endif
65092 +
65093 prot |= PROT_EXEC;
65094 acc_mode |= S_IXUGO;
65095 }
65096 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65097 if (err)
65098 goto out_unlock;
65099
65100 +#ifdef CONFIG_GRKERNSEC
65101 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
65102 + shp->shm_perm.cuid, shmid) ||
65103 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
65104 + err = -EACCES;
65105 + goto out_unlock;
65106 + }
65107 +#endif
65108 +
65109 path = shp->shm_file->f_path;
65110 path_get(&path);
65111 shp->shm_nattch++;
65112 +#ifdef CONFIG_GRKERNSEC
65113 + shp->shm_lapid = current->pid;
65114 +#endif
65115 size = i_size_read(path.dentry->d_inode);
65116 shm_unlock(shp);
65117
65118 diff --git a/kernel/acct.c b/kernel/acct.c
65119 index 02e6167..54824f7 100644
65120 --- a/kernel/acct.c
65121 +++ b/kernel/acct.c
65122 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
65123 */
65124 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
65125 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
65126 - file->f_op->write(file, (char *)&ac,
65127 + file->f_op->write(file, (char __force_user *)&ac,
65128 sizeof(acct_t), &file->f_pos);
65129 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
65130 set_fs(fs);
65131 diff --git a/kernel/audit.c b/kernel/audit.c
65132 index bb0eb5b..cf2a03a 100644
65133 --- a/kernel/audit.c
65134 +++ b/kernel/audit.c
65135 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
65136 3) suppressed due to audit_rate_limit
65137 4) suppressed due to audit_backlog_limit
65138 */
65139 -static atomic_t audit_lost = ATOMIC_INIT(0);
65140 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
65141
65142 /* The netlink socket. */
65143 static struct sock *audit_sock;
65144 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
65145 unsigned long now;
65146 int print;
65147
65148 - atomic_inc(&audit_lost);
65149 + atomic_inc_unchecked(&audit_lost);
65150
65151 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
65152
65153 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
65154 printk(KERN_WARNING
65155 "audit: audit_lost=%d audit_rate_limit=%d "
65156 "audit_backlog_limit=%d\n",
65157 - atomic_read(&audit_lost),
65158 + atomic_read_unchecked(&audit_lost),
65159 audit_rate_limit,
65160 audit_backlog_limit);
65161 audit_panic(message);
65162 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
65163 status_set.pid = audit_pid;
65164 status_set.rate_limit = audit_rate_limit;
65165 status_set.backlog_limit = audit_backlog_limit;
65166 - status_set.lost = atomic_read(&audit_lost);
65167 + status_set.lost = atomic_read_unchecked(&audit_lost);
65168 status_set.backlog = skb_queue_len(&audit_skb_queue);
65169 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
65170 &status_set, sizeof(status_set));
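
audit_lost above, and later session_id, the kgdb tasklet flag and the perf child counters, move from atomic_t to atomic_unchecked_t: under PaX's REFCOUNT hardening an atomic_t that overflows is treated as a reference-count bug, so counters that are genuinely allowed to wrap use the unchecked variant together with the matching *_unchecked accessors. A userspace sketch of that checked versus unchecked distinction, written with C11 atomics and invented helper names rather than the kernel API:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* "Checked" increment: refuses to overflow, as REFCOUNT would. */
static int checked_inc(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == INT_MAX) {
			fprintf(stderr, "refcount overflow detected\n");
			abort();
		}
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
	return old + 1;
}

/* "Unchecked" increment: wrapping is acceptable for plain statistics. */
static unsigned int unchecked_inc(atomic_uint *v)
{
	return atomic_fetch_add(v, 1) + 1;
}

int main(void)
{
	atomic_int  refcount = 1;
	atomic_uint lost     = UINT_MAX;	/* a counter allowed to wrap */

	printf("refcount -> %d\n", checked_inc(&refcount));
	printf("lost     -> %u\n", unchecked_inc(&lost));	/* wraps to 0 */
	return 0;
}
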
65171 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
65172 index af1de0f..06dfe57 100644
65173 --- a/kernel/auditsc.c
65174 +++ b/kernel/auditsc.c
65175 @@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
65176 }
65177
65178 /* global counter which is incremented every time something logs in */
65179 -static atomic_t session_id = ATOMIC_INIT(0);
65180 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
65181
65182 /**
65183 * audit_set_loginuid - set current task's audit_context loginuid
65184 @@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
65185 return -EPERM;
65186 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
65187
65188 - sessionid = atomic_inc_return(&session_id);
65189 + sessionid = atomic_inc_return_unchecked(&session_id);
65190 if (context && context->in_syscall) {
65191 struct audit_buffer *ab;
65192
65193 diff --git a/kernel/capability.c b/kernel/capability.c
65194 index 3f1adb6..c564db0 100644
65195 --- a/kernel/capability.c
65196 +++ b/kernel/capability.c
65197 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
65198 * before modification is attempted and the application
65199 * fails.
65200 */
65201 + if (tocopy > ARRAY_SIZE(kdata))
65202 + return -EFAULT;
65203 +
65204 if (copy_to_user(dataptr, kdata, tocopy
65205 * sizeof(struct __user_cap_data_struct))) {
65206 return -EFAULT;
65207 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
65208 int ret;
65209
65210 rcu_read_lock();
65211 - ret = security_capable(__task_cred(t), ns, cap);
65212 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
65213 + gr_task_is_capable(t, __task_cred(t), cap);
65214 rcu_read_unlock();
65215
65216 - return (ret == 0);
65217 + return ret;
65218 }
65219
65220 /**
65221 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
65222 int ret;
65223
65224 rcu_read_lock();
65225 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
65226 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
65227 rcu_read_unlock();
65228
65229 - return (ret == 0);
65230 + return ret;
65231 }
65232
65233 /**
65234 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
65235 BUG();
65236 }
65237
65238 - if (security_capable(current_cred(), ns, cap) == 0) {
65239 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
65240 current->flags |= PF_SUPERPRIV;
65241 return true;
65242 }
65243 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
65244 }
65245 EXPORT_SYMBOL(ns_capable);
65246
65247 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
65248 +{
65249 + if (unlikely(!cap_valid(cap))) {
65250 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
65251 + BUG();
65252 + }
65253 +
65254 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
65255 + current->flags |= PF_SUPERPRIV;
65256 + return true;
65257 + }
65258 + return false;
65259 +}
65260 +EXPORT_SYMBOL(ns_capable_nolog);
65261 +
65262 /**
65263 * capable - Determine if the current task has a superior capability in effect
65264 * @cap: The capability to be tested for
65265 @@ -408,6 +427,12 @@ bool capable(int cap)
65266 }
65267 EXPORT_SYMBOL(capable);
65268
65269 +bool capable_nolog(int cap)
65270 +{
65271 + return ns_capable_nolog(&init_user_ns, cap);
65272 +}
65273 +EXPORT_SYMBOL(capable_nolog);
65274 +
65275 /**
65276 * nsown_capable - Check superior capability to one's own user_ns
65277 * @cap: The capability in question
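
The capget() hunk adds an explicit bounds check on tocopy before kdata is copied out, so a header version that maps to more elements than the kernel-side array holds now fails with -EFAULT instead of reading past the buffer; the remaining hunks fold the gr_* capability hooks into the existing security_capable() checks. A self-contained sketch of the bounds-check idea alone, with illustrative array sizes and names:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct demo_cap_data { unsigned int effective, permitted, inheritable; };

static int copy_caps_out(struct demo_cap_data *dst, size_t dst_elems,
			 unsigned int tocopy)
{
	static const struct demo_cap_data kdata[2] = {
		{ 0x1, 0x1, 0x0 }, { 0x2, 0x3, 0x0 },
	};

	if (tocopy > ARRAY_SIZE(kdata))	/* the added check */
		return -EFAULT;
	if (tocopy > dst_elems)		/* caller-side sanity, for the demo */
		return -EFAULT;

	memcpy(dst, kdata, tocopy * sizeof(kdata[0]));
	return 0;
}

int main(void)
{
	struct demo_cap_data out[2];

	printf("tocopy=2 -> %d\n", copy_caps_out(out, 2, 2));	/* ok */
	printf("tocopy=9 -> %d\n", copy_caps_out(out, 2, 9));	/* rejected */
	return 0;
}
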
65278 diff --git a/kernel/compat.c b/kernel/compat.c
65279 index a6d0649..f44fb27 100644
65280 --- a/kernel/compat.c
65281 +++ b/kernel/compat.c
65282 @@ -13,6 +13,7 @@
65283
65284 #include <linux/linkage.h>
65285 #include <linux/compat.h>
65286 +#include <linux/module.h>
65287 #include <linux/errno.h>
65288 #include <linux/time.h>
65289 #include <linux/signal.h>
65290 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
65291 mm_segment_t oldfs;
65292 long ret;
65293
65294 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65295 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65296 oldfs = get_fs();
65297 set_fs(KERNEL_DS);
65298 ret = hrtimer_nanosleep_restart(restart);
65299 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
65300 oldfs = get_fs();
65301 set_fs(KERNEL_DS);
65302 ret = hrtimer_nanosleep(&tu,
65303 - rmtp ? (struct timespec __user *)&rmt : NULL,
65304 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
65305 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65306 set_fs(oldfs);
65307
65308 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
65309 mm_segment_t old_fs = get_fs();
65310
65311 set_fs(KERNEL_DS);
65312 - ret = sys_sigpending((old_sigset_t __user *) &s);
65313 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
65314 set_fs(old_fs);
65315 if (ret == 0)
65316 ret = put_user(s, set);
65317 @@ -399,7 +400,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
65318 mm_segment_t old_fs = get_fs();
65319
65320 set_fs(KERNEL_DS);
65321 - ret = sys_old_getrlimit(resource, &r);
65322 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
65323 set_fs(old_fs);
65324
65325 if (!ret) {
65326 @@ -471,7 +472,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
65327 mm_segment_t old_fs = get_fs();
65328
65329 set_fs(KERNEL_DS);
65330 - ret = sys_getrusage(who, (struct rusage __user *) &r);
65331 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
65332 set_fs(old_fs);
65333
65334 if (ret)
65335 @@ -498,8 +499,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
65336 set_fs (KERNEL_DS);
65337 ret = sys_wait4(pid,
65338 (stat_addr ?
65339 - (unsigned int __user *) &status : NULL),
65340 - options, (struct rusage __user *) &r);
65341 + (unsigned int __force_user *) &status : NULL),
65342 + options, (struct rusage __force_user *) &r);
65343 set_fs (old_fs);
65344
65345 if (ret > 0) {
65346 @@ -524,8 +525,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
65347 memset(&info, 0, sizeof(info));
65348
65349 set_fs(KERNEL_DS);
65350 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
65351 - uru ? (struct rusage __user *)&ru : NULL);
65352 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
65353 + uru ? (struct rusage __force_user *)&ru : NULL);
65354 set_fs(old_fs);
65355
65356 if ((ret < 0) || (info.si_signo == 0))
65357 @@ -655,8 +656,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
65358 oldfs = get_fs();
65359 set_fs(KERNEL_DS);
65360 err = sys_timer_settime(timer_id, flags,
65361 - (struct itimerspec __user *) &newts,
65362 - (struct itimerspec __user *) &oldts);
65363 + (struct itimerspec __force_user *) &newts,
65364 + (struct itimerspec __force_user *) &oldts);
65365 set_fs(oldfs);
65366 if (!err && old && put_compat_itimerspec(old, &oldts))
65367 return -EFAULT;
65368 @@ -673,7 +674,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
65369 oldfs = get_fs();
65370 set_fs(KERNEL_DS);
65371 err = sys_timer_gettime(timer_id,
65372 - (struct itimerspec __user *) &ts);
65373 + (struct itimerspec __force_user *) &ts);
65374 set_fs(oldfs);
65375 if (!err && put_compat_itimerspec(setting, &ts))
65376 return -EFAULT;
65377 @@ -692,7 +693,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
65378 oldfs = get_fs();
65379 set_fs(KERNEL_DS);
65380 err = sys_clock_settime(which_clock,
65381 - (struct timespec __user *) &ts);
65382 + (struct timespec __force_user *) &ts);
65383 set_fs(oldfs);
65384 return err;
65385 }
65386 @@ -707,7 +708,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
65387 oldfs = get_fs();
65388 set_fs(KERNEL_DS);
65389 err = sys_clock_gettime(which_clock,
65390 - (struct timespec __user *) &ts);
65391 + (struct timespec __force_user *) &ts);
65392 set_fs(oldfs);
65393 if (!err && put_compat_timespec(&ts, tp))
65394 return -EFAULT;
65395 @@ -727,7 +728,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
65396
65397 oldfs = get_fs();
65398 set_fs(KERNEL_DS);
65399 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65400 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
65401 set_fs(oldfs);
65402
65403 err = compat_put_timex(utp, &txc);
65404 @@ -747,7 +748,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
65405 oldfs = get_fs();
65406 set_fs(KERNEL_DS);
65407 err = sys_clock_getres(which_clock,
65408 - (struct timespec __user *) &ts);
65409 + (struct timespec __force_user *) &ts);
65410 set_fs(oldfs);
65411 if (!err && tp && put_compat_timespec(&ts, tp))
65412 return -EFAULT;
65413 @@ -759,9 +760,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
65414 long err;
65415 mm_segment_t oldfs;
65416 struct timespec tu;
65417 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
65418 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
65419
65420 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
65421 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
65422 oldfs = get_fs();
65423 set_fs(KERNEL_DS);
65424 err = clock_nanosleep_restart(restart);
65425 @@ -793,8 +794,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
65426 oldfs = get_fs();
65427 set_fs(KERNEL_DS);
65428 err = sys_clock_nanosleep(which_clock, flags,
65429 - (struct timespec __user *) &in,
65430 - (struct timespec __user *) &out);
65431 + (struct timespec __force_user *) &in,
65432 + (struct timespec __force_user *) &out);
65433 set_fs(oldfs);
65434
65435 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
65436 diff --git a/kernel/configs.c b/kernel/configs.c
65437 index 42e8fa0..9e7406b 100644
65438 --- a/kernel/configs.c
65439 +++ b/kernel/configs.c
65440 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
65441 struct proc_dir_entry *entry;
65442
65443 /* create the current config file */
65444 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
65445 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
65446 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
65447 + &ikconfig_file_ops);
65448 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65449 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
65450 + &ikconfig_file_ops);
65451 +#endif
65452 +#else
65453 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
65454 &ikconfig_file_ops);
65455 +#endif
65456 +
65457 if (!entry)
65458 return -ENOMEM;
65459
65460 diff --git a/kernel/cred.c b/kernel/cred.c
65461 index 48c6fd3..8398912 100644
65462 --- a/kernel/cred.c
65463 +++ b/kernel/cred.c
65464 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
65465 validate_creds(cred);
65466 put_cred(cred);
65467 }
65468 +
65469 +#ifdef CONFIG_GRKERNSEC_SETXID
65470 + cred = (struct cred *) tsk->delayed_cred;
65471 + if (cred) {
65472 + tsk->delayed_cred = NULL;
65473 + validate_creds(cred);
65474 + put_cred(cred);
65475 + }
65476 +#endif
65477 }
65478
65479 /**
65480 @@ -472,7 +481,7 @@ error_put:
65481 * Always returns 0 thus allowing this function to be tail-called at the end
65482 * of, say, sys_setgid().
65483 */
65484 -int commit_creds(struct cred *new)
65485 +static int __commit_creds(struct cred *new)
65486 {
65487 struct task_struct *task = current;
65488 const struct cred *old = task->real_cred;
65489 @@ -491,6 +500,8 @@ int commit_creds(struct cred *new)
65490
65491 get_cred(new); /* we will require a ref for the subj creds too */
65492
65493 + gr_set_role_label(task, new->uid, new->gid);
65494 +
65495 /* dumpability changes */
65496 if (old->euid != new->euid ||
65497 old->egid != new->egid ||
65498 @@ -540,6 +551,101 @@ int commit_creds(struct cred *new)
65499 put_cred(old);
65500 return 0;
65501 }
65502 +#ifdef CONFIG_GRKERNSEC_SETXID
65503 +extern int set_user(struct cred *new);
65504 +
65505 +void gr_delayed_cred_worker(void)
65506 +{
65507 + const struct cred *new = current->delayed_cred;
65508 + struct cred *ncred;
65509 +
65510 + current->delayed_cred = NULL;
65511 +
65512 + if (current_uid() && new != NULL) {
65513 + // from doing get_cred on it when queueing this
65514 + put_cred(new);
65515 + return;
65516 + } else if (new == NULL)
65517 + return;
65518 +
65519 + ncred = prepare_creds();
65520 + if (!ncred)
65521 + goto die;
65522 + // uids
65523 + ncred->uid = new->uid;
65524 + ncred->euid = new->euid;
65525 + ncred->suid = new->suid;
65526 + ncred->fsuid = new->fsuid;
65527 + // gids
65528 + ncred->gid = new->gid;
65529 + ncred->egid = new->egid;
65530 + ncred->sgid = new->sgid;
65531 + ncred->fsgid = new->fsgid;
65532 + // groups
65533 + if (set_groups(ncred, new->group_info) < 0) {
65534 + abort_creds(ncred);
65535 + goto die;
65536 + }
65537 + // caps
65538 + ncred->securebits = new->securebits;
65539 + ncred->cap_inheritable = new->cap_inheritable;
65540 + ncred->cap_permitted = new->cap_permitted;
65541 + ncred->cap_effective = new->cap_effective;
65542 + ncred->cap_bset = new->cap_bset;
65543 +
65544 + if (set_user(ncred)) {
65545 + abort_creds(ncred);
65546 + goto die;
65547 + }
65548 +
65549 + // from doing get_cred on it when queueing this
65550 + put_cred(new);
65551 +
65552 + __commit_creds(ncred);
65553 + return;
65554 +die:
65555 + // from doing get_cred on it when queueing this
65556 + put_cred(new);
65557 + do_group_exit(SIGKILL);
65558 +}
65559 +#endif
65560 +
65561 +int commit_creds(struct cred *new)
65562 +{
65563 +#ifdef CONFIG_GRKERNSEC_SETXID
65564 + int ret;
65565 + int schedule_it = 0;
65566 + struct task_struct *t;
65567 +
65568 + /* we won't get called with tasklist_lock held for writing
65569 + and interrupts disabled as the cred struct in that case is
65570 + init_cred
65571 + */
65572 + if (grsec_enable_setxid && !current_is_single_threaded() &&
65573 + !current_uid() && new->uid) {
65574 + schedule_it = 1;
65575 + }
65576 + ret = __commit_creds(new);
65577 + if (schedule_it) {
65578 + rcu_read_lock();
65579 + read_lock(&tasklist_lock);
65580 + for (t = next_thread(current); t != current;
65581 + t = next_thread(t)) {
65582 + if (t->delayed_cred == NULL) {
65583 + t->delayed_cred = get_cred(new);
65584 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
65585 + set_tsk_need_resched(t);
65586 + }
65587 + }
65588 + read_unlock(&tasklist_lock);
65589 + rcu_read_unlock();
65590 + }
65591 + return ret;
65592 +#else
65593 + return __commit_creds(new);
65594 +#endif
65595 +}
65596 +
65597 EXPORT_SYMBOL(commit_creds);
65598
65599 /**
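
commit_creds() becomes a wrapper around __commit_creds(): under CONFIG_GRKERNSEC_SETXID, when a still-root multithreaded process installs credentials with a non-zero uid, the new creds are queued on every sibling thread via delayed_cred and TIF_GRSEC_SETXID, and gr_delayed_cred_worker() applies them later, so no thread of the group keeps running with the old uid. The pthread sketch below only illustrates the queue-for-siblings, apply-at-the-next-safe-point idea; the threading model, uid handling and names are invented and far simpler than the kernel mechanism.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 3

static _Atomic int pending_uid = -1;	/* -1: no credential change queued */
static _Thread_local int my_uid;	/* every thread starts as uid 0 */

static void apply_delayed_cred(void)
{
	int uid = atomic_load(&pending_uid);

	if (uid >= 0)
		my_uid = uid;
}

static void *worker(void *arg)
{
	long id = (long)arg;

	/* Stand-in for "check the queued creds at the next safe point". */
	while (atomic_load(&pending_uid) < 0)
		usleep(1000);

	apply_delayed_cred();
	printf("thread %ld now runs with uid %d\n", id, my_uid);
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];

	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);

	usleep(2000);
	atomic_store(&pending_uid, 1000);	/* one thread drops privileges... */
	my_uid = 1000;				/* ...and the rest must follow   */

	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	printf("main thread uid %d\n", my_uid);
	return 0;
}
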
65600 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65601 index 7fda904..59f620c 100644
65602 --- a/kernel/debug/debug_core.c
65603 +++ b/kernel/debug/debug_core.c
65604 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
65605 */
65606 static atomic_t masters_in_kgdb;
65607 static atomic_t slaves_in_kgdb;
65608 -static atomic_t kgdb_break_tasklet_var;
65609 +static atomic_unchecked_t kgdb_break_tasklet_var;
65610 atomic_t kgdb_setting_breakpoint;
65611
65612 struct task_struct *kgdb_usethread;
65613 @@ -129,7 +129,7 @@ int kgdb_single_step;
65614 static pid_t kgdb_sstep_pid;
65615
65616 /* to keep track of the CPU which is doing the single stepping*/
65617 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65618 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65619
65620 /*
65621 * If you are debugging a problem where roundup (the collection of
65622 @@ -537,7 +537,7 @@ return_normal:
65623 * kernel will only try for the value of sstep_tries before
65624 * giving up and continuing on.
65625 */
65626 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65627 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65628 (kgdb_info[cpu].task &&
65629 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65630 atomic_set(&kgdb_active, -1);
65631 @@ -631,8 +631,8 @@ cpu_master_loop:
65632 }
65633
65634 kgdb_restore:
65635 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65636 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65637 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65638 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65639 if (kgdb_info[sstep_cpu].task)
65640 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65641 else
65642 @@ -829,18 +829,18 @@ static void kgdb_unregister_callbacks(void)
65643 static void kgdb_tasklet_bpt(unsigned long ing)
65644 {
65645 kgdb_breakpoint();
65646 - atomic_set(&kgdb_break_tasklet_var, 0);
65647 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65648 }
65649
65650 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65651
65652 void kgdb_schedule_breakpoint(void)
65653 {
65654 - if (atomic_read(&kgdb_break_tasklet_var) ||
65655 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65656 atomic_read(&kgdb_active) != -1 ||
65657 atomic_read(&kgdb_setting_breakpoint))
65658 return;
65659 - atomic_inc(&kgdb_break_tasklet_var);
65660 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
65661 tasklet_schedule(&kgdb_tasklet_breakpoint);
65662 }
65663 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65664 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65665 index e2ae734..08a4c5c 100644
65666 --- a/kernel/debug/kdb/kdb_main.c
65667 +++ b/kernel/debug/kdb/kdb_main.c
65668 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
65669 list_for_each_entry(mod, kdb_modules, list) {
65670
65671 kdb_printf("%-20s%8u 0x%p ", mod->name,
65672 - mod->core_size, (void *)mod);
65673 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
65674 #ifdef CONFIG_MODULE_UNLOAD
65675 kdb_printf("%4ld ", module_refcount(mod));
65676 #endif
65677 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
65678 kdb_printf(" (Loading)");
65679 else
65680 kdb_printf(" (Live)");
65681 - kdb_printf(" 0x%p", mod->module_core);
65682 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65683
65684 #ifdef CONFIG_MODULE_UNLOAD
65685 {
65686 diff --git a/kernel/events/core.c b/kernel/events/core.c
65687 index 1b5c081..c375f83 100644
65688 --- a/kernel/events/core.c
65689 +++ b/kernel/events/core.c
65690 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65691 return 0;
65692 }
65693
65694 -static atomic64_t perf_event_id;
65695 +static atomic64_unchecked_t perf_event_id;
65696
65697 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65698 enum event_type_t event_type);
65699 @@ -2581,7 +2581,7 @@ static void __perf_event_read(void *info)
65700
65701 static inline u64 perf_event_count(struct perf_event *event)
65702 {
65703 - return local64_read(&event->count) + atomic64_read(&event->child_count);
65704 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65705 }
65706
65707 static u64 perf_event_read(struct perf_event *event)
65708 @@ -2897,9 +2897,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65709 mutex_lock(&event->child_mutex);
65710 total += perf_event_read(event);
65711 *enabled += event->total_time_enabled +
65712 - atomic64_read(&event->child_total_time_enabled);
65713 + atomic64_read_unchecked(&event->child_total_time_enabled);
65714 *running += event->total_time_running +
65715 - atomic64_read(&event->child_total_time_running);
65716 + atomic64_read_unchecked(&event->child_total_time_running);
65717
65718 list_for_each_entry(child, &event->child_list, child_list) {
65719 total += perf_event_read(child);
65720 @@ -3306,10 +3306,10 @@ void perf_event_update_userpage(struct perf_event *event)
65721 userpg->offset -= local64_read(&event->hw.prev_count);
65722
65723 userpg->time_enabled = enabled +
65724 - atomic64_read(&event->child_total_time_enabled);
65725 + atomic64_read_unchecked(&event->child_total_time_enabled);
65726
65727 userpg->time_running = running +
65728 - atomic64_read(&event->child_total_time_running);
65729 + atomic64_read_unchecked(&event->child_total_time_running);
65730
65731 barrier();
65732 ++userpg->lock;
65733 @@ -3738,11 +3738,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65734 values[n++] = perf_event_count(event);
65735 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65736 values[n++] = enabled +
65737 - atomic64_read(&event->child_total_time_enabled);
65738 + atomic64_read_unchecked(&event->child_total_time_enabled);
65739 }
65740 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65741 values[n++] = running +
65742 - atomic64_read(&event->child_total_time_running);
65743 + atomic64_read_unchecked(&event->child_total_time_running);
65744 }
65745 if (read_format & PERF_FORMAT_ID)
65746 values[n++] = primary_event_id(event);
65747 @@ -4393,12 +4393,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65748 * need to add enough zero bytes after the string to handle
65749 * the 64bit alignment we do later.
65750 */
65751 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65752 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
65753 if (!buf) {
65754 name = strncpy(tmp, "//enomem", sizeof(tmp));
65755 goto got_name;
65756 }
65757 - name = d_path(&file->f_path, buf, PATH_MAX);
65758 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65759 if (IS_ERR(name)) {
65760 name = strncpy(tmp, "//toolong", sizeof(tmp));
65761 goto got_name;
65762 @@ -5765,7 +5765,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65763 event->parent = parent_event;
65764
65765 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65766 - event->id = atomic64_inc_return(&perf_event_id);
65767 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
65768
65769 event->state = PERF_EVENT_STATE_INACTIVE;
65770
65771 @@ -6287,10 +6287,10 @@ static void sync_child_event(struct perf_event *child_event,
65772 /*
65773 * Add back the child's count to the parent's count:
65774 */
65775 - atomic64_add(child_val, &parent_event->child_count);
65776 - atomic64_add(child_event->total_time_enabled,
65777 + atomic64_add_unchecked(child_val, &parent_event->child_count);
65778 + atomic64_add_unchecked(child_event->total_time_enabled,
65779 &parent_event->child_total_time_enabled);
65780 - atomic64_add(child_event->total_time_running,
65781 + atomic64_add_unchecked(child_event->total_time_running,
65782 &parent_event->child_total_time_running);
65783
65784 /*
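
Besides the *_unchecked conversions, the perf_event_mmap_event() hunk resizes the pathname buffer so the allocation stays within PATH_MAX while d_path() is limited to PATH_MAX - sizeof(u64), keeping the spare bytes that the later 64-bit rounding of the record length needs. A userspace sketch of reserving that alignment headroom; the buffer size and names are made up:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_PATH_MAX 64	/* stands in for PATH_MAX */

static size_t align_up(size_t n, size_t a)
{
	return (n + a - 1) & ~(a - 1);
}

int main(void)
{
	char *buf = calloc(1, DEMO_PATH_MAX);

	if (!buf)
		return 1;

	/* Resolve the name into PATH_MAX - sizeof(u64) bytes ... */
	snprintf(buf, DEMO_PATH_MAX - sizeof(uint64_t), "/lib/modules/demo.ko");

	/* ... so rounding the record up to 8 bytes stays inside the buffer. */
	size_t reclen = align_up(strlen(buf) + 1, sizeof(uint64_t));
	printf("name=%s reclen=%zu buffer=%d\n", buf, reclen, DEMO_PATH_MAX);

	free(buf);
	return 0;
}
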
65785 diff --git a/kernel/exit.c b/kernel/exit.c
65786 index 46c8b14..d868958 100644
65787 --- a/kernel/exit.c
65788 +++ b/kernel/exit.c
65789 @@ -58,6 +58,10 @@
65790 #include <asm/pgtable.h>
65791 #include <asm/mmu_context.h>
65792
65793 +#ifdef CONFIG_GRKERNSEC
65794 +extern rwlock_t grsec_exec_file_lock;
65795 +#endif
65796 +
65797 static void exit_mm(struct task_struct * tsk);
65798
65799 static void __unhash_process(struct task_struct *p, bool group_dead)
65800 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p)
65801 struct task_struct *leader;
65802 int zap_leader;
65803 repeat:
65804 +#ifdef CONFIG_NET
65805 + gr_del_task_from_ip_table(p);
65806 +#endif
65807 +
65808 /* don't need to get the RCU readlock here - the process is dead and
65809 * can't be modifying its own credentials. But shut RCU-lockdep up */
65810 rcu_read_lock();
65811 @@ -381,7 +389,7 @@ int allow_signal(int sig)
65812 * know it'll be handled, so that they don't get converted to
65813 * SIGKILL or just silently dropped.
65814 */
65815 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65816 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65817 recalc_sigpending();
65818 spin_unlock_irq(&current->sighand->siglock);
65819 return 0;
65820 @@ -417,6 +425,17 @@ void daemonize(const char *name, ...)
65821 vsnprintf(current->comm, sizeof(current->comm), name, args);
65822 va_end(args);
65823
65824 +#ifdef CONFIG_GRKERNSEC
65825 + write_lock(&grsec_exec_file_lock);
65826 + if (current->exec_file) {
65827 + fput(current->exec_file);
65828 + current->exec_file = NULL;
65829 + }
65830 + write_unlock(&grsec_exec_file_lock);
65831 +#endif
65832 +
65833 + gr_set_kernel_label(current);
65834 +
65835 /*
65836 * If we were started as result of loading a module, close all of the
65837 * user space pages. We don't need them, and if we didn't close them
65838 @@ -873,6 +892,8 @@ void do_exit(long code)
65839 struct task_struct *tsk = current;
65840 int group_dead;
65841
65842 + set_fs(USER_DS);
65843 +
65844 profile_task_exit(tsk);
65845
65846 WARN_ON(blk_needs_flush_plug(tsk));
65847 @@ -889,7 +910,6 @@ void do_exit(long code)
65848 * mm_release()->clear_child_tid() from writing to a user-controlled
65849 * kernel address.
65850 */
65851 - set_fs(USER_DS);
65852
65853 ptrace_event(PTRACE_EVENT_EXIT, code);
65854
65855 @@ -950,6 +970,9 @@ void do_exit(long code)
65856 tsk->exit_code = code;
65857 taskstats_exit(tsk, group_dead);
65858
65859 + gr_acl_handle_psacct(tsk, code);
65860 + gr_acl_handle_exit();
65861 +
65862 exit_mm(tsk);
65863
65864 if (group_dead)
65865 @@ -1066,7 +1089,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65866 * Take down every thread in the group. This is called by fatal signals
65867 * as well as by sys_exit_group (below).
65868 */
65869 -void
65870 +__noreturn void
65871 do_group_exit(int exit_code)
65872 {
65873 struct signal_struct *sig = current->signal;
65874 diff --git a/kernel/fork.c b/kernel/fork.c
65875 index 423d5a4..4608ecf 100644
65876 --- a/kernel/fork.c
65877 +++ b/kernel/fork.c
65878 @@ -285,7 +285,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65879 *stackend = STACK_END_MAGIC; /* for overflow detection */
65880
65881 #ifdef CONFIG_CC_STACKPROTECTOR
65882 - tsk->stack_canary = get_random_int();
65883 + tsk->stack_canary = pax_get_random_long();
65884 #endif
65885
65886 /*
65887 @@ -309,13 +309,77 @@ out:
65888 }
65889
65890 #ifdef CONFIG_MMU
65891 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
65892 +{
65893 + struct vm_area_struct *tmp;
65894 + unsigned long charge;
65895 + struct mempolicy *pol;
65896 + struct file *file;
65897 +
65898 + charge = 0;
65899 + if (mpnt->vm_flags & VM_ACCOUNT) {
65900 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65901 + if (security_vm_enough_memory(len))
65902 + goto fail_nomem;
65903 + charge = len;
65904 + }
65905 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65906 + if (!tmp)
65907 + goto fail_nomem;
65908 + *tmp = *mpnt;
65909 + tmp->vm_mm = mm;
65910 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
65911 + pol = mpol_dup(vma_policy(mpnt));
65912 + if (IS_ERR(pol))
65913 + goto fail_nomem_policy;
65914 + vma_set_policy(tmp, pol);
65915 + if (anon_vma_fork(tmp, mpnt))
65916 + goto fail_nomem_anon_vma_fork;
65917 + tmp->vm_flags &= ~VM_LOCKED;
65918 + tmp->vm_next = tmp->vm_prev = NULL;
65919 + tmp->vm_mirror = NULL;
65920 + file = tmp->vm_file;
65921 + if (file) {
65922 + struct inode *inode = file->f_path.dentry->d_inode;
65923 + struct address_space *mapping = file->f_mapping;
65924 +
65925 + get_file(file);
65926 + if (tmp->vm_flags & VM_DENYWRITE)
65927 + atomic_dec(&inode->i_writecount);
65928 + mutex_lock(&mapping->i_mmap_mutex);
65929 + if (tmp->vm_flags & VM_SHARED)
65930 + mapping->i_mmap_writable++;
65931 + flush_dcache_mmap_lock(mapping);
65932 + /* insert tmp into the share list, just after mpnt */
65933 + vma_prio_tree_add(tmp, mpnt);
65934 + flush_dcache_mmap_unlock(mapping);
65935 + mutex_unlock(&mapping->i_mmap_mutex);
65936 + }
65937 +
65938 + /*
65939 + * Clear hugetlb-related page reserves for children. This only
65940 + * affects MAP_PRIVATE mappings. Faults generated by the child
65941 + * are not guaranteed to succeed, even if read-only
65942 + */
65943 + if (is_vm_hugetlb_page(tmp))
65944 + reset_vma_resv_huge_pages(tmp);
65945 +
65946 + return tmp;
65947 +
65948 +fail_nomem_anon_vma_fork:
65949 + mpol_put(pol);
65950 +fail_nomem_policy:
65951 + kmem_cache_free(vm_area_cachep, tmp);
65952 +fail_nomem:
65953 + vm_unacct_memory(charge);
65954 + return NULL;
65955 +}
65956 +
65957 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65958 {
65959 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65960 struct rb_node **rb_link, *rb_parent;
65961 int retval;
65962 - unsigned long charge;
65963 - struct mempolicy *pol;
65964
65965 down_write(&oldmm->mmap_sem);
65966 flush_cache_dup_mm(oldmm);
65967 @@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65968 mm->locked_vm = 0;
65969 mm->mmap = NULL;
65970 mm->mmap_cache = NULL;
65971 - mm->free_area_cache = oldmm->mmap_base;
65972 - mm->cached_hole_size = ~0UL;
65973 + mm->free_area_cache = oldmm->free_area_cache;
65974 + mm->cached_hole_size = oldmm->cached_hole_size;
65975 mm->map_count = 0;
65976 cpumask_clear(mm_cpumask(mm));
65977 mm->mm_rb = RB_ROOT;
65978 @@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65979
65980 prev = NULL;
65981 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65982 - struct file *file;
65983 -
65984 if (mpnt->vm_flags & VM_DONTCOPY) {
65985 long pages = vma_pages(mpnt);
65986 mm->total_vm -= pages;
65987 @@ -353,53 +415,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65988 -pages);
65989 continue;
65990 }
65991 - charge = 0;
65992 - if (mpnt->vm_flags & VM_ACCOUNT) {
65993 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65994 - if (security_vm_enough_memory(len))
65995 - goto fail_nomem;
65996 - charge = len;
65997 + tmp = dup_vma(mm, mpnt);
65998 + if (!tmp) {
65999 + retval = -ENOMEM;
66000 + goto out;
66001 }
66002 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66003 - if (!tmp)
66004 - goto fail_nomem;
66005 - *tmp = *mpnt;
66006 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
66007 - pol = mpol_dup(vma_policy(mpnt));
66008 - retval = PTR_ERR(pol);
66009 - if (IS_ERR(pol))
66010 - goto fail_nomem_policy;
66011 - vma_set_policy(tmp, pol);
66012 - tmp->vm_mm = mm;
66013 - if (anon_vma_fork(tmp, mpnt))
66014 - goto fail_nomem_anon_vma_fork;
66015 - tmp->vm_flags &= ~VM_LOCKED;
66016 - tmp->vm_next = tmp->vm_prev = NULL;
66017 - file = tmp->vm_file;
66018 - if (file) {
66019 - struct inode *inode = file->f_path.dentry->d_inode;
66020 - struct address_space *mapping = file->f_mapping;
66021 -
66022 - get_file(file);
66023 - if (tmp->vm_flags & VM_DENYWRITE)
66024 - atomic_dec(&inode->i_writecount);
66025 - mutex_lock(&mapping->i_mmap_mutex);
66026 - if (tmp->vm_flags & VM_SHARED)
66027 - mapping->i_mmap_writable++;
66028 - flush_dcache_mmap_lock(mapping);
66029 - /* insert tmp into the share list, just after mpnt */
66030 - vma_prio_tree_add(tmp, mpnt);
66031 - flush_dcache_mmap_unlock(mapping);
66032 - mutex_unlock(&mapping->i_mmap_mutex);
66033 - }
66034 -
66035 - /*
66036 - * Clear hugetlb-related page reserves for children. This only
66037 - * affects MAP_PRIVATE mappings. Faults generated by the child
66038 - * are not guaranteed to succeed, even if read-only
66039 - */
66040 - if (is_vm_hugetlb_page(tmp))
66041 - reset_vma_resv_huge_pages(tmp);
66042
66043 /*
66044 * Link in the new vma and copy the page table entries.
66045 @@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66046 if (retval)
66047 goto out;
66048 }
66049 +
66050 +#ifdef CONFIG_PAX_SEGMEXEC
66051 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
66052 + struct vm_area_struct *mpnt_m;
66053 +
66054 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
66055 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
66056 +
66057 + if (!mpnt->vm_mirror)
66058 + continue;
66059 +
66060 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
66061 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
66062 + mpnt->vm_mirror = mpnt_m;
66063 + } else {
66064 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
66065 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
66066 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
66067 + mpnt->vm_mirror->vm_mirror = mpnt;
66068 + }
66069 + }
66070 + BUG_ON(mpnt_m);
66071 + }
66072 +#endif
66073 +
66074 /* a new mm has just been created */
66075 arch_dup_mmap(oldmm, mm);
66076 retval = 0;
66077 @@ -430,14 +475,6 @@ out:
66078 flush_tlb_mm(oldmm);
66079 up_write(&oldmm->mmap_sem);
66080 return retval;
66081 -fail_nomem_anon_vma_fork:
66082 - mpol_put(pol);
66083 -fail_nomem_policy:
66084 - kmem_cache_free(vm_area_cachep, tmp);
66085 -fail_nomem:
66086 - retval = -ENOMEM;
66087 - vm_unacct_memory(charge);
66088 - goto out;
66089 }
66090
66091 static inline int mm_alloc_pgd(struct mm_struct *mm)
66092 @@ -659,8 +696,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
66093 return ERR_PTR(err);
66094
66095 mm = get_task_mm(task);
66096 - if (mm && mm != current->mm &&
66097 - !ptrace_may_access(task, mode)) {
66098 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
66099 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
66100 mmput(mm);
66101 mm = ERR_PTR(-EACCES);
66102 }
66103 @@ -882,13 +919,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
66104 spin_unlock(&fs->lock);
66105 return -EAGAIN;
66106 }
66107 - fs->users++;
66108 + atomic_inc(&fs->users);
66109 spin_unlock(&fs->lock);
66110 return 0;
66111 }
66112 tsk->fs = copy_fs_struct(fs);
66113 if (!tsk->fs)
66114 return -ENOMEM;
66115 + gr_set_chroot_entries(tsk, &tsk->fs->root);
66116 return 0;
66117 }
66118
66119 @@ -1152,6 +1190,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66120 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
66121 #endif
66122 retval = -EAGAIN;
66123 +
66124 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
66125 +
66126 if (atomic_read(&p->real_cred->user->processes) >=
66127 task_rlimit(p, RLIMIT_NPROC)) {
66128 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
66129 @@ -1307,6 +1348,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66130 if (clone_flags & CLONE_THREAD)
66131 p->tgid = current->tgid;
66132
66133 + gr_copy_label(p);
66134 +
66135 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
66136 /*
66137 * Clear TID on mm_release()?
66138 @@ -1475,6 +1518,8 @@ bad_fork_cleanup_count:
66139 bad_fork_free:
66140 free_task(p);
66141 fork_out:
66142 + gr_log_forkfail(retval);
66143 +
66144 return ERR_PTR(retval);
66145 }
66146
66147 @@ -1575,6 +1620,8 @@ long do_fork(unsigned long clone_flags,
66148 if (clone_flags & CLONE_PARENT_SETTID)
66149 put_user(nr, parent_tidptr);
66150
66151 + gr_handle_brute_check();
66152 +
66153 if (clone_flags & CLONE_VFORK) {
66154 p->vfork_done = &vfork;
66155 init_completion(&vfork);
66156 @@ -1673,7 +1720,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
66157 return 0;
66158
66159 /* don't need lock here; in the worst case we'll do useless copy */
66160 - if (fs->users == 1)
66161 + if (atomic_read(&fs->users) == 1)
66162 return 0;
66163
66164 *new_fsp = copy_fs_struct(fs);
66165 @@ -1762,7 +1809,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
66166 fs = current->fs;
66167 spin_lock(&fs->lock);
66168 current->fs = new_fs;
66169 - if (--fs->users)
66170 + gr_set_chroot_entries(current, &current->fs->root);
66171 + if (atomic_dec_return(&fs->users))
66172 new_fs = NULL;
66173 else
66174 new_fs = fs;
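
The dup_mmap() change is largely a refactor: the per-VMA copy and its goto-based error unwinding move into a new dup_vma() helper that returns NULL on failure, and the SEGMEXEC configuration gains a second pass over the copied list to re-link vm_mirror pairs. A generic userspace sketch of the extract-the-per-element-duplication-and-cleanup shape of that refactor, with invented list and field names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
	char *payload;
	struct node *next;
};

static void free_list(struct node *n)
{
	while (n) {
		struct node *next = n->next;

		free(n->payload);
		free(n);
		n = next;
	}
}

/* All per-element allocation and unwinding lives here, like dup_vma(). */
static struct node *dup_node(const struct node *src)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		goto fail;
	n->payload = strdup(src->payload);
	if (!n->payload)
		goto fail_payload;
	n->next = NULL;
	return n;

fail_payload:
	free(n);
fail:
	return NULL;
}

/* The copy loop, like dup_mmap(), only has to test for NULL. */
static struct node *dup_list(const struct node *src)
{
	struct node *head = NULL, **tail = &head;

	for (; src; src = src->next) {
		struct node *n = dup_node(src);

		if (!n) {
			free_list(head);
			return NULL;
		}
		*tail = n;
		tail = &n->next;
	}
	return head;
}

int main(void)
{
	struct node c = { "child", NULL }, p = { "parent", &c };
	struct node *copy = dup_list(&p);

	for (struct node *n = copy; n; n = n->next)
		printf("%s\n", n->payload);
	free_list(copy);
	return 0;
}
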
66175 diff --git a/kernel/futex.c b/kernel/futex.c
66176 index 866c9d5..5c5f828 100644
66177 --- a/kernel/futex.c
66178 +++ b/kernel/futex.c
66179 @@ -54,6 +54,7 @@
66180 #include <linux/mount.h>
66181 #include <linux/pagemap.h>
66182 #include <linux/syscalls.h>
66183 +#include <linux/ptrace.h>
66184 #include <linux/signal.h>
66185 #include <linux/export.h>
66186 #include <linux/magic.h>
66187 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
66188 struct page *page, *page_head;
66189 int err, ro = 0;
66190
66191 +#ifdef CONFIG_PAX_SEGMEXEC
66192 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
66193 + return -EFAULT;
66194 +#endif
66195 +
66196 /*
66197 * The futex address must be "naturally" aligned.
66198 */
66199 @@ -2721,6 +2727,7 @@ static int __init futex_init(void)
66200 {
66201 u32 curval;
66202 int i;
66203 + mm_segment_t oldfs;
66204
66205 /*
66206 * This will fail and we want it. Some arch implementations do
66207 @@ -2732,8 +2739,11 @@ static int __init futex_init(void)
66208 * implementation, the non-functional ones will return
66209 * -ENOSYS.
66210 */
66211 + oldfs = get_fs();
66212 + set_fs(USER_DS);
66213 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
66214 futex_cmpxchg_enabled = 1;
66215 + set_fs(oldfs);
66216
66217 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
66218 plist_head_init(&futex_queues[i].chain);
66219 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66220 index 9b22d03..6295b62 100644
66221 --- a/kernel/gcov/base.c
66222 +++ b/kernel/gcov/base.c
66223 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
66224 }
66225
66226 #ifdef CONFIG_MODULES
66227 -static inline int within(void *addr, void *start, unsigned long size)
66228 -{
66229 - return ((addr >= start) && (addr < start + size));
66230 -}
66231 -
66232 /* Update list and generate events when modules are unloaded. */
66233 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66234 void *data)
66235 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66236 prev = NULL;
66237 /* Remove entries located in module from linked list. */
66238 for (info = gcov_info_head; info; info = info->next) {
66239 - if (within(info, mod->module_core, mod->core_size)) {
66240 + if (within_module_core_rw((unsigned long)info, mod)) {
66241 if (prev)
66242 prev->next = info->next;
66243 else
66244 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
66245 index ae34bf5..4e2f3d0 100644
66246 --- a/kernel/hrtimer.c
66247 +++ b/kernel/hrtimer.c
66248 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
66249 local_irq_restore(flags);
66250 }
66251
66252 -static void run_hrtimer_softirq(struct softirq_action *h)
66253 +static void run_hrtimer_softirq(void)
66254 {
66255 hrtimer_peek_ahead_timers();
66256 }
66257 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
66258 index 01d3b70..9e4d098 100644
66259 --- a/kernel/jump_label.c
66260 +++ b/kernel/jump_label.c
66261 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
66262
66263 size = (((unsigned long)stop - (unsigned long)start)
66264 / sizeof(struct jump_entry));
66265 + pax_open_kernel();
66266 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
66267 + pax_close_kernel();
66268 }
66269
66270 static void jump_label_update(struct jump_label_key *key, int enable);
66271 @@ -340,10 +342,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
66272 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
66273 struct jump_entry *iter;
66274
66275 + pax_open_kernel();
66276 for (iter = iter_start; iter < iter_stop; iter++) {
66277 if (within_module_init(iter->code, mod))
66278 iter->code = 0;
66279 }
66280 + pax_close_kernel();
66281 }
66282
66283 static int
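
The jump_label.c hunks bracket updates of the jump entry tables with pax_open_kernel() and pax_close_kernel(), so data that PaX otherwise keeps read-only is writable only for the duration of the sort or the module-init invalidation. The userspace analogy below uses mmap and mprotect to toggle a page between read-only and writable around an update; it illustrates the idea only and is not the kernel mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "initial");
	mprotect(p, pagesz, PROT_READ);			/* data is now read-only */

	mprotect(p, pagesz, PROT_READ | PROT_WRITE);	/* "pax_open_kernel()"  */
	strcpy(p, "updated");
	mprotect(p, pagesz, PROT_READ);			/* "pax_close_kernel()" */

	printf("%s\n", p);
	munmap(p, pagesz);
	return 0;
}
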
66284 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
66285 index 079f1d3..a407562 100644
66286 --- a/kernel/kallsyms.c
66287 +++ b/kernel/kallsyms.c
66288 @@ -11,6 +11,9 @@
66289 * Changed the compression method from stem compression to "table lookup"
66290 * compression (see scripts/kallsyms.c for a more complete description)
66291 */
66292 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66293 +#define __INCLUDED_BY_HIDESYM 1
66294 +#endif
66295 #include <linux/kallsyms.h>
66296 #include <linux/module.h>
66297 #include <linux/init.h>
66298 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
66299
66300 static inline int is_kernel_inittext(unsigned long addr)
66301 {
66302 + if (system_state != SYSTEM_BOOTING)
66303 + return 0;
66304 +
66305 if (addr >= (unsigned long)_sinittext
66306 && addr <= (unsigned long)_einittext)
66307 return 1;
66308 return 0;
66309 }
66310
66311 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66312 +#ifdef CONFIG_MODULES
66313 +static inline int is_module_text(unsigned long addr)
66314 +{
66315 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
66316 + return 1;
66317 +
66318 + addr = ktla_ktva(addr);
66319 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
66320 +}
66321 +#else
66322 +static inline int is_module_text(unsigned long addr)
66323 +{
66324 + return 0;
66325 +}
66326 +#endif
66327 +#endif
66328 +
66329 static inline int is_kernel_text(unsigned long addr)
66330 {
66331 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
66332 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
66333
66334 static inline int is_kernel(unsigned long addr)
66335 {
66336 +
66337 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66338 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
66339 + return 1;
66340 +
66341 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
66342 +#else
66343 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
66344 +#endif
66345 +
66346 return 1;
66347 return in_gate_area_no_mm(addr);
66348 }
66349
66350 static int is_ksym_addr(unsigned long addr)
66351 {
66352 +
66353 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66354 + if (is_module_text(addr))
66355 + return 0;
66356 +#endif
66357 +
66358 if (all_var)
66359 return is_kernel(addr);
66360
66361 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
66362
66363 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
66364 {
66365 - iter->name[0] = '\0';
66366 iter->nameoff = get_symbol_offset(new_pos);
66367 iter->pos = new_pos;
66368 }
66369 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
66370 {
66371 struct kallsym_iter *iter = m->private;
66372
66373 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66374 + if (current_uid())
66375 + return 0;
66376 +#endif
66377 +
66378 /* Some debugging symbols have no name. Ignore them. */
66379 if (!iter->name[0])
66380 return 0;
66381 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
66382 struct kallsym_iter *iter;
66383 int ret;
66384
66385 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
66386 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
66387 if (!iter)
66388 return -ENOMEM;
66389 reset_iter(iter, 0);
66390 diff --git a/kernel/kexec.c b/kernel/kexec.c
66391 index 7b08867..3bac516 100644
66392 --- a/kernel/kexec.c
66393 +++ b/kernel/kexec.c
66394 @@ -1047,7 +1047,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
66395 unsigned long flags)
66396 {
66397 struct compat_kexec_segment in;
66398 - struct kexec_segment out, __user *ksegments;
66399 + struct kexec_segment out;
66400 + struct kexec_segment __user *ksegments;
66401 unsigned long i, result;
66402
66403 /* Don't allow clients that don't understand the native
66404 diff --git a/kernel/kmod.c b/kernel/kmod.c
66405 index a3a46cb..f2e42f8 100644
66406 --- a/kernel/kmod.c
66407 +++ b/kernel/kmod.c
66408 @@ -75,13 +75,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
66409 * If module auto-loading support is disabled then this function
66410 * becomes a no-operation.
66411 */
66412 -int __request_module(bool wait, const char *fmt, ...)
66413 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
66414 {
66415 - va_list args;
66416 char module_name[MODULE_NAME_LEN];
66417 unsigned int max_modprobes;
66418 int ret;
66419 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
66420 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
66421 static char *envp[] = { "HOME=/",
66422 "TERM=linux",
66423 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
66424 @@ -90,9 +89,7 @@ int __request_module(bool wait, const char *fmt, ...)
66425 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
66426 static int kmod_loop_msg;
66427
66428 - va_start(args, fmt);
66429 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
66430 - va_end(args);
66431 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
66432 if (ret >= MODULE_NAME_LEN)
66433 return -ENAMETOOLONG;
66434
66435 @@ -100,6 +97,20 @@ int __request_module(bool wait, const char *fmt, ...)
66436 if (ret)
66437 return ret;
66438
66439 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66440 + if (!current_uid()) {
66441 + /* hack to workaround consolekit/udisks stupidity */
66442 + read_lock(&tasklist_lock);
66443 + if (!strcmp(current->comm, "mount") &&
66444 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
66445 + read_unlock(&tasklist_lock);
66446 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
66447 + return -EPERM;
66448 + }
66449 + read_unlock(&tasklist_lock);
66450 + }
66451 +#endif
66452 +
66453 /* If modprobe needs a service that is in a module, we get a recursive
66454 * loop. Limit the number of running kmod threads to max_threads/2 or
66455 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
66456 @@ -135,6 +146,47 @@ int __request_module(bool wait, const char *fmt, ...)
66457 atomic_dec(&kmod_concurrent);
66458 return ret;
66459 }
66460 +
66461 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
66462 +{
66463 + va_list args;
66464 + int ret;
66465 +
66466 + va_start(args, fmt);
66467 + ret = ____request_module(wait, module_param, fmt, args);
66468 + va_end(args);
66469 +
66470 + return ret;
66471 +}
66472 +
66473 +int __request_module(bool wait, const char *fmt, ...)
66474 +{
66475 + va_list args;
66476 + int ret;
66477 +
66478 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66479 + if (current_uid()) {
66480 + char module_param[MODULE_NAME_LEN];
66481 +
66482 + memset(module_param, 0, sizeof(module_param));
66483 +
66484 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66485 +
66486 + va_start(args, fmt);
66487 + ret = ____request_module(wait, module_param, fmt, args);
66488 + va_end(args);
66489 +
66490 + return ret;
66491 + }
66492 +#endif
66493 +
66494 + va_start(args, fmt);
66495 + ret = ____request_module(wait, NULL, fmt, args);
66496 + va_end(args);
66497 +
66498 + return ret;
66499 +}
66500 +
66501 EXPORT_SYMBOL(__request_module);
66502 #endif /* CONFIG_MODULES */
66503
66504 @@ -224,7 +276,7 @@ static int wait_for_helper(void *data)
66505 *
66506 * Thus the __user pointer cast is valid here.
66507 */
66508 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
66509 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66510
66511 /*
66512 * If ret is 0, either ____call_usermodehelper failed and the
66513 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
66514 index c62b854..cb67968 100644
66515 --- a/kernel/kprobes.c
66516 +++ b/kernel/kprobes.c
66517 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
66518 * kernel image and loaded module images reside. This is required
66519 * so x86_64 can correctly handle the %rip-relative fixups.
66520 */
66521 - kip->insns = module_alloc(PAGE_SIZE);
66522 + kip->insns = module_alloc_exec(PAGE_SIZE);
66523 if (!kip->insns) {
66524 kfree(kip);
66525 return NULL;
66526 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
66527 */
66528 if (!list_is_singular(&kip->list)) {
66529 list_del(&kip->list);
66530 - module_free(NULL, kip->insns);
66531 + module_free_exec(NULL, kip->insns);
66532 kfree(kip);
66533 }
66534 return 1;
66535 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
66536 {
66537 int i, err = 0;
66538 unsigned long offset = 0, size = 0;
66539 - char *modname, namebuf[128];
66540 + char *modname, namebuf[KSYM_NAME_LEN];
66541 const char *symbol_name;
66542 void *addr;
66543 struct kprobe_blackpoint *kb;
66544 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66545 const char *sym = NULL;
66546 unsigned int i = *(loff_t *) v;
66547 unsigned long offset = 0;
66548 - char *modname, namebuf[128];
66549 + char *modname, namebuf[KSYM_NAME_LEN];
66550
66551 head = &kprobe_table[i];
66552 preempt_disable();
66553 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
66554 index 4e316e1..5501eef 100644
66555 --- a/kernel/ksysfs.c
66556 +++ b/kernel/ksysfs.c
66557 @@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
66558 {
66559 if (count+1 > UEVENT_HELPER_PATH_LEN)
66560 return -ENOENT;
66561 + if (!capable(CAP_SYS_ADMIN))
66562 + return -EPERM;
66563 memcpy(uevent_helper, buf, count);
66564 uevent_helper[count] = '\0';
66565 if (count && uevent_helper[count-1] == '\n')
66566 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66567 index 8889f7d..95319b7 100644
66568 --- a/kernel/lockdep.c
66569 +++ b/kernel/lockdep.c
66570 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
66571 end = (unsigned long) &_end,
66572 addr = (unsigned long) obj;
66573
66574 +#ifdef CONFIG_PAX_KERNEXEC
66575 + start = ktla_ktva(start);
66576 +#endif
66577 +
66578 /*
66579 * static variable?
66580 */
66581 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66582 if (!static_obj(lock->key)) {
66583 debug_locks_off();
66584 printk("INFO: trying to register non-static key.\n");
66585 + printk("lock:%pS key:%pS.\n", lock, lock->key);
66586 printk("the code is fine but needs lockdep annotation.\n");
66587 printk("turning off the locking correctness validator.\n");
66588 dump_stack();
66589 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66590 if (!class)
66591 return 0;
66592 }
66593 - atomic_inc((atomic_t *)&class->ops);
66594 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66595 if (very_verbose(class)) {
66596 printk("\nacquire class [%p] %s", class->key, class->name);
66597 if (class->name_version > 1)
66598 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66599 index 91c32a0..b2c71c5 100644
66600 --- a/kernel/lockdep_proc.c
66601 +++ b/kernel/lockdep_proc.c
66602 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66603
66604 static void print_name(struct seq_file *m, struct lock_class *class)
66605 {
66606 - char str[128];
66607 + char str[KSYM_NAME_LEN];
66608 const char *name = class->name;
66609
66610 if (!name) {
66611 diff --git a/kernel/module.c b/kernel/module.c
66612 index 3d56b6f..2a22bd0 100644
66613 --- a/kernel/module.c
66614 +++ b/kernel/module.c
66615 @@ -58,6 +58,7 @@
66616 #include <linux/jump_label.h>
66617 #include <linux/pfn.h>
66618 #include <linux/bsearch.h>
66619 +#include <linux/grsecurity.h>
66620
66621 #define CREATE_TRACE_POINTS
66622 #include <trace/events/module.h>
66623 @@ -113,7 +114,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66624
66625 /* Bounds of module allocation, for speeding __module_address.
66626 * Protected by module_mutex. */
66627 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66628 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66629 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66630
66631 int register_module_notifier(struct notifier_block * nb)
66632 {
66633 @@ -277,7 +279,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66634 return true;
66635
66636 list_for_each_entry_rcu(mod, &modules, list) {
66637 - struct symsearch arr[] = {
66638 + struct symsearch modarr[] = {
66639 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66640 NOT_GPL_ONLY, false },
66641 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66642 @@ -299,7 +301,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66643 #endif
66644 };
66645
66646 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66647 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66648 return true;
66649 }
66650 return false;
66651 @@ -431,7 +433,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66652 static int percpu_modalloc(struct module *mod,
66653 unsigned long size, unsigned long align)
66654 {
66655 - if (align > PAGE_SIZE) {
66656 + if (align-1 >= PAGE_SIZE) {
66657 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66658 mod->name, align, PAGE_SIZE);
66659 align = PAGE_SIZE;
66660 @@ -1001,7 +1003,7 @@ struct module_attribute module_uevent =
66661 static ssize_t show_coresize(struct module_attribute *mattr,
66662 struct module_kobject *mk, char *buffer)
66663 {
66664 - return sprintf(buffer, "%u\n", mk->mod->core_size);
66665 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
66666 }
66667
66668 static struct module_attribute modinfo_coresize =
66669 @@ -1010,7 +1012,7 @@ static struct module_attribute modinfo_coresize =
66670 static ssize_t show_initsize(struct module_attribute *mattr,
66671 struct module_kobject *mk, char *buffer)
66672 {
66673 - return sprintf(buffer, "%u\n", mk->mod->init_size);
66674 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
66675 }
66676
66677 static struct module_attribute modinfo_initsize =
66678 @@ -1224,7 +1226,7 @@ resolve_symbol_wait(struct module *mod,
66679 */
66680 #ifdef CONFIG_SYSFS
66681
66682 -#ifdef CONFIG_KALLSYMS
66683 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66684 static inline bool sect_empty(const Elf_Shdr *sect)
66685 {
66686 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66687 @@ -1690,21 +1692,21 @@ static void set_section_ro_nx(void *base,
66688
66689 static void unset_module_core_ro_nx(struct module *mod)
66690 {
66691 - set_page_attributes(mod->module_core + mod->core_text_size,
66692 - mod->module_core + mod->core_size,
66693 + set_page_attributes(mod->module_core_rw,
66694 + mod->module_core_rw + mod->core_size_rw,
66695 set_memory_x);
66696 - set_page_attributes(mod->module_core,
66697 - mod->module_core + mod->core_ro_size,
66698 + set_page_attributes(mod->module_core_rx,
66699 + mod->module_core_rx + mod->core_size_rx,
66700 set_memory_rw);
66701 }
66702
66703 static void unset_module_init_ro_nx(struct module *mod)
66704 {
66705 - set_page_attributes(mod->module_init + mod->init_text_size,
66706 - mod->module_init + mod->init_size,
66707 + set_page_attributes(mod->module_init_rw,
66708 + mod->module_init_rw + mod->init_size_rw,
66709 set_memory_x);
66710 - set_page_attributes(mod->module_init,
66711 - mod->module_init + mod->init_ro_size,
66712 + set_page_attributes(mod->module_init_rx,
66713 + mod->module_init_rx + mod->init_size_rx,
66714 set_memory_rw);
66715 }
66716
66717 @@ -1715,14 +1717,14 @@ void set_all_modules_text_rw(void)
66718
66719 mutex_lock(&module_mutex);
66720 list_for_each_entry_rcu(mod, &modules, list) {
66721 - if ((mod->module_core) && (mod->core_text_size)) {
66722 - set_page_attributes(mod->module_core,
66723 - mod->module_core + mod->core_text_size,
66724 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66725 + set_page_attributes(mod->module_core_rx,
66726 + mod->module_core_rx + mod->core_size_rx,
66727 set_memory_rw);
66728 }
66729 - if ((mod->module_init) && (mod->init_text_size)) {
66730 - set_page_attributes(mod->module_init,
66731 - mod->module_init + mod->init_text_size,
66732 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66733 + set_page_attributes(mod->module_init_rx,
66734 + mod->module_init_rx + mod->init_size_rx,
66735 set_memory_rw);
66736 }
66737 }
66738 @@ -1736,14 +1738,14 @@ void set_all_modules_text_ro(void)
66739
66740 mutex_lock(&module_mutex);
66741 list_for_each_entry_rcu(mod, &modules, list) {
66742 - if ((mod->module_core) && (mod->core_text_size)) {
66743 - set_page_attributes(mod->module_core,
66744 - mod->module_core + mod->core_text_size,
66745 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66746 + set_page_attributes(mod->module_core_rx,
66747 + mod->module_core_rx + mod->core_size_rx,
66748 set_memory_ro);
66749 }
66750 - if ((mod->module_init) && (mod->init_text_size)) {
66751 - set_page_attributes(mod->module_init,
66752 - mod->module_init + mod->init_text_size,
66753 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66754 + set_page_attributes(mod->module_init_rx,
66755 + mod->module_init_rx + mod->init_size_rx,
66756 set_memory_ro);
66757 }
66758 }
66759 @@ -1789,16 +1791,19 @@ static void free_module(struct module *mod)
66760
66761 /* This may be NULL, but that's OK */
66762 unset_module_init_ro_nx(mod);
66763 - module_free(mod, mod->module_init);
66764 + module_free(mod, mod->module_init_rw);
66765 + module_free_exec(mod, mod->module_init_rx);
66766 kfree(mod->args);
66767 percpu_modfree(mod);
66768
66769 /* Free lock-classes: */
66770 - lockdep_free_key_range(mod->module_core, mod->core_size);
66771 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66772 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66773
66774 /* Finally, free the core (containing the module structure) */
66775 unset_module_core_ro_nx(mod);
66776 - module_free(mod, mod->module_core);
66777 + module_free_exec(mod, mod->module_core_rx);
66778 + module_free(mod, mod->module_core_rw);
66779
66780 #ifdef CONFIG_MPU
66781 update_protections(current->mm);
66782 @@ -1867,10 +1872,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66783 unsigned int i;
66784 int ret = 0;
66785 const struct kernel_symbol *ksym;
66786 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66787 + int is_fs_load = 0;
66788 + int register_filesystem_found = 0;
66789 + char *p;
66790 +
66791 + p = strstr(mod->args, "grsec_modharden_fs");
66792 + if (p) {
66793 + char *endptr = p + strlen("grsec_modharden_fs");
66794 + /* copy \0 as well */
66795 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66796 + is_fs_load = 1;
66797 + }
66798 +#endif
66799
66800 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66801 const char *name = info->strtab + sym[i].st_name;
66802
66803 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66804 + /* it's a real shame this will never get ripped and copied
66805 + upstream! ;(
66806 + */
66807 + if (is_fs_load && !strcmp(name, "register_filesystem"))
66808 + register_filesystem_found = 1;
66809 +#endif
66810 +
66811 switch (sym[i].st_shndx) {
66812 case SHN_COMMON:
66813 /* We compiled with -fno-common. These are not
66814 @@ -1891,7 +1917,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66815 ksym = resolve_symbol_wait(mod, info, name);
66816 /* Ok if resolved. */
66817 if (ksym && !IS_ERR(ksym)) {
66818 + pax_open_kernel();
66819 sym[i].st_value = ksym->value;
66820 + pax_close_kernel();
66821 break;
66822 }
66823
66824 @@ -1910,11 +1938,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66825 secbase = (unsigned long)mod_percpu(mod);
66826 else
66827 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66828 + pax_open_kernel();
66829 sym[i].st_value += secbase;
66830 + pax_close_kernel();
66831 break;
66832 }
66833 }
66834
66835 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66836 + if (is_fs_load && !register_filesystem_found) {
66837 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66838 + ret = -EPERM;
66839 + }
66840 +#endif
66841 +
66842 return ret;
66843 }
66844
66845 @@ -2018,22 +2055,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66846 || s->sh_entsize != ~0UL
66847 || strstarts(sname, ".init"))
66848 continue;
66849 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66850 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66851 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66852 + else
66853 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66854 pr_debug("\t%s\n", sname);
66855 }
66856 - switch (m) {
66857 - case 0: /* executable */
66858 - mod->core_size = debug_align(mod->core_size);
66859 - mod->core_text_size = mod->core_size;
66860 - break;
66861 - case 1: /* RO: text and ro-data */
66862 - mod->core_size = debug_align(mod->core_size);
66863 - mod->core_ro_size = mod->core_size;
66864 - break;
66865 - case 3: /* whole core */
66866 - mod->core_size = debug_align(mod->core_size);
66867 - break;
66868 - }
66869 }
66870
66871 pr_debug("Init section allocation order:\n");
66872 @@ -2047,23 +2074,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
66873 || s->sh_entsize != ~0UL
66874 || !strstarts(sname, ".init"))
66875 continue;
66876 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66877 - | INIT_OFFSET_MASK);
66878 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66879 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66880 + else
66881 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66882 + s->sh_entsize |= INIT_OFFSET_MASK;
66883 pr_debug("\t%s\n", sname);
66884 }
66885 - switch (m) {
66886 - case 0: /* executable */
66887 - mod->init_size = debug_align(mod->init_size);
66888 - mod->init_text_size = mod->init_size;
66889 - break;
66890 - case 1: /* RO: text and ro-data */
66891 - mod->init_size = debug_align(mod->init_size);
66892 - mod->init_ro_size = mod->init_size;
66893 - break;
66894 - case 3: /* whole init */
66895 - mod->init_size = debug_align(mod->init_size);
66896 - break;
66897 - }
66898 }
66899 }
66900
66901 @@ -2235,7 +2252,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66902
66903 /* Put symbol section at end of init part of module. */
66904 symsect->sh_flags |= SHF_ALLOC;
66905 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66906 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
66907 info->index.sym) | INIT_OFFSET_MASK;
66908 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
66909
66910 @@ -2250,13 +2267,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66911 }
66912
66913 /* Append room for core symbols at end of core part. */
66914 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66915 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66916 - mod->core_size += strtab_size;
66917 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66918 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66919 + mod->core_size_rx += strtab_size;
66920
66921 /* Put string table section at end of init part of module. */
66922 strsect->sh_flags |= SHF_ALLOC;
66923 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66924 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66925 info->index.str) | INIT_OFFSET_MASK;
66926 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
66927 }
66928 @@ -2274,12 +2291,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66929 /* Make sure we get permanent strtab: don't use info->strtab. */
66930 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
66931
66932 + pax_open_kernel();
66933 +
66934 /* Set types up while we still have access to sections. */
66935 for (i = 0; i < mod->num_symtab; i++)
66936 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
66937
66938 - mod->core_symtab = dst = mod->module_core + info->symoffs;
66939 - mod->core_strtab = s = mod->module_core + info->stroffs;
66940 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66941 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66942 src = mod->symtab;
66943 *dst = *src;
66944 *s++ = 0;
66945 @@ -2292,6 +2311,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66946 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
66947 }
66948 mod->core_num_syms = ndst;
66949 +
66950 + pax_close_kernel();
66951 }
66952 #else
66953 static inline void layout_symtab(struct module *mod, struct load_info *info)
66954 @@ -2325,17 +2346,33 @@ void * __weak module_alloc(unsigned long size)
66955 return size == 0 ? NULL : vmalloc_exec(size);
66956 }
66957
66958 -static void *module_alloc_update_bounds(unsigned long size)
66959 +static void *module_alloc_update_bounds_rw(unsigned long size)
66960 {
66961 void *ret = module_alloc(size);
66962
66963 if (ret) {
66964 mutex_lock(&module_mutex);
66965 /* Update module bounds. */
66966 - if ((unsigned long)ret < module_addr_min)
66967 - module_addr_min = (unsigned long)ret;
66968 - if ((unsigned long)ret + size > module_addr_max)
66969 - module_addr_max = (unsigned long)ret + size;
66970 + if ((unsigned long)ret < module_addr_min_rw)
66971 + module_addr_min_rw = (unsigned long)ret;
66972 + if ((unsigned long)ret + size > module_addr_max_rw)
66973 + module_addr_max_rw = (unsigned long)ret + size;
66974 + mutex_unlock(&module_mutex);
66975 + }
66976 + return ret;
66977 +}
66978 +
66979 +static void *module_alloc_update_bounds_rx(unsigned long size)
66980 +{
66981 + void *ret = module_alloc_exec(size);
66982 +
66983 + if (ret) {
66984 + mutex_lock(&module_mutex);
66985 + /* Update module bounds. */
66986 + if ((unsigned long)ret < module_addr_min_rx)
66987 + module_addr_min_rx = (unsigned long)ret;
66988 + if ((unsigned long)ret + size > module_addr_max_rx)
66989 + module_addr_max_rx = (unsigned long)ret + size;
66990 mutex_unlock(&module_mutex);
66991 }
66992 return ret;
66993 @@ -2512,8 +2549,14 @@ static struct module *setup_load_info(struct load_info *info)
66994 static int check_modinfo(struct module *mod, struct load_info *info)
66995 {
66996 const char *modmagic = get_modinfo(info, "vermagic");
66997 + const char *license = get_modinfo(info, "license");
66998 int err;
66999
67000 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
67001 + if (!license || !license_is_gpl_compatible(license))
67002 + return -ENOEXEC;
67003 +#endif
67004 +
67005 /* This is allowed: modprobe --force will invalidate it. */
67006 if (!modmagic) {
67007 err = try_to_force_load(mod, "bad vermagic");
67008 @@ -2536,7 +2579,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
67009 }
67010
67011 /* Set up license info based on the info section */
67012 - set_license(mod, get_modinfo(info, "license"));
67013 + set_license(mod, license);
67014
67015 return 0;
67016 }
67017 @@ -2630,7 +2673,7 @@ static int move_module(struct module *mod, struct load_info *info)
67018 void *ptr;
67019
67020 /* Do the allocs. */
67021 - ptr = module_alloc_update_bounds(mod->core_size);
67022 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
67023 /*
67024 * The pointer to this block is stored in the module structure
67025 * which is inside the block. Just mark it as not being a
67026 @@ -2640,23 +2683,50 @@ static int move_module(struct module *mod, struct load_info *info)
67027 if (!ptr)
67028 return -ENOMEM;
67029
67030 - memset(ptr, 0, mod->core_size);
67031 - mod->module_core = ptr;
67032 + memset(ptr, 0, mod->core_size_rw);
67033 + mod->module_core_rw = ptr;
67034
67035 - ptr = module_alloc_update_bounds(mod->init_size);
67036 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
67037 /*
67038 * The pointer to this block is stored in the module structure
67039 * which is inside the block. This block doesn't need to be
67040 * scanned as it contains data and code that will be freed
67041 * after the module is initialized.
67042 */
67043 - kmemleak_ignore(ptr);
67044 - if (!ptr && mod->init_size) {
67045 - module_free(mod, mod->module_core);
67046 + kmemleak_not_leak(ptr);
67047 + if (!ptr && mod->init_size_rw) {
67048 + module_free(mod, mod->module_core_rw);
67049 return -ENOMEM;
67050 }
67051 - memset(ptr, 0, mod->init_size);
67052 - mod->module_init = ptr;
67053 + memset(ptr, 0, mod->init_size_rw);
67054 + mod->module_init_rw = ptr;
67055 +
67056 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
67057 + kmemleak_not_leak(ptr);
67058 + if (!ptr) {
67059 + module_free(mod, mod->module_init_rw);
67060 + module_free(mod, mod->module_core_rw);
67061 + return -ENOMEM;
67062 + }
67063 +
67064 + pax_open_kernel();
67065 + memset(ptr, 0, mod->core_size_rx);
67066 + pax_close_kernel();
67067 + mod->module_core_rx = ptr;
67068 +
67069 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
67070 + kmemleak_not_leak(ptr);
67071 + if (!ptr && mod->init_size_rx) {
67072 + module_free_exec(mod, mod->module_core_rx);
67073 + module_free(mod, mod->module_init_rw);
67074 + module_free(mod, mod->module_core_rw);
67075 + return -ENOMEM;
67076 + }
67077 +
67078 + pax_open_kernel();
67079 + memset(ptr, 0, mod->init_size_rx);
67080 + pax_close_kernel();
67081 + mod->module_init_rx = ptr;
67082
67083 /* Transfer each section which specifies SHF_ALLOC */
67084 pr_debug("final section addresses:\n");
67085 @@ -2667,16 +2737,45 @@ static int move_module(struct module *mod, struct load_info *info)
67086 if (!(shdr->sh_flags & SHF_ALLOC))
67087 continue;
67088
67089 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
67090 - dest = mod->module_init
67091 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67092 - else
67093 - dest = mod->module_core + shdr->sh_entsize;
67094 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
67095 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67096 + dest = mod->module_init_rw
67097 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67098 + else
67099 + dest = mod->module_init_rx
67100 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67101 + } else {
67102 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67103 + dest = mod->module_core_rw + shdr->sh_entsize;
67104 + else
67105 + dest = mod->module_core_rx + shdr->sh_entsize;
67106 + }
67107 +
67108 + if (shdr->sh_type != SHT_NOBITS) {
67109 +
67110 +#ifdef CONFIG_PAX_KERNEXEC
67111 +#ifdef CONFIG_X86_64
67112 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
67113 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
67114 +#endif
67115 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
67116 + pax_open_kernel();
67117 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67118 + pax_close_kernel();
67119 + } else
67120 +#endif
67121
67122 - if (shdr->sh_type != SHT_NOBITS)
67123 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67124 + }
67125 /* Update sh_addr to point to copy in image. */
67126 - shdr->sh_addr = (unsigned long)dest;
67127 +
67128 +#ifdef CONFIG_PAX_KERNEXEC
67129 + if (shdr->sh_flags & SHF_EXECINSTR)
67130 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
67131 + else
67132 +#endif
67133 +
67134 + shdr->sh_addr = (unsigned long)dest;
67135 pr_debug("\t0x%lx %s\n",
67136 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
67137 }
67138 @@ -2727,12 +2826,12 @@ static void flush_module_icache(const struct module *mod)
67139 * Do it before processing of module parameters, so the module
67140 * can provide parameter accessor functions of its own.
67141 */
67142 - if (mod->module_init)
67143 - flush_icache_range((unsigned long)mod->module_init,
67144 - (unsigned long)mod->module_init
67145 - + mod->init_size);
67146 - flush_icache_range((unsigned long)mod->module_core,
67147 - (unsigned long)mod->module_core + mod->core_size);
67148 + if (mod->module_init_rx)
67149 + flush_icache_range((unsigned long)mod->module_init_rx,
67150 + (unsigned long)mod->module_init_rx
67151 + + mod->init_size_rx);
67152 + flush_icache_range((unsigned long)mod->module_core_rx,
67153 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
67154
67155 set_fs(old_fs);
67156 }
67157 @@ -2802,8 +2901,10 @@ out:
67158 static void module_deallocate(struct module *mod, struct load_info *info)
67159 {
67160 percpu_modfree(mod);
67161 - module_free(mod, mod->module_init);
67162 - module_free(mod, mod->module_core);
67163 + module_free_exec(mod, mod->module_init_rx);
67164 + module_free_exec(mod, mod->module_core_rx);
67165 + module_free(mod, mod->module_init_rw);
67166 + module_free(mod, mod->module_core_rw);
67167 }
67168
67169 int __weak module_finalize(const Elf_Ehdr *hdr,
67170 @@ -2867,9 +2968,38 @@ static struct module *load_module(void __user *umod,
67171 if (err)
67172 goto free_unload;
67173
67174 + /* Now copy in args */
67175 + mod->args = strndup_user(uargs, ~0UL >> 1);
67176 + if (IS_ERR(mod->args)) {
67177 + err = PTR_ERR(mod->args);
67178 + goto free_unload;
67179 + }
67180 +
67181 /* Set up MODINFO_ATTR fields */
67182 setup_modinfo(mod, &info);
67183
67184 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67185 + {
67186 + char *p, *p2;
67187 +
67188 + if (strstr(mod->args, "grsec_modharden_netdev")) {
67189 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
67190 + err = -EPERM;
67191 + goto free_modinfo;
67192 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
67193 + p += strlen("grsec_modharden_normal");
67194 + p2 = strstr(p, "_");
67195 + if (p2) {
67196 + *p2 = '\0';
67197 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
67198 + *p2 = '_';
67199 + }
67200 + err = -EPERM;
67201 + goto free_modinfo;
67202 + }
67203 + }
67204 +#endif
67205 +
67206 /* Fix up syms, so that st_value is a pointer to location. */
67207 err = simplify_symbols(mod, &info);
67208 if (err < 0)
67209 @@ -2885,13 +3015,6 @@ static struct module *load_module(void __user *umod,
67210
67211 flush_module_icache(mod);
67212
67213 - /* Now copy in args */
67214 - mod->args = strndup_user(uargs, ~0UL >> 1);
67215 - if (IS_ERR(mod->args)) {
67216 - err = PTR_ERR(mod->args);
67217 - goto free_arch_cleanup;
67218 - }
67219 -
67220 /* Mark state as coming so strong_try_module_get() ignores us. */
67221 mod->state = MODULE_STATE_COMING;
67222
67223 @@ -2948,11 +3071,10 @@ static struct module *load_module(void __user *umod,
67224 unlock:
67225 mutex_unlock(&module_mutex);
67226 synchronize_sched();
67227 - kfree(mod->args);
67228 - free_arch_cleanup:
67229 module_arch_cleanup(mod);
67230 free_modinfo:
67231 free_modinfo(mod);
67232 + kfree(mod->args);
67233 free_unload:
67234 module_unload_free(mod);
67235 free_module:
67236 @@ -2993,16 +3115,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67237 MODULE_STATE_COMING, mod);
67238
67239 /* Set RO and NX regions for core */
67240 - set_section_ro_nx(mod->module_core,
67241 - mod->core_text_size,
67242 - mod->core_ro_size,
67243 - mod->core_size);
67244 + set_section_ro_nx(mod->module_core_rx,
67245 + mod->core_size_rx,
67246 + mod->core_size_rx,
67247 + mod->core_size_rx);
67248
67249 /* Set RO and NX regions for init */
67250 - set_section_ro_nx(mod->module_init,
67251 - mod->init_text_size,
67252 - mod->init_ro_size,
67253 - mod->init_size);
67254 + set_section_ro_nx(mod->module_init_rx,
67255 + mod->init_size_rx,
67256 + mod->init_size_rx,
67257 + mod->init_size_rx);
67258
67259 do_mod_ctors(mod);
67260 /* Start the module */
67261 @@ -3048,11 +3170,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67262 mod->strtab = mod->core_strtab;
67263 #endif
67264 unset_module_init_ro_nx(mod);
67265 - module_free(mod, mod->module_init);
67266 - mod->module_init = NULL;
67267 - mod->init_size = 0;
67268 - mod->init_ro_size = 0;
67269 - mod->init_text_size = 0;
67270 + module_free(mod, mod->module_init_rw);
67271 + module_free_exec(mod, mod->module_init_rx);
67272 + mod->module_init_rw = NULL;
67273 + mod->module_init_rx = NULL;
67274 + mod->init_size_rw = 0;
67275 + mod->init_size_rx = 0;
67276 mutex_unlock(&module_mutex);
67277
67278 return 0;
67279 @@ -3083,10 +3206,16 @@ static const char *get_ksymbol(struct module *mod,
67280 unsigned long nextval;
67281
67282 /* At worse, next value is at end of module */
67283 - if (within_module_init(addr, mod))
67284 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
67285 + if (within_module_init_rx(addr, mod))
67286 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
67287 + else if (within_module_init_rw(addr, mod))
67288 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
67289 + else if (within_module_core_rx(addr, mod))
67290 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
67291 + else if (within_module_core_rw(addr, mod))
67292 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
67293 else
67294 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
67295 + return NULL;
67296
67297 /* Scan for closest preceding symbol, and next symbol. (ELF
67298 starts real symbols at 1). */
67299 @@ -3321,7 +3450,7 @@ static int m_show(struct seq_file *m, void *p)
67300 char buf[8];
67301
67302 seq_printf(m, "%s %u",
67303 - mod->name, mod->init_size + mod->core_size);
67304 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
67305 print_unload_info(m, mod);
67306
67307 /* Informative for users. */
67308 @@ -3330,7 +3459,7 @@ static int m_show(struct seq_file *m, void *p)
67309 mod->state == MODULE_STATE_COMING ? "Loading":
67310 "Live");
67311 /* Used by oprofile and other similar tools. */
67312 - seq_printf(m, " 0x%pK", mod->module_core);
67313 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
67314
67315 /* Taints info */
67316 if (mod->taints)
67317 @@ -3366,7 +3495,17 @@ static const struct file_operations proc_modules_operations = {
67318
67319 static int __init proc_modules_init(void)
67320 {
67321 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67322 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67323 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67324 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67325 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
67326 +#else
67327 proc_create("modules", 0, NULL, &proc_modules_operations);
67328 +#endif
67329 +#else
67330 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67331 +#endif
67332 return 0;
67333 }
67334 module_init(proc_modules_init);
67335 @@ -3425,12 +3564,12 @@ struct module *__module_address(unsigned long addr)
67336 {
67337 struct module *mod;
67338
67339 - if (addr < module_addr_min || addr > module_addr_max)
67340 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
67341 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
67342 return NULL;
67343
67344 list_for_each_entry_rcu(mod, &modules, list)
67345 - if (within_module_core(addr, mod)
67346 - || within_module_init(addr, mod))
67347 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
67348 return mod;
67349 return NULL;
67350 }
67351 @@ -3464,11 +3603,20 @@ bool is_module_text_address(unsigned long addr)
67352 */
67353 struct module *__module_text_address(unsigned long addr)
67354 {
67355 - struct module *mod = __module_address(addr);
67356 + struct module *mod;
67357 +
67358 +#ifdef CONFIG_X86_32
67359 + addr = ktla_ktva(addr);
67360 +#endif
67361 +
67362 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
67363 + return NULL;
67364 +
67365 + mod = __module_address(addr);
67366 +
67367 if (mod) {
67368 /* Make sure it's within the text section. */
67369 - if (!within(addr, mod->module_init, mod->init_text_size)
67370 - && !within(addr, mod->module_core, mod->core_text_size))
67371 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
67372 mod = NULL;
67373 }
67374 return mod;
67375 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
67376 index 7e3443f..b2a1e6b 100644
67377 --- a/kernel/mutex-debug.c
67378 +++ b/kernel/mutex-debug.c
67379 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
67380 }
67381
67382 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67383 - struct thread_info *ti)
67384 + struct task_struct *task)
67385 {
67386 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
67387
67388 /* Mark the current thread as blocked on the lock: */
67389 - ti->task->blocked_on = waiter;
67390 + task->blocked_on = waiter;
67391 }
67392
67393 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67394 - struct thread_info *ti)
67395 + struct task_struct *task)
67396 {
67397 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
67398 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
67399 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
67400 - ti->task->blocked_on = NULL;
67401 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
67402 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
67403 + task->blocked_on = NULL;
67404
67405 list_del_init(&waiter->list);
67406 waiter->task = NULL;
67407 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
67408 index 0799fd3..d06ae3b 100644
67409 --- a/kernel/mutex-debug.h
67410 +++ b/kernel/mutex-debug.h
67411 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
67412 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
67413 extern void debug_mutex_add_waiter(struct mutex *lock,
67414 struct mutex_waiter *waiter,
67415 - struct thread_info *ti);
67416 + struct task_struct *task);
67417 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67418 - struct thread_info *ti);
67419 + struct task_struct *task);
67420 extern void debug_mutex_unlock(struct mutex *lock);
67421 extern void debug_mutex_init(struct mutex *lock, const char *name,
67422 struct lock_class_key *key);
67423 diff --git a/kernel/mutex.c b/kernel/mutex.c
67424 index 89096dd..f91ebc5 100644
67425 --- a/kernel/mutex.c
67426 +++ b/kernel/mutex.c
67427 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67428 spin_lock_mutex(&lock->wait_lock, flags);
67429
67430 debug_mutex_lock_common(lock, &waiter);
67431 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
67432 + debug_mutex_add_waiter(lock, &waiter, task);
67433
67434 /* add waiting tasks to the end of the waitqueue (FIFO): */
67435 list_add_tail(&waiter.list, &lock->wait_list);
67436 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67437 * TASK_UNINTERRUPTIBLE case.)
67438 */
67439 if (unlikely(signal_pending_state(state, task))) {
67440 - mutex_remove_waiter(lock, &waiter,
67441 - task_thread_info(task));
67442 + mutex_remove_waiter(lock, &waiter, task);
67443 mutex_release(&lock->dep_map, 1, ip);
67444 spin_unlock_mutex(&lock->wait_lock, flags);
67445
67446 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67447 done:
67448 lock_acquired(&lock->dep_map, ip);
67449 /* got the lock - rejoice! */
67450 - mutex_remove_waiter(lock, &waiter, current_thread_info());
67451 + mutex_remove_waiter(lock, &waiter, task);
67452 mutex_set_owner(lock);
67453
67454 /* set it to 0 if there are no waiters left: */
67455 diff --git a/kernel/padata.c b/kernel/padata.c
67456 index b452599..5d68f4e 100644
67457 --- a/kernel/padata.c
67458 +++ b/kernel/padata.c
67459 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
67460 padata->pd = pd;
67461 padata->cb_cpu = cb_cpu;
67462
67463 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
67464 - atomic_set(&pd->seq_nr, -1);
67465 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
67466 + atomic_set_unchecked(&pd->seq_nr, -1);
67467
67468 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
67469 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
67470
67471 target_cpu = padata_cpu_hash(padata);
67472 queue = per_cpu_ptr(pd->pqueue, target_cpu);
67473 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
67474 padata_init_pqueues(pd);
67475 padata_init_squeues(pd);
67476 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
67477 - atomic_set(&pd->seq_nr, -1);
67478 + atomic_set_unchecked(&pd->seq_nr, -1);
67479 atomic_set(&pd->reorder_objects, 0);
67480 atomic_set(&pd->refcnt, 0);
67481 pd->pinst = pinst;
67482 diff --git a/kernel/panic.c b/kernel/panic.c
67483 index 8ed89a1..e83856a 100644
67484 --- a/kernel/panic.c
67485 +++ b/kernel/panic.c
67486 @@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
67487 const char *board;
67488
67489 printk(KERN_WARNING "------------[ cut here ]------------\n");
67490 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
67491 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
67492 board = dmi_get_system_info(DMI_PRODUCT_NAME);
67493 if (board)
67494 printk(KERN_WARNING "Hardware name: %s\n", board);
67495 @@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
67496 */
67497 void __stack_chk_fail(void)
67498 {
67499 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
67500 + dump_stack();
67501 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
67502 __builtin_return_address(0));
67503 }
67504 EXPORT_SYMBOL(__stack_chk_fail);
67505 diff --git a/kernel/pid.c b/kernel/pid.c
67506 index 9f08dfa..6765c40 100644
67507 --- a/kernel/pid.c
67508 +++ b/kernel/pid.c
67509 @@ -33,6 +33,7 @@
67510 #include <linux/rculist.h>
67511 #include <linux/bootmem.h>
67512 #include <linux/hash.h>
67513 +#include <linux/security.h>
67514 #include <linux/pid_namespace.h>
67515 #include <linux/init_task.h>
67516 #include <linux/syscalls.h>
67517 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
67518
67519 int pid_max = PID_MAX_DEFAULT;
67520
67521 -#define RESERVED_PIDS 300
67522 +#define RESERVED_PIDS 500
67523
67524 int pid_max_min = RESERVED_PIDS + 1;
67525 int pid_max_max = PID_MAX_LIMIT;
67526 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
67527 */
67528 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67529 {
67530 + struct task_struct *task;
67531 +
67532 rcu_lockdep_assert(rcu_read_lock_held(),
67533 "find_task_by_pid_ns() needs rcu_read_lock()"
67534 " protection");
67535 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67536 +
67537 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67538 +
67539 + if (gr_pid_is_chrooted(task))
67540 + return NULL;
67541 +
67542 + return task;
67543 }
67544
67545 struct task_struct *find_task_by_vpid(pid_t vnr)
67546 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
67547 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67548 }
67549
67550 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67551 +{
67552 + rcu_lockdep_assert(rcu_read_lock_held(),
67553 + "find_task_by_pid_ns() needs rcu_read_lock()"
67554 + " protection");
67555 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67556 +}
67557 +
67558 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67559 {
67560 struct pid *pid;
67561 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
67562 index 125cb67..a4d1c30 100644
67563 --- a/kernel/posix-cpu-timers.c
67564 +++ b/kernel/posix-cpu-timers.c
67565 @@ -6,6 +6,7 @@
67566 #include <linux/posix-timers.h>
67567 #include <linux/errno.h>
67568 #include <linux/math64.h>
67569 +#include <linux/security.h>
67570 #include <asm/uaccess.h>
67571 #include <linux/kernel_stat.h>
67572 #include <trace/events/timer.h>
67573 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
67574
67575 static __init int init_posix_cpu_timers(void)
67576 {
67577 - struct k_clock process = {
67578 + static struct k_clock process = {
67579 .clock_getres = process_cpu_clock_getres,
67580 .clock_get = process_cpu_clock_get,
67581 .timer_create = process_cpu_timer_create,
67582 .nsleep = process_cpu_nsleep,
67583 .nsleep_restart = process_cpu_nsleep_restart,
67584 };
67585 - struct k_clock thread = {
67586 + static struct k_clock thread = {
67587 .clock_getres = thread_cpu_clock_getres,
67588 .clock_get = thread_cpu_clock_get,
67589 .timer_create = thread_cpu_timer_create,
67590 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67591 index 69185ae..cc2847a 100644
67592 --- a/kernel/posix-timers.c
67593 +++ b/kernel/posix-timers.c
67594 @@ -43,6 +43,7 @@
67595 #include <linux/idr.h>
67596 #include <linux/posix-clock.h>
67597 #include <linux/posix-timers.h>
67598 +#include <linux/grsecurity.h>
67599 #include <linux/syscalls.h>
67600 #include <linux/wait.h>
67601 #include <linux/workqueue.h>
67602 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67603 * which we beg off on and pass to do_sys_settimeofday().
67604 */
67605
67606 -static struct k_clock posix_clocks[MAX_CLOCKS];
67607 +static struct k_clock *posix_clocks[MAX_CLOCKS];
67608
67609 /*
67610 * These ones are defined below.
67611 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
67612 */
67613 static __init int init_posix_timers(void)
67614 {
67615 - struct k_clock clock_realtime = {
67616 + static struct k_clock clock_realtime = {
67617 .clock_getres = hrtimer_get_res,
67618 .clock_get = posix_clock_realtime_get,
67619 .clock_set = posix_clock_realtime_set,
67620 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67621 .timer_get = common_timer_get,
67622 .timer_del = common_timer_del,
67623 };
67624 - struct k_clock clock_monotonic = {
67625 + static struct k_clock clock_monotonic = {
67626 .clock_getres = hrtimer_get_res,
67627 .clock_get = posix_ktime_get_ts,
67628 .nsleep = common_nsleep,
67629 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67630 .timer_get = common_timer_get,
67631 .timer_del = common_timer_del,
67632 };
67633 - struct k_clock clock_monotonic_raw = {
67634 + static struct k_clock clock_monotonic_raw = {
67635 .clock_getres = hrtimer_get_res,
67636 .clock_get = posix_get_monotonic_raw,
67637 };
67638 - struct k_clock clock_realtime_coarse = {
67639 + static struct k_clock clock_realtime_coarse = {
67640 .clock_getres = posix_get_coarse_res,
67641 .clock_get = posix_get_realtime_coarse,
67642 };
67643 - struct k_clock clock_monotonic_coarse = {
67644 + static struct k_clock clock_monotonic_coarse = {
67645 .clock_getres = posix_get_coarse_res,
67646 .clock_get = posix_get_monotonic_coarse,
67647 };
67648 - struct k_clock clock_boottime = {
67649 + static struct k_clock clock_boottime = {
67650 .clock_getres = hrtimer_get_res,
67651 .clock_get = posix_get_boottime,
67652 .nsleep = common_nsleep,
67653 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67654 return;
67655 }
67656
67657 - posix_clocks[clock_id] = *new_clock;
67658 + posix_clocks[clock_id] = new_clock;
67659 }
67660 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67661
67662 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67663 return (id & CLOCKFD_MASK) == CLOCKFD ?
67664 &clock_posix_dynamic : &clock_posix_cpu;
67665
67666 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67667 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67668 return NULL;
67669 - return &posix_clocks[id];
67670 + return posix_clocks[id];
67671 }
67672
67673 static int common_timer_create(struct k_itimer *new_timer)
67674 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67675 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67676 return -EFAULT;
67677
67678 + /* only the CLOCK_REALTIME clock can be set, all other clocks
67679 + have their clock_set fptr set to a nosettime dummy function
67680 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67681 + call common_clock_set, which calls do_sys_settimeofday, which
67682 + we hook
67683 + */
67684 +
67685 return kc->clock_set(which_clock, &new_tp);
67686 }
67687
67688 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67689 index d523593..68197a4 100644
67690 --- a/kernel/power/poweroff.c
67691 +++ b/kernel/power/poweroff.c
67692 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67693 .enable_mask = SYSRQ_ENABLE_BOOT,
67694 };
67695
67696 -static int pm_sysrq_init(void)
67697 +static int __init pm_sysrq_init(void)
67698 {
67699 register_sysrq_key('o', &sysrq_poweroff_op);
67700 return 0;
67701 diff --git a/kernel/power/process.c b/kernel/power/process.c
67702 index 7aac07a..2d3c6dc 100644
67703 --- a/kernel/power/process.c
67704 +++ b/kernel/power/process.c
67705 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
67706 u64 elapsed_csecs64;
67707 unsigned int elapsed_csecs;
67708 bool wakeup = false;
67709 + bool timedout = false;
67710
67711 do_gettimeofday(&start);
67712
67713 @@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
67714
67715 while (true) {
67716 todo = 0;
67717 + if (time_after(jiffies, end_time))
67718 + timedout = true;
67719 read_lock(&tasklist_lock);
67720 do_each_thread(g, p) {
67721 if (p == current || !freeze_task(p))
67722 @@ -60,9 +63,13 @@ static int try_to_freeze_tasks(bool user_only)
67723 * try_to_stop() after schedule() in ptrace/signal
67724 * stop sees TIF_FREEZE.
67725 */
67726 - if (!task_is_stopped_or_traced(p) &&
67727 - !freezer_should_skip(p))
67728 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67729 todo++;
67730 + if (timedout) {
67731 + printk(KERN_ERR "Task refusing to freeze:\n");
67732 + sched_show_task(p);
67733 + }
67734 + }
67735 } while_each_thread(g, p);
67736 read_unlock(&tasklist_lock);
67737
67738 @@ -71,7 +78,7 @@ static int try_to_freeze_tasks(bool user_only)
67739 todo += wq_busy;
67740 }
67741
67742 - if (!todo || time_after(jiffies, end_time))
67743 + if (!todo || timedout)
67744 break;
67745
67746 if (pm_wakeup_pending()) {
67747 diff --git a/kernel/printk.c b/kernel/printk.c
67748 index 32690a0..cd7c798 100644
67749 --- a/kernel/printk.c
67750 +++ b/kernel/printk.c
67751 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
67752 if (from_file && type != SYSLOG_ACTION_OPEN)
67753 return 0;
67754
67755 +#ifdef CONFIG_GRKERNSEC_DMESG
67756 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67757 + return -EPERM;
67758 +#endif
67759 +
67760 if (syslog_action_restricted(type)) {
67761 if (capable(CAP_SYSLOG))
67762 return 0;
67763 diff --git a/kernel/profile.c b/kernel/profile.c
67764 index 76b8e77..a2930e8 100644
67765 --- a/kernel/profile.c
67766 +++ b/kernel/profile.c
67767 @@ -39,7 +39,7 @@ struct profile_hit {
67768 /* Oprofile timer tick hook */
67769 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67770
67771 -static atomic_t *prof_buffer;
67772 +static atomic_unchecked_t *prof_buffer;
67773 static unsigned long prof_len, prof_shift;
67774
67775 int prof_on __read_mostly;
67776 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67777 hits[i].pc = 0;
67778 continue;
67779 }
67780 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67781 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67782 hits[i].hits = hits[i].pc = 0;
67783 }
67784 }
67785 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67786 * Add the current hit(s) and flush the write-queue out
67787 * to the global buffer:
67788 */
67789 - atomic_add(nr_hits, &prof_buffer[pc]);
67790 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67791 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67792 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67793 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67794 hits[i].pc = hits[i].hits = 0;
67795 }
67796 out:
67797 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67798 {
67799 unsigned long pc;
67800 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67801 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67802 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67803 }
67804 #endif /* !CONFIG_SMP */
67805
67806 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67807 return -EFAULT;
67808 buf++; p++; count--; read++;
67809 }
67810 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67811 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67812 if (copy_to_user(buf, (void *)pnt, count))
67813 return -EFAULT;
67814 read += count;
67815 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67816 }
67817 #endif
67818 profile_discard_flip_buffers();
67819 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67820 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67821 return count;
67822 }
67823
67824 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67825 index 00ab2ca..d237f61 100644
67826 --- a/kernel/ptrace.c
67827 +++ b/kernel/ptrace.c
67828 @@ -285,7 +285,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67829 task->ptrace = PT_PTRACED;
67830 if (seize)
67831 task->ptrace |= PT_SEIZED;
67832 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
67833 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
67834 task->ptrace |= PT_PTRACE_CAP;
67835
67836 __ptrace_link(task, current);
67837 @@ -491,7 +491,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67838 break;
67839 return -EIO;
67840 }
67841 - if (copy_to_user(dst, buf, retval))
67842 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67843 return -EFAULT;
67844 copied += retval;
67845 src += retval;
67846 @@ -688,7 +688,7 @@ int ptrace_request(struct task_struct *child, long request,
67847 bool seized = child->ptrace & PT_SEIZED;
67848 int ret = -EIO;
67849 siginfo_t siginfo, *si;
67850 - void __user *datavp = (void __user *) data;
67851 + void __user *datavp = (__force void __user *) data;
67852 unsigned long __user *datalp = datavp;
67853 unsigned long flags;
67854
67855 @@ -890,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67856 goto out;
67857 }
67858
67859 + if (gr_handle_ptrace(child, request)) {
67860 + ret = -EPERM;
67861 + goto out_put_task_struct;
67862 + }
67863 +
67864 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67865 ret = ptrace_attach(child, request, data);
67866 /*
67867 * Some architectures need to do book-keeping after
67868 * a ptrace attach.
67869 */
67870 - if (!ret)
67871 + if (!ret) {
67872 arch_ptrace_attach(child);
67873 + gr_audit_ptrace(child);
67874 + }
67875 goto out_put_task_struct;
67876 }
67877
67878 @@ -923,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
67879 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67880 if (copied != sizeof(tmp))
67881 return -EIO;
67882 - return put_user(tmp, (unsigned long __user *)data);
67883 + return put_user(tmp, (__force unsigned long __user *)data);
67884 }
67885
67886 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
67887 @@ -1033,14 +1040,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67888 goto out;
67889 }
67890
67891 + if (gr_handle_ptrace(child, request)) {
67892 + ret = -EPERM;
67893 + goto out_put_task_struct;
67894 + }
67895 +
67896 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67897 ret = ptrace_attach(child, request, data);
67898 /*
67899 * Some architectures need to do book-keeping after
67900 * a ptrace attach.
67901 */
67902 - if (!ret)
67903 + if (!ret) {
67904 arch_ptrace_attach(child);
67905 + gr_audit_ptrace(child);
67906 + }
67907 goto out_put_task_struct;
67908 }
67909
67910 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
67911 index 977296d..c4744dc 100644
67912 --- a/kernel/rcutiny.c
67913 +++ b/kernel/rcutiny.c
67914 @@ -46,7 +46,7 @@
67915 struct rcu_ctrlblk;
67916 static void invoke_rcu_callbacks(void);
67917 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
67918 -static void rcu_process_callbacks(struct softirq_action *unused);
67919 +static void rcu_process_callbacks(void);
67920 static void __call_rcu(struct rcu_head *head,
67921 void (*func)(struct rcu_head *rcu),
67922 struct rcu_ctrlblk *rcp);
67923 @@ -297,7 +297,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
67924 rcu_is_callbacks_kthread()));
67925 }
67926
67927 -static void rcu_process_callbacks(struct softirq_action *unused)
67928 +static void rcu_process_callbacks(void)
67929 {
67930 __rcu_process_callbacks(&rcu_sched_ctrlblk);
67931 __rcu_process_callbacks(&rcu_bh_ctrlblk);
67932 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
67933 index 9cb1ae4..aac7d3e 100644
67934 --- a/kernel/rcutiny_plugin.h
67935 +++ b/kernel/rcutiny_plugin.h
67936 @@ -920,7 +920,7 @@ static int rcu_kthread(void *arg)
67937 have_rcu_kthread_work = morework;
67938 local_irq_restore(flags);
67939 if (work)
67940 - rcu_process_callbacks(NULL);
67941 + rcu_process_callbacks();
67942 schedule_timeout_interruptible(1); /* Leave CPU for others. */
67943 }
67944
67945 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67946 index a58ac28..196a3d8 100644
67947 --- a/kernel/rcutorture.c
67948 +++ b/kernel/rcutorture.c
67949 @@ -148,12 +148,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
67950 { 0 };
67951 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67952 { 0 };
67953 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67954 -static atomic_t n_rcu_torture_alloc;
67955 -static atomic_t n_rcu_torture_alloc_fail;
67956 -static atomic_t n_rcu_torture_free;
67957 -static atomic_t n_rcu_torture_mberror;
67958 -static atomic_t n_rcu_torture_error;
67959 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67960 +static atomic_unchecked_t n_rcu_torture_alloc;
67961 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
67962 +static atomic_unchecked_t n_rcu_torture_free;
67963 +static atomic_unchecked_t n_rcu_torture_mberror;
67964 +static atomic_unchecked_t n_rcu_torture_error;
67965 static long n_rcu_torture_boost_ktrerror;
67966 static long n_rcu_torture_boost_rterror;
67967 static long n_rcu_torture_boost_failure;
67968 @@ -243,11 +243,11 @@ rcu_torture_alloc(void)
67969
67970 spin_lock_bh(&rcu_torture_lock);
67971 if (list_empty(&rcu_torture_freelist)) {
67972 - atomic_inc(&n_rcu_torture_alloc_fail);
67973 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67974 spin_unlock_bh(&rcu_torture_lock);
67975 return NULL;
67976 }
67977 - atomic_inc(&n_rcu_torture_alloc);
67978 + atomic_inc_unchecked(&n_rcu_torture_alloc);
67979 p = rcu_torture_freelist.next;
67980 list_del_init(p);
67981 spin_unlock_bh(&rcu_torture_lock);
67982 @@ -260,7 +260,7 @@ rcu_torture_alloc(void)
67983 static void
67984 rcu_torture_free(struct rcu_torture *p)
67985 {
67986 - atomic_inc(&n_rcu_torture_free);
67987 + atomic_inc_unchecked(&n_rcu_torture_free);
67988 spin_lock_bh(&rcu_torture_lock);
67989 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67990 spin_unlock_bh(&rcu_torture_lock);
67991 @@ -380,7 +380,7 @@ rcu_torture_cb(struct rcu_head *p)
67992 i = rp->rtort_pipe_count;
67993 if (i > RCU_TORTURE_PIPE_LEN)
67994 i = RCU_TORTURE_PIPE_LEN;
67995 - atomic_inc(&rcu_torture_wcount[i]);
67996 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67997 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67998 rp->rtort_mbtest = 0;
67999 rcu_torture_free(rp);
68000 @@ -427,7 +427,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
68001 i = rp->rtort_pipe_count;
68002 if (i > RCU_TORTURE_PIPE_LEN)
68003 i = RCU_TORTURE_PIPE_LEN;
68004 - atomic_inc(&rcu_torture_wcount[i]);
68005 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68006 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68007 rp->rtort_mbtest = 0;
68008 list_del(&rp->rtort_free);
68009 @@ -916,7 +916,7 @@ rcu_torture_writer(void *arg)
68010 i = old_rp->rtort_pipe_count;
68011 if (i > RCU_TORTURE_PIPE_LEN)
68012 i = RCU_TORTURE_PIPE_LEN;
68013 - atomic_inc(&rcu_torture_wcount[i]);
68014 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68015 old_rp->rtort_pipe_count++;
68016 cur_ops->deferred_free(old_rp);
68017 }
68018 @@ -997,7 +997,7 @@ static void rcu_torture_timer(unsigned long unused)
68019 return;
68020 }
68021 if (p->rtort_mbtest == 0)
68022 - atomic_inc(&n_rcu_torture_mberror);
68023 + atomic_inc_unchecked(&n_rcu_torture_mberror);
68024 spin_lock(&rand_lock);
68025 cur_ops->read_delay(&rand);
68026 n_rcu_torture_timers++;
68027 @@ -1061,7 +1061,7 @@ rcu_torture_reader(void *arg)
68028 continue;
68029 }
68030 if (p->rtort_mbtest == 0)
68031 - atomic_inc(&n_rcu_torture_mberror);
68032 + atomic_inc_unchecked(&n_rcu_torture_mberror);
68033 cur_ops->read_delay(&rand);
68034 preempt_disable();
68035 pipe_count = p->rtort_pipe_count;
68036 @@ -1123,10 +1123,10 @@ rcu_torture_printk(char *page)
68037 rcu_torture_current,
68038 rcu_torture_current_version,
68039 list_empty(&rcu_torture_freelist),
68040 - atomic_read(&n_rcu_torture_alloc),
68041 - atomic_read(&n_rcu_torture_alloc_fail),
68042 - atomic_read(&n_rcu_torture_free),
68043 - atomic_read(&n_rcu_torture_mberror),
68044 + atomic_read_unchecked(&n_rcu_torture_alloc),
68045 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
68046 + atomic_read_unchecked(&n_rcu_torture_free),
68047 + atomic_read_unchecked(&n_rcu_torture_mberror),
68048 n_rcu_torture_boost_ktrerror,
68049 n_rcu_torture_boost_rterror,
68050 n_rcu_torture_boost_failure,
68051 @@ -1136,7 +1136,7 @@ rcu_torture_printk(char *page)
68052 n_online_attempts,
68053 n_offline_successes,
68054 n_offline_attempts);
68055 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
68056 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
68057 n_rcu_torture_boost_ktrerror != 0 ||
68058 n_rcu_torture_boost_rterror != 0 ||
68059 n_rcu_torture_boost_failure != 0)
68060 @@ -1144,7 +1144,7 @@ rcu_torture_printk(char *page)
68061 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
68062 if (i > 1) {
68063 cnt += sprintf(&page[cnt], "!!! ");
68064 - atomic_inc(&n_rcu_torture_error);
68065 + atomic_inc_unchecked(&n_rcu_torture_error);
68066 WARN_ON_ONCE(1);
68067 }
68068 cnt += sprintf(&page[cnt], "Reader Pipe: ");
68069 @@ -1158,7 +1158,7 @@ rcu_torture_printk(char *page)
68070 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
68071 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68072 cnt += sprintf(&page[cnt], " %d",
68073 - atomic_read(&rcu_torture_wcount[i]));
68074 + atomic_read_unchecked(&rcu_torture_wcount[i]));
68075 }
68076 cnt += sprintf(&page[cnt], "\n");
68077 if (cur_ops->stats)
68078 @@ -1600,7 +1600,7 @@ rcu_torture_cleanup(void)
68079
68080 if (cur_ops->cleanup)
68081 cur_ops->cleanup();
68082 - if (atomic_read(&n_rcu_torture_error))
68083 + if (atomic_read_unchecked(&n_rcu_torture_error))
68084 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
68085 else
68086 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
68087 @@ -1664,17 +1664,17 @@ rcu_torture_init(void)
68088
68089 rcu_torture_current = NULL;
68090 rcu_torture_current_version = 0;
68091 - atomic_set(&n_rcu_torture_alloc, 0);
68092 - atomic_set(&n_rcu_torture_alloc_fail, 0);
68093 - atomic_set(&n_rcu_torture_free, 0);
68094 - atomic_set(&n_rcu_torture_mberror, 0);
68095 - atomic_set(&n_rcu_torture_error, 0);
68096 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
68097 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
68098 + atomic_set_unchecked(&n_rcu_torture_free, 0);
68099 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
68100 + atomic_set_unchecked(&n_rcu_torture_error, 0);
68101 n_rcu_torture_boost_ktrerror = 0;
68102 n_rcu_torture_boost_rterror = 0;
68103 n_rcu_torture_boost_failure = 0;
68104 n_rcu_torture_boosts = 0;
68105 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
68106 - atomic_set(&rcu_torture_wcount[i], 0);
68107 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
68108 for_each_possible_cpu(cpu) {
68109 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68110 per_cpu(rcu_torture_count, cpu)[i] = 0;
68111 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
68112 index 6c4a672..70f3202 100644
68113 --- a/kernel/rcutree.c
68114 +++ b/kernel/rcutree.c
68115 @@ -363,9 +363,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
68116 rcu_prepare_for_idle(smp_processor_id());
68117 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68118 smp_mb__before_atomic_inc(); /* See above. */
68119 - atomic_inc(&rdtp->dynticks);
68120 + atomic_inc_unchecked(&rdtp->dynticks);
68121 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
68122 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68123 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68124 }
68125
68126 /**
68127 @@ -438,10 +438,10 @@ void rcu_irq_exit(void)
68128 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
68129 {
68130 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
68131 - atomic_inc(&rdtp->dynticks);
68132 + atomic_inc_unchecked(&rdtp->dynticks);
68133 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68134 smp_mb__after_atomic_inc(); /* See above. */
68135 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68136 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68137 rcu_cleanup_after_idle(smp_processor_id());
68138 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
68139 if (!is_idle_task(current)) {
68140 @@ -531,14 +531,14 @@ void rcu_nmi_enter(void)
68141 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
68142
68143 if (rdtp->dynticks_nmi_nesting == 0 &&
68144 - (atomic_read(&rdtp->dynticks) & 0x1))
68145 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
68146 return;
68147 rdtp->dynticks_nmi_nesting++;
68148 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
68149 - atomic_inc(&rdtp->dynticks);
68150 + atomic_inc_unchecked(&rdtp->dynticks);
68151 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68152 smp_mb__after_atomic_inc(); /* See above. */
68153 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68154 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68155 }
68156
68157 /**
68158 @@ -557,9 +557,9 @@ void rcu_nmi_exit(void)
68159 return;
68160 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68161 smp_mb__before_atomic_inc(); /* See above. */
68162 - atomic_inc(&rdtp->dynticks);
68163 + atomic_inc_unchecked(&rdtp->dynticks);
68164 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68165 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68166 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68167 }
68168
68169 #ifdef CONFIG_PROVE_RCU
68170 @@ -575,7 +575,7 @@ int rcu_is_cpu_idle(void)
68171 int ret;
68172
68173 preempt_disable();
68174 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68175 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68176 preempt_enable();
68177 return ret;
68178 }
68179 @@ -604,7 +604,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
68180 */
68181 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68182 {
68183 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68184 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68185 return (rdp->dynticks_snap & 0x1) == 0;
68186 }
68187
68188 @@ -619,7 +619,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
68189 unsigned int curr;
68190 unsigned int snap;
68191
68192 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
68193 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68194 snap = (unsigned int)rdp->dynticks_snap;
68195
68196 /*
68197 @@ -1667,7 +1667,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
68198 /*
68199 * Do RCU core processing for the current CPU.
68200 */
68201 -static void rcu_process_callbacks(struct softirq_action *unused)
68202 +static void rcu_process_callbacks(void)
68203 {
68204 trace_rcu_utilization("Start RCU core");
68205 __rcu_process_callbacks(&rcu_sched_state,
68206 @@ -2030,7 +2030,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
68207 rdp->qlen = 0;
68208 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
68209 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
68210 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
68211 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
68212 rdp->cpu = cpu;
68213 rdp->rsp = rsp;
68214 raw_spin_unlock_irqrestore(&rnp->lock, flags);
68215 @@ -2058,8 +2058,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
68216 rdp->n_force_qs_snap = rsp->n_force_qs;
68217 rdp->blimit = blimit;
68218 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
68219 - atomic_set(&rdp->dynticks->dynticks,
68220 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
68221 + atomic_set_unchecked(&rdp->dynticks->dynticks,
68222 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
68223 rcu_prepare_for_idle_init(cpu);
68224 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
68225
68226 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
68227 index fddff92..2c08359 100644
68228 --- a/kernel/rcutree.h
68229 +++ b/kernel/rcutree.h
68230 @@ -87,7 +87,7 @@ struct rcu_dynticks {
68231 long long dynticks_nesting; /* Track irq/process nesting level. */
68232 /* Process level is worth LLONG_MAX/2. */
68233 int dynticks_nmi_nesting; /* Track NMI nesting level. */
68234 - atomic_t dynticks; /* Even value for idle, else odd. */
68235 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
68236 };
68237
68238 /* RCU's kthread states for tracing. */
68239 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
68240 index 8bb35d7..6ea0a463 100644
68241 --- a/kernel/rcutree_plugin.h
68242 +++ b/kernel/rcutree_plugin.h
68243 @@ -850,7 +850,7 @@ void synchronize_rcu_expedited(void)
68244
68245 /* Clean up and exit. */
68246 smp_mb(); /* ensure expedited GP seen before counter increment. */
68247 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
68248 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
68249 unlock_mb_ret:
68250 mutex_unlock(&sync_rcu_preempt_exp_mutex);
68251 mb_ret:
68252 @@ -1833,8 +1833,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
68253
68254 #else /* #ifndef CONFIG_SMP */
68255
68256 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
68257 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
68258 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
68259 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
68260
68261 static int synchronize_sched_expedited_cpu_stop(void *data)
68262 {
68263 @@ -1889,7 +1889,7 @@ void synchronize_sched_expedited(void)
68264 int firstsnap, s, snap, trycount = 0;
68265
68266 /* Note that atomic_inc_return() implies full memory barrier. */
68267 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
68268 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
68269 get_online_cpus();
68270
68271 /*
68272 @@ -1910,7 +1910,7 @@ void synchronize_sched_expedited(void)
68273 }
68274
68275 /* Check to see if someone else did our work for us. */
68276 - s = atomic_read(&sync_sched_expedited_done);
68277 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68278 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
68279 smp_mb(); /* ensure test happens before caller kfree */
68280 return;
68281 @@ -1925,7 +1925,7 @@ void synchronize_sched_expedited(void)
68282 * grace period works for us.
68283 */
68284 get_online_cpus();
68285 - snap = atomic_read(&sync_sched_expedited_started);
68286 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
68287 smp_mb(); /* ensure read is before try_stop_cpus(). */
68288 }
68289
68290 @@ -1936,12 +1936,12 @@ void synchronize_sched_expedited(void)
68291 * than we did beat us to the punch.
68292 */
68293 do {
68294 - s = atomic_read(&sync_sched_expedited_done);
68295 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68296 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68297 smp_mb(); /* ensure test happens before caller kfree */
68298 break;
68299 }
68300 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68301 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68302
68303 put_online_cpus();
68304 }
68305 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
68306 index 654cfe6..c0b28e2 100644
68307 --- a/kernel/rcutree_trace.c
68308 +++ b/kernel/rcutree_trace.c
68309 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
68310 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68311 rdp->qs_pending);
68312 seq_printf(m, " dt=%d/%llx/%d df=%lu",
68313 - atomic_read(&rdp->dynticks->dynticks),
68314 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68315 rdp->dynticks->dynticks_nesting,
68316 rdp->dynticks->dynticks_nmi_nesting,
68317 rdp->dynticks_fqs);
68318 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
68319 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68320 rdp->qs_pending);
68321 seq_printf(m, ",%d,%llx,%d,%lu",
68322 - atomic_read(&rdp->dynticks->dynticks),
68323 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68324 rdp->dynticks->dynticks_nesting,
68325 rdp->dynticks->dynticks_nmi_nesting,
68326 rdp->dynticks_fqs);
68327 diff --git a/kernel/resource.c b/kernel/resource.c
68328 index 7640b3a..5879283 100644
68329 --- a/kernel/resource.c
68330 +++ b/kernel/resource.c
68331 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
68332
68333 static int __init ioresources_init(void)
68334 {
68335 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68336 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68337 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
68338 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
68339 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68340 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
68341 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
68342 +#endif
68343 +#else
68344 proc_create("ioports", 0, NULL, &proc_ioports_operations);
68345 proc_create("iomem", 0, NULL, &proc_iomem_operations);
68346 +#endif
68347 return 0;
68348 }
68349 __initcall(ioresources_init);
68350 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
68351 index 98ec494..4241d6d 100644
68352 --- a/kernel/rtmutex-tester.c
68353 +++ b/kernel/rtmutex-tester.c
68354 @@ -20,7 +20,7 @@
68355 #define MAX_RT_TEST_MUTEXES 8
68356
68357 static spinlock_t rttest_lock;
68358 -static atomic_t rttest_event;
68359 +static atomic_unchecked_t rttest_event;
68360
68361 struct test_thread_data {
68362 int opcode;
68363 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68364
68365 case RTTEST_LOCKCONT:
68366 td->mutexes[td->opdata] = 1;
68367 - td->event = atomic_add_return(1, &rttest_event);
68368 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68369 return 0;
68370
68371 case RTTEST_RESET:
68372 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68373 return 0;
68374
68375 case RTTEST_RESETEVENT:
68376 - atomic_set(&rttest_event, 0);
68377 + atomic_set_unchecked(&rttest_event, 0);
68378 return 0;
68379
68380 default:
68381 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68382 return ret;
68383
68384 td->mutexes[id] = 1;
68385 - td->event = atomic_add_return(1, &rttest_event);
68386 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68387 rt_mutex_lock(&mutexes[id]);
68388 - td->event = atomic_add_return(1, &rttest_event);
68389 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68390 td->mutexes[id] = 4;
68391 return 0;
68392
68393 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68394 return ret;
68395
68396 td->mutexes[id] = 1;
68397 - td->event = atomic_add_return(1, &rttest_event);
68398 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68399 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
68400 - td->event = atomic_add_return(1, &rttest_event);
68401 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68402 td->mutexes[id] = ret ? 0 : 4;
68403 return ret ? -EINTR : 0;
68404
68405 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68406 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
68407 return ret;
68408
68409 - td->event = atomic_add_return(1, &rttest_event);
68410 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68411 rt_mutex_unlock(&mutexes[id]);
68412 - td->event = atomic_add_return(1, &rttest_event);
68413 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68414 td->mutexes[id] = 0;
68415 return 0;
68416
68417 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68418 break;
68419
68420 td->mutexes[dat] = 2;
68421 - td->event = atomic_add_return(1, &rttest_event);
68422 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68423 break;
68424
68425 default:
68426 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68427 return;
68428
68429 td->mutexes[dat] = 3;
68430 - td->event = atomic_add_return(1, &rttest_event);
68431 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68432 break;
68433
68434 case RTTEST_LOCKNOWAIT:
68435 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68436 return;
68437
68438 td->mutexes[dat] = 1;
68439 - td->event = atomic_add_return(1, &rttest_event);
68440 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68441 return;
68442
68443 default:
68444 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
68445 index e8a1f83..363d17d 100644
68446 --- a/kernel/sched/auto_group.c
68447 +++ b/kernel/sched/auto_group.c
68448 @@ -11,7 +11,7 @@
68449
68450 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
68451 static struct autogroup autogroup_default;
68452 -static atomic_t autogroup_seq_nr;
68453 +static atomic_unchecked_t autogroup_seq_nr;
68454
68455 void __init autogroup_init(struct task_struct *init_task)
68456 {
68457 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
68458
68459 kref_init(&ag->kref);
68460 init_rwsem(&ag->lock);
68461 - ag->id = atomic_inc_return(&autogroup_seq_nr);
68462 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
68463 ag->tg = tg;
68464 #ifdef CONFIG_RT_GROUP_SCHED
68465 /*
68466 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
68467 index 478a04c..e16339a 100644
68468 --- a/kernel/sched/core.c
68469 +++ b/kernel/sched/core.c
68470 @@ -3851,6 +3851,8 @@ int can_nice(const struct task_struct *p, const int nice)
68471 /* convert nice value [19,-20] to rlimit style value [1,40] */
68472 int nice_rlim = 20 - nice;
68473
68474 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
68475 +
68476 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
68477 capable(CAP_SYS_NICE));
68478 }
68479 @@ -3884,7 +3886,8 @@ SYSCALL_DEFINE1(nice, int, increment)
68480 if (nice > 19)
68481 nice = 19;
68482
68483 - if (increment < 0 && !can_nice(current, nice))
68484 + if (increment < 0 && (!can_nice(current, nice) ||
68485 + gr_handle_chroot_nice()))
68486 return -EPERM;
68487
68488 retval = security_task_setnice(current, nice);
68489 @@ -4041,6 +4044,7 @@ recheck:
68490 unsigned long rlim_rtprio =
68491 task_rlimit(p, RLIMIT_RTPRIO);
68492
68493 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
68494 /* can't set/change the rt policy */
68495 if (policy != p->policy && !rlim_rtprio)
68496 return -EPERM;
68497 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
68498 index aca16b8..8e3acc4 100644
68499 --- a/kernel/sched/fair.c
68500 +++ b/kernel/sched/fair.c
68501 @@ -5147,7 +5147,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
68502 * run_rebalance_domains is triggered when needed from the scheduler tick.
68503 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
68504 */
68505 -static void run_rebalance_domains(struct softirq_action *h)
68506 +static void run_rebalance_domains(void)
68507 {
68508 int this_cpu = smp_processor_id();
68509 struct rq *this_rq = cpu_rq(this_cpu);
68510 diff --git a/kernel/signal.c b/kernel/signal.c
68511 index b09cf3b..b291c66 100644
68512 --- a/kernel/signal.c
68513 +++ b/kernel/signal.c
68514 @@ -46,12 +46,12 @@ static struct kmem_cache *sigqueue_cachep;
68515
68516 int print_fatal_signals __read_mostly;
68517
68518 -static void __user *sig_handler(struct task_struct *t, int sig)
68519 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
68520 {
68521 return t->sighand->action[sig - 1].sa.sa_handler;
68522 }
68523
68524 -static int sig_handler_ignored(void __user *handler, int sig)
68525 +static int sig_handler_ignored(__sighandler_t handler, int sig)
68526 {
68527 /* Is it explicitly or implicitly ignored? */
68528 return handler == SIG_IGN ||
68529 @@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
68530 static int sig_task_ignored(struct task_struct *t, int sig,
68531 int from_ancestor_ns)
68532 {
68533 - void __user *handler;
68534 + __sighandler_t handler;
68535
68536 handler = sig_handler(t, sig);
68537
68538 @@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
68539 atomic_inc(&user->sigpending);
68540 rcu_read_unlock();
68541
68542 + if (!override_rlimit)
68543 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
68544 +
68545 if (override_rlimit ||
68546 atomic_read(&user->sigpending) <=
68547 task_rlimit(t, RLIMIT_SIGPENDING)) {
68548 @@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
68549
68550 int unhandled_signal(struct task_struct *tsk, int sig)
68551 {
68552 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68553 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68554 if (is_global_init(tsk))
68555 return 1;
68556 if (handler != SIG_IGN && handler != SIG_DFL)
68557 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
68558 }
68559 }
68560
68561 + /* allow glibc communication via tgkill to other threads in our
68562 + thread group */
68563 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68564 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68565 + && gr_handle_signal(t, sig))
68566 + return -EPERM;
68567 +
68568 return security_task_kill(t, info, sig, 0);
68569 }
68570
68571 @@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68572 return send_signal(sig, info, p, 1);
68573 }
68574
68575 -static int
68576 +int
68577 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68578 {
68579 return send_signal(sig, info, t, 0);
68580 @@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68581 unsigned long int flags;
68582 int ret, blocked, ignored;
68583 struct k_sigaction *action;
68584 + int is_unhandled = 0;
68585
68586 spin_lock_irqsave(&t->sighand->siglock, flags);
68587 action = &t->sighand->action[sig-1];
68588 @@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68589 }
68590 if (action->sa.sa_handler == SIG_DFL)
68591 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68592 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68593 + is_unhandled = 1;
68594 ret = specific_send_sig_info(sig, info, t);
68595 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68596
68597 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
68598 + normal operation */
68599 + if (is_unhandled) {
68600 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68601 + gr_handle_crash(t, sig);
68602 + }
68603 +
68604 return ret;
68605 }
68606
68607 @@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68608 ret = check_kill_permission(sig, info, p);
68609 rcu_read_unlock();
68610
68611 - if (!ret && sig)
68612 + if (!ret && sig) {
68613 ret = do_send_sig_info(sig, info, p, true);
68614 + if (!ret)
68615 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68616 + }
68617
68618 return ret;
68619 }
68620 @@ -2829,7 +2852,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68621 int error = -ESRCH;
68622
68623 rcu_read_lock();
68624 - p = find_task_by_vpid(pid);
68625 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68626 + /* allow glibc communication via tgkill to other threads in our
68627 + thread group */
68628 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68629 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
68630 + p = find_task_by_vpid_unrestricted(pid);
68631 + else
68632 +#endif
68633 + p = find_task_by_vpid(pid);
68634 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68635 error = check_kill_permission(sig, info, p);
68636 /*
68637 diff --git a/kernel/smp.c b/kernel/smp.c
68638 index db197d6..17aef0b 100644
68639 --- a/kernel/smp.c
68640 +++ b/kernel/smp.c
68641 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68642 }
68643 EXPORT_SYMBOL(smp_call_function);
68644
68645 -void ipi_call_lock(void)
68646 +void ipi_call_lock(void) __acquires(call_function.lock)
68647 {
68648 raw_spin_lock(&call_function.lock);
68649 }
68650
68651 -void ipi_call_unlock(void)
68652 +void ipi_call_unlock(void) __releases(call_function.lock)
68653 {
68654 raw_spin_unlock(&call_function.lock);
68655 }
68656
68657 -void ipi_call_lock_irq(void)
68658 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
68659 {
68660 raw_spin_lock_irq(&call_function.lock);
68661 }
68662
68663 -void ipi_call_unlock_irq(void)
68664 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
68665 {
68666 raw_spin_unlock_irq(&call_function.lock);
68667 }
68668 diff --git a/kernel/softirq.c b/kernel/softirq.c
68669 index 4eb3a0f..6f1fa81 100644
68670 --- a/kernel/softirq.c
68671 +++ b/kernel/softirq.c
68672 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68673
68674 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68675
68676 -char *softirq_to_name[NR_SOFTIRQS] = {
68677 +const char * const softirq_to_name[NR_SOFTIRQS] = {
68678 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68679 "TASKLET", "SCHED", "HRTIMER", "RCU"
68680 };
68681 @@ -235,7 +235,7 @@ restart:
68682 kstat_incr_softirqs_this_cpu(vec_nr);
68683
68684 trace_softirq_entry(vec_nr);
68685 - h->action(h);
68686 + h->action();
68687 trace_softirq_exit(vec_nr);
68688 if (unlikely(prev_count != preempt_count())) {
68689 printk(KERN_ERR "huh, entered softirq %u %s %p"
68690 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
68691 local_irq_restore(flags);
68692 }
68693
68694 -void open_softirq(int nr, void (*action)(struct softirq_action *))
68695 +void open_softirq(int nr, void (*action)(void))
68696 {
68697 - softirq_vec[nr].action = action;
68698 + pax_open_kernel();
68699 + *(void **)&softirq_vec[nr].action = action;
68700 + pax_close_kernel();
68701 }
68702
68703 /*
68704 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68705
68706 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68707
68708 -static void tasklet_action(struct softirq_action *a)
68709 +static void tasklet_action(void)
68710 {
68711 struct tasklet_struct *list;
68712
68713 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
68714 }
68715 }
68716
68717 -static void tasklet_hi_action(struct softirq_action *a)
68718 +static void tasklet_hi_action(void)
68719 {
68720 struct tasklet_struct *list;
68721
68722 diff --git a/kernel/sys.c b/kernel/sys.c
68723 index 888d227..f04b318 100644
68724 --- a/kernel/sys.c
68725 +++ b/kernel/sys.c
68726 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68727 error = -EACCES;
68728 goto out;
68729 }
68730 +
68731 + if (gr_handle_chroot_setpriority(p, niceval)) {
68732 + error = -EACCES;
68733 + goto out;
68734 + }
68735 +
68736 no_nice = security_task_setnice(p, niceval);
68737 if (no_nice) {
68738 error = no_nice;
68739 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68740 goto error;
68741 }
68742
68743 + if (gr_check_group_change(new->gid, new->egid, -1))
68744 + goto error;
68745 +
68746 if (rgid != (gid_t) -1 ||
68747 (egid != (gid_t) -1 && egid != old->gid))
68748 new->sgid = new->egid;
68749 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68750 old = current_cred();
68751
68752 retval = -EPERM;
68753 +
68754 + if (gr_check_group_change(gid, gid, gid))
68755 + goto error;
68756 +
68757 if (nsown_capable(CAP_SETGID))
68758 new->gid = new->egid = new->sgid = new->fsgid = gid;
68759 else if (gid == old->gid || gid == old->sgid)
68760 @@ -618,7 +631,7 @@ error:
68761 /*
68762 * change the user struct in a credentials set to match the new UID
68763 */
68764 -static int set_user(struct cred *new)
68765 +int set_user(struct cred *new)
68766 {
68767 struct user_struct *new_user;
68768
68769 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68770 goto error;
68771 }
68772
68773 + if (gr_check_user_change(new->uid, new->euid, -1))
68774 + goto error;
68775 +
68776 if (new->uid != old->uid) {
68777 retval = set_user(new);
68778 if (retval < 0)
68779 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68780 old = current_cred();
68781
68782 retval = -EPERM;
68783 +
68784 + if (gr_check_crash_uid(uid))
68785 + goto error;
68786 + if (gr_check_user_change(uid, uid, uid))
68787 + goto error;
68788 +
68789 if (nsown_capable(CAP_SETUID)) {
68790 new->suid = new->uid = uid;
68791 if (uid != old->uid) {
68792 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68793 goto error;
68794 }
68795
68796 + if (gr_check_user_change(ruid, euid, -1))
68797 + goto error;
68798 +
68799 if (ruid != (uid_t) -1) {
68800 new->uid = ruid;
68801 if (ruid != old->uid) {
68802 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68803 goto error;
68804 }
68805
68806 + if (gr_check_group_change(rgid, egid, -1))
68807 + goto error;
68808 +
68809 if (rgid != (gid_t) -1)
68810 new->gid = rgid;
68811 if (egid != (gid_t) -1)
68812 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68813 old = current_cred();
68814 old_fsuid = old->fsuid;
68815
68816 + if (gr_check_user_change(-1, -1, uid))
68817 + goto error;
68818 +
68819 if (uid == old->uid || uid == old->euid ||
68820 uid == old->suid || uid == old->fsuid ||
68821 nsown_capable(CAP_SETUID)) {
68822 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68823 }
68824 }
68825
68826 +error:
68827 abort_creds(new);
68828 return old_fsuid;
68829
68830 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68831 if (gid == old->gid || gid == old->egid ||
68832 gid == old->sgid || gid == old->fsgid ||
68833 nsown_capable(CAP_SETGID)) {
68834 + if (gr_check_group_change(-1, -1, gid))
68835 + goto error;
68836 +
68837 if (gid != old_fsgid) {
68838 new->fsgid = gid;
68839 goto change_okay;
68840 }
68841 }
68842
68843 +error:
68844 abort_creds(new);
68845 return old_fsgid;
68846
68847 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
68848 }
68849 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68850 snprintf(buf, len, "2.6.%u%s", v, rest);
68851 - ret = copy_to_user(release, buf, len);
68852 + if (len > sizeof(buf))
68853 + ret = -EFAULT;
68854 + else
68855 + ret = copy_to_user(release, buf, len);
68856 }
68857 return ret;
68858 }
68859 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
68860 return -EFAULT;
68861
68862 down_read(&uts_sem);
68863 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
68864 + error = __copy_to_user(name->sysname, &utsname()->sysname,
68865 __OLD_UTS_LEN);
68866 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68867 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68868 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
68869 __OLD_UTS_LEN);
68870 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68871 - error |= __copy_to_user(&name->release, &utsname()->release,
68872 + error |= __copy_to_user(name->release, &utsname()->release,
68873 __OLD_UTS_LEN);
68874 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68875 - error |= __copy_to_user(&name->version, &utsname()->version,
68876 + error |= __copy_to_user(name->version, &utsname()->version,
68877 __OLD_UTS_LEN);
68878 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68879 - error |= __copy_to_user(&name->machine, &utsname()->machine,
68880 + error |= __copy_to_user(name->machine, &utsname()->machine,
68881 __OLD_UTS_LEN);
68882 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68883 up_read(&uts_sem);
68884 @@ -1838,7 +1877,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
68885 error = get_dumpable(me->mm);
68886 break;
68887 case PR_SET_DUMPABLE:
68888 - if (arg2 < 0 || arg2 > 1) {
68889 + if (arg2 > 1) {
68890 error = -EINVAL;
68891 break;
68892 }
68893 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68894 index f03a6ef..5fcc8af 100644
68895 --- a/kernel/sysctl.c
68896 +++ b/kernel/sysctl.c
68897 @@ -86,6 +86,13 @@
68898
68899
68900 #if defined(CONFIG_SYSCTL)
68901 +#include <linux/grsecurity.h>
68902 +#include <linux/grinternal.h>
68903 +
68904 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
68905 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
68906 + const int op);
68907 +extern int gr_handle_chroot_sysctl(const int op);
68908
68909 /* External variables not in a header file. */
68910 extern int sysctl_overcommit_memory;
68911 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
68912 }
68913
68914 #endif
68915 +extern struct ctl_table grsecurity_table[];
68916
68917 static struct ctl_table root_table[];
68918 static struct ctl_table_root sysctl_table_root;
68919 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
68920 int sysctl_legacy_va_layout;
68921 #endif
68922
68923 +#ifdef CONFIG_PAX_SOFTMODE
68924 +static ctl_table pax_table[] = {
68925 + {
68926 + .procname = "softmode",
68927 + .data = &pax_softmode,
68928 + .maxlen = sizeof(unsigned int),
68929 + .mode = 0600,
68930 + .proc_handler = &proc_dointvec,
68931 + },
68932 +
68933 + { }
68934 +};
68935 +#endif
68936 +
68937 /* The default sysctl tables: */
68938
68939 static struct ctl_table root_table[] = {
68940 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
68941 #endif
68942
68943 static struct ctl_table kern_table[] = {
68944 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68945 + {
68946 + .procname = "grsecurity",
68947 + .mode = 0500,
68948 + .child = grsecurity_table,
68949 + },
68950 +#endif
68951 +
68952 +#ifdef CONFIG_PAX_SOFTMODE
68953 + {
68954 + .procname = "pax",
68955 + .mode = 0500,
68956 + .child = pax_table,
68957 + },
68958 +#endif
68959 +
68960 {
68961 .procname = "sched_child_runs_first",
68962 .data = &sysctl_sched_child_runs_first,
68963 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
68964 .data = &modprobe_path,
68965 .maxlen = KMOD_PATH_LEN,
68966 .mode = 0644,
68967 - .proc_handler = proc_dostring,
68968 + .proc_handler = proc_dostring_modpriv,
68969 },
68970 {
68971 .procname = "modules_disabled",
68972 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
68973 .extra1 = &zero,
68974 .extra2 = &one,
68975 },
68976 +#endif
68977 {
68978 .procname = "kptr_restrict",
68979 .data = &kptr_restrict,
68980 .maxlen = sizeof(int),
68981 .mode = 0644,
68982 .proc_handler = proc_dointvec_minmax_sysadmin,
68983 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68984 + .extra1 = &two,
68985 +#else
68986 .extra1 = &zero,
68987 +#endif
68988 .extra2 = &two,
68989 },
68990 -#endif
68991 {
68992 .procname = "ngroups_max",
68993 .data = &ngroups_max,
68994 @@ -1225,6 +1267,13 @@ static struct ctl_table vm_table[] = {
68995 .proc_handler = proc_dointvec_minmax,
68996 .extra1 = &zero,
68997 },
68998 + {
68999 + .procname = "heap_stack_gap",
69000 + .data = &sysctl_heap_stack_gap,
69001 + .maxlen = sizeof(sysctl_heap_stack_gap),
69002 + .mode = 0644,
69003 + .proc_handler = proc_doulongvec_minmax,
69004 + },
69005 #else
69006 {
69007 .procname = "nr_trim_pages",
69008 @@ -1729,6 +1778,17 @@ static int test_perm(int mode, int op)
69009 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
69010 {
69011 int mode;
69012 + int error;
69013 +
69014 + if (table->parent != NULL && table->parent->procname != NULL &&
69015 + table->procname != NULL &&
69016 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
69017 + return -EACCES;
69018 + if (gr_handle_chroot_sysctl(op))
69019 + return -EACCES;
69020 + error = gr_handle_sysctl(table, op);
69021 + if (error)
69022 + return error;
69023
69024 if (root->permissions)
69025 mode = root->permissions(root, current->nsproxy, table);
69026 @@ -2133,6 +2193,16 @@ int proc_dostring(struct ctl_table *table, int write,
69027 buffer, lenp, ppos);
69028 }
69029
69030 +int proc_dostring_modpriv(struct ctl_table *table, int write,
69031 + void __user *buffer, size_t *lenp, loff_t *ppos)
69032 +{
69033 + if (write && !capable(CAP_SYS_MODULE))
69034 + return -EPERM;
69035 +
69036 + return _proc_do_string(table->data, table->maxlen, write,
69037 + buffer, lenp, ppos);
69038 +}
69039 +
69040 static size_t proc_skip_spaces(char **buf)
69041 {
69042 size_t ret;
69043 @@ -2238,6 +2308,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
69044 len = strlen(tmp);
69045 if (len > *size)
69046 len = *size;
69047 + if (len > sizeof(tmp))
69048 + len = sizeof(tmp);
69049 if (copy_to_user(*buf, tmp, len))
69050 return -EFAULT;
69051 *size -= len;
69052 @@ -2554,8 +2626,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
69053 *i = val;
69054 } else {
69055 val = convdiv * (*i) / convmul;
69056 - if (!first)
69057 + if (!first) {
69058 err = proc_put_char(&buffer, &left, '\t');
69059 + if (err)
69060 + break;
69061 + }
69062 err = proc_put_long(&buffer, &left, val, false);
69063 if (err)
69064 break;
69065 @@ -2950,6 +3025,12 @@ int proc_dostring(struct ctl_table *table, int write,
69066 return -ENOSYS;
69067 }
69068
69069 +int proc_dostring_modpriv(struct ctl_table *table, int write,
69070 + void __user *buffer, size_t *lenp, loff_t *ppos)
69071 +{
69072 + return -ENOSYS;
69073 +}
69074 +
69075 int proc_dointvec(struct ctl_table *table, int write,
69076 void __user *buffer, size_t *lenp, loff_t *ppos)
69077 {
69078 @@ -3006,6 +3087,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
69079 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
69080 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
69081 EXPORT_SYMBOL(proc_dostring);
69082 +EXPORT_SYMBOL(proc_dostring_modpriv);
69083 EXPORT_SYMBOL(proc_doulongvec_minmax);
69084 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
69085 EXPORT_SYMBOL(register_sysctl_table);
69086 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
69087 index a650694..aaeeb20 100644
69088 --- a/kernel/sysctl_binary.c
69089 +++ b/kernel/sysctl_binary.c
69090 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
69091 int i;
69092
69093 set_fs(KERNEL_DS);
69094 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69095 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69096 set_fs(old_fs);
69097 if (result < 0)
69098 goto out_kfree;
69099 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
69100 }
69101
69102 set_fs(KERNEL_DS);
69103 - result = vfs_write(file, buffer, str - buffer, &pos);
69104 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69105 set_fs(old_fs);
69106 if (result < 0)
69107 goto out_kfree;
69108 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
69109 int i;
69110
69111 set_fs(KERNEL_DS);
69112 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69113 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69114 set_fs(old_fs);
69115 if (result < 0)
69116 goto out_kfree;
69117 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
69118 }
69119
69120 set_fs(KERNEL_DS);
69121 - result = vfs_write(file, buffer, str - buffer, &pos);
69122 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69123 set_fs(old_fs);
69124 if (result < 0)
69125 goto out_kfree;
69126 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
69127 int i;
69128
69129 set_fs(KERNEL_DS);
69130 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69131 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69132 set_fs(old_fs);
69133 if (result < 0)
69134 goto out;
69135 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69136 __le16 dnaddr;
69137
69138 set_fs(KERNEL_DS);
69139 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69140 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69141 set_fs(old_fs);
69142 if (result < 0)
69143 goto out;
69144 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69145 le16_to_cpu(dnaddr) & 0x3ff);
69146
69147 set_fs(KERNEL_DS);
69148 - result = vfs_write(file, buf, len, &pos);
69149 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
69150 set_fs(old_fs);
69151 if (result < 0)
69152 goto out;
69153 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
69154 index 362da65..ab8ef8c 100644
69155 --- a/kernel/sysctl_check.c
69156 +++ b/kernel/sysctl_check.c
69157 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
69158 set_fail(&fail, table, "Directory with extra2");
69159 } else {
69160 if ((table->proc_handler == proc_dostring) ||
69161 + (table->proc_handler == proc_dostring_modpriv) ||
69162 (table->proc_handler == proc_dointvec) ||
69163 (table->proc_handler == proc_dointvec_minmax) ||
69164 (table->proc_handler == proc_dointvec_jiffies) ||
69165 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
69166 index e660464..c8b9e67 100644
69167 --- a/kernel/taskstats.c
69168 +++ b/kernel/taskstats.c
69169 @@ -27,9 +27,12 @@
69170 #include <linux/cgroup.h>
69171 #include <linux/fs.h>
69172 #include <linux/file.h>
69173 +#include <linux/grsecurity.h>
69174 #include <net/genetlink.h>
69175 #include <linux/atomic.h>
69176
69177 +extern int gr_is_taskstats_denied(int pid);
69178 +
69179 /*
69180 * Maximum length of a cpumask that can be specified in
69181 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
69182 @@ -556,6 +559,9 @@ err:
69183
69184 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
69185 {
69186 + if (gr_is_taskstats_denied(current->pid))
69187 + return -EACCES;
69188 +
69189 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
69190 return cmd_attr_register_cpumask(info);
69191 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
69192 diff --git a/kernel/time.c b/kernel/time.c
69193 index 73e416d..cfc6f69 100644
69194 --- a/kernel/time.c
69195 +++ b/kernel/time.c
69196 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
69197 return error;
69198
69199 if (tz) {
69200 + /* we log in do_settimeofday called below, so don't log twice
69201 + */
69202 + if (!tv)
69203 + gr_log_timechange();
69204 +
69205 /* SMP safe, global irq locking makes it work. */
69206 sys_tz = *tz;
69207 update_vsyscall_tz();
69208 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
69209 index 8a46f5d..bbe6f9c 100644
69210 --- a/kernel/time/alarmtimer.c
69211 +++ b/kernel/time/alarmtimer.c
69212 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
69213 struct platform_device *pdev;
69214 int error = 0;
69215 int i;
69216 - struct k_clock alarm_clock = {
69217 + static struct k_clock alarm_clock = {
69218 .clock_getres = alarm_clock_getres,
69219 .clock_get = alarm_clock_get,
69220 .timer_create = alarm_timer_create,
69221 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
69222 index fd4a7b1..fae5c2a 100644
69223 --- a/kernel/time/tick-broadcast.c
69224 +++ b/kernel/time/tick-broadcast.c
69225 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
69226 * then clear the broadcast bit.
69227 */
69228 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
69229 - int cpu = smp_processor_id();
69230 + cpu = smp_processor_id();
69231
69232 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
69233 tick_broadcast_clear_oneshot(cpu);
69234 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
69235 index 0c63581..e25dcb6 100644
69236 --- a/kernel/time/timekeeping.c
69237 +++ b/kernel/time/timekeeping.c
69238 @@ -14,6 +14,7 @@
69239 #include <linux/init.h>
69240 #include <linux/mm.h>
69241 #include <linux/sched.h>
69242 +#include <linux/grsecurity.h>
69243 #include <linux/syscore_ops.h>
69244 #include <linux/clocksource.h>
69245 #include <linux/jiffies.h>
69246 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
69247 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
69248 return -EINVAL;
69249
69250 + gr_log_timechange();
69251 +
69252 write_seqlock_irqsave(&xtime_lock, flags);
69253
69254 timekeeping_forward_now();
69255 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
69256 index 3258455..f35227d 100644
69257 --- a/kernel/time/timer_list.c
69258 +++ b/kernel/time/timer_list.c
69259 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
69260
69261 static void print_name_offset(struct seq_file *m, void *sym)
69262 {
69263 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69264 + SEQ_printf(m, "<%p>", NULL);
69265 +#else
69266 char symname[KSYM_NAME_LEN];
69267
69268 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
69269 SEQ_printf(m, "<%pK>", sym);
69270 else
69271 SEQ_printf(m, "%s", symname);
69272 +#endif
69273 }
69274
69275 static void
69276 @@ -112,7 +116,11 @@ next_one:
69277 static void
69278 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69279 {
69280 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69281 + SEQ_printf(m, " .base: %p\n", NULL);
69282 +#else
69283 SEQ_printf(m, " .base: %pK\n", base);
69284 +#endif
69285 SEQ_printf(m, " .index: %d\n",
69286 base->index);
69287 SEQ_printf(m, " .resolution: %Lu nsecs\n",
69288 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
69289 {
69290 struct proc_dir_entry *pe;
69291
69292 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69293 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69294 +#else
69295 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69296 +#endif
69297 if (!pe)
69298 return -ENOMEM;
69299 return 0;
69300 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
69301 index 0b537f2..9e71eca 100644
69302 --- a/kernel/time/timer_stats.c
69303 +++ b/kernel/time/timer_stats.c
69304 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69305 static unsigned long nr_entries;
69306 static struct entry entries[MAX_ENTRIES];
69307
69308 -static atomic_t overflow_count;
69309 +static atomic_unchecked_t overflow_count;
69310
69311 /*
69312 * The entries are in a hash-table, for fast lookup:
69313 @@ -140,7 +140,7 @@ static void reset_entries(void)
69314 nr_entries = 0;
69315 memset(entries, 0, sizeof(entries));
69316 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
69317 - atomic_set(&overflow_count, 0);
69318 + atomic_set_unchecked(&overflow_count, 0);
69319 }
69320
69321 static struct entry *alloc_entry(void)
69322 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69323 if (likely(entry))
69324 entry->count++;
69325 else
69326 - atomic_inc(&overflow_count);
69327 + atomic_inc_unchecked(&overflow_count);
69328
69329 out_unlock:
69330 raw_spin_unlock_irqrestore(lock, flags);
69331 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69332
69333 static void print_name_offset(struct seq_file *m, unsigned long addr)
69334 {
69335 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69336 + seq_printf(m, "<%p>", NULL);
69337 +#else
69338 char symname[KSYM_NAME_LEN];
69339
69340 if (lookup_symbol_name(addr, symname) < 0)
69341 seq_printf(m, "<%p>", (void *)addr);
69342 else
69343 seq_printf(m, "%s", symname);
69344 +#endif
69345 }
69346
69347 static int tstats_show(struct seq_file *m, void *v)
69348 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
69349
69350 seq_puts(m, "Timer Stats Version: v0.2\n");
69351 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
69352 - if (atomic_read(&overflow_count))
69353 + if (atomic_read_unchecked(&overflow_count))
69354 seq_printf(m, "Overflow: %d entries\n",
69355 - atomic_read(&overflow_count));
69356 + atomic_read_unchecked(&overflow_count));
69357
69358 for (i = 0; i < nr_entries; i++) {
69359 entry = entries + i;
69360 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
69361 {
69362 struct proc_dir_entry *pe;
69363
69364 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69365 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
69366 +#else
69367 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
69368 +#endif
69369 if (!pe)
69370 return -ENOMEM;
69371 return 0;
69372 diff --git a/kernel/timer.c b/kernel/timer.c
69373 index a297ffc..5e16b0b 100644
69374 --- a/kernel/timer.c
69375 +++ b/kernel/timer.c
69376 @@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
69377 /*
69378 * This function runs timers and the timer-tq in bottom half context.
69379 */
69380 -static void run_timer_softirq(struct softirq_action *h)
69381 +static void run_timer_softirq(void)
69382 {
69383 struct tvec_base *base = __this_cpu_read(tvec_bases);
69384
69385 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
69386 index cdea7b5..9b820d4 100644
69387 --- a/kernel/trace/blktrace.c
69388 +++ b/kernel/trace/blktrace.c
69389 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
69390 struct blk_trace *bt = filp->private_data;
69391 char buf[16];
69392
69393 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
69394 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
69395
69396 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
69397 }
69398 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
69399 return 1;
69400
69401 bt = buf->chan->private_data;
69402 - atomic_inc(&bt->dropped);
69403 + atomic_inc_unchecked(&bt->dropped);
69404 return 0;
69405 }
69406
69407 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
69408
69409 bt->dir = dir;
69410 bt->dev = dev;
69411 - atomic_set(&bt->dropped, 0);
69412 + atomic_set_unchecked(&bt->dropped, 0);
69413
69414 ret = -EIO;
69415 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
69416 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
69417 index 683d559..d70d914 100644
69418 --- a/kernel/trace/ftrace.c
69419 +++ b/kernel/trace/ftrace.c
69420 @@ -1726,12 +1726,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
69421 if (unlikely(ftrace_disabled))
69422 return 0;
69423
69424 + ret = ftrace_arch_code_modify_prepare();
69425 + FTRACE_WARN_ON(ret);
69426 + if (ret)
69427 + return 0;
69428 +
69429 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
69430 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
69431 if (ret) {
69432 ftrace_bug(ret, ip);
69433 - return 0;
69434 }
69435 - return 1;
69436 + return ret ? 0 : 1;
69437 }
69438
69439 /*
69440 @@ -2843,7 +2848,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
69441
69442 int
69443 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
69444 - void *data)
69445 + void *data)
69446 {
69447 struct ftrace_func_probe *entry;
69448 struct ftrace_page *pg;
69449 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
69450 index c4579f1..6a439da 100644
69451 --- a/kernel/trace/trace.c
69452 +++ b/kernel/trace/trace.c
69453 @@ -4258,10 +4258,9 @@ static const struct file_operations tracing_dyn_info_fops = {
69454 };
69455 #endif
69456
69457 -static struct dentry *d_tracer;
69458 -
69459 struct dentry *tracing_init_dentry(void)
69460 {
69461 + static struct dentry *d_tracer;
69462 static int once;
69463
69464 if (d_tracer)
69465 @@ -4281,10 +4280,9 @@ struct dentry *tracing_init_dentry(void)
69466 return d_tracer;
69467 }
69468
69469 -static struct dentry *d_percpu;
69470 -
69471 struct dentry *tracing_dentry_percpu(void)
69472 {
69473 + static struct dentry *d_percpu;
69474 static int once;
69475 struct dentry *d_tracer;
69476
69477 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
69478 index c212a7f..7b02394 100644
69479 --- a/kernel/trace/trace_events.c
69480 +++ b/kernel/trace/trace_events.c
69481 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
69482 struct ftrace_module_file_ops {
69483 struct list_head list;
69484 struct module *mod;
69485 - struct file_operations id;
69486 - struct file_operations enable;
69487 - struct file_operations format;
69488 - struct file_operations filter;
69489 };
69490
69491 static struct ftrace_module_file_ops *
69492 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
69493
69494 file_ops->mod = mod;
69495
69496 - file_ops->id = ftrace_event_id_fops;
69497 - file_ops->id.owner = mod;
69498 -
69499 - file_ops->enable = ftrace_enable_fops;
69500 - file_ops->enable.owner = mod;
69501 -
69502 - file_ops->filter = ftrace_event_filter_fops;
69503 - file_ops->filter.owner = mod;
69504 -
69505 - file_ops->format = ftrace_event_format_fops;
69506 - file_ops->format.owner = mod;
69507 + pax_open_kernel();
69508 + *(void **)&mod->trace_id.owner = mod;
69509 + *(void **)&mod->trace_enable.owner = mod;
69510 + *(void **)&mod->trace_filter.owner = mod;
69511 + *(void **)&mod->trace_format.owner = mod;
69512 + pax_close_kernel();
69513
69514 list_add(&file_ops->list, &ftrace_module_file_list);
69515
69516 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
69517
69518 for_each_event(call, start, end) {
69519 __trace_add_event_call(*call, mod,
69520 - &file_ops->id, &file_ops->enable,
69521 - &file_ops->filter, &file_ops->format);
69522 + &mod->trace_id, &mod->trace_enable,
69523 + &mod->trace_filter, &mod->trace_format);
69524 }
69525 }
69526
69527 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
69528 index 00d527c..7c5b1a3 100644
69529 --- a/kernel/trace/trace_kprobe.c
69530 +++ b/kernel/trace/trace_kprobe.c
69531 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69532 long ret;
69533 int maxlen = get_rloc_len(*(u32 *)dest);
69534 u8 *dst = get_rloc_data(dest);
69535 - u8 *src = addr;
69536 + const u8 __user *src = (const u8 __force_user *)addr;
69537 mm_segment_t old_fs = get_fs();
69538 if (!maxlen)
69539 return;
69540 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69541 pagefault_disable();
69542 do
69543 ret = __copy_from_user_inatomic(dst++, src++, 1);
69544 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69545 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69546 dst[-1] = '\0';
69547 pagefault_enable();
69548 set_fs(old_fs);
69549 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69550 ((u8 *)get_rloc_data(dest))[0] = '\0';
69551 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69552 } else
69553 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69554 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69555 get_rloc_offs(*(u32 *)dest));
69556 }
69557 /* Return the length of string -- including null terminal byte */
69558 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
69559 set_fs(KERNEL_DS);
69560 pagefault_disable();
69561 do {
69562 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69563 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69564 len++;
69565 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69566 pagefault_enable();
69567 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69568 index fd3c8aa..5f324a6 100644
69569 --- a/kernel/trace/trace_mmiotrace.c
69570 +++ b/kernel/trace/trace_mmiotrace.c
69571 @@ -24,7 +24,7 @@ struct header_iter {
69572 static struct trace_array *mmio_trace_array;
69573 static bool overrun_detected;
69574 static unsigned long prev_overruns;
69575 -static atomic_t dropped_count;
69576 +static atomic_unchecked_t dropped_count;
69577
69578 static void mmio_reset_data(struct trace_array *tr)
69579 {
69580 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
69581
69582 static unsigned long count_overruns(struct trace_iterator *iter)
69583 {
69584 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
69585 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69586 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69587
69588 if (over > prev_overruns)
69589 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
69590 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69591 sizeof(*entry), 0, pc);
69592 if (!event) {
69593 - atomic_inc(&dropped_count);
69594 + atomic_inc_unchecked(&dropped_count);
69595 return;
69596 }
69597 entry = ring_buffer_event_data(event);
69598 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
69599 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69600 sizeof(*entry), 0, pc);
69601 if (!event) {
69602 - atomic_inc(&dropped_count);
69603 + atomic_inc_unchecked(&dropped_count);
69604 return;
69605 }
69606 entry = ring_buffer_event_data(event);
69607 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69608 index d9c07f0..c1eeceb 100644
69609 --- a/kernel/trace/trace_output.c
69610 +++ b/kernel/trace/trace_output.c
69611 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
69612
69613 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69614 if (!IS_ERR(p)) {
69615 - p = mangle_path(s->buffer + s->len, p, "\n");
69616 + p = mangle_path(s->buffer + s->len, p, "\n\\");
69617 if (p) {
69618 s->len = p - s->buffer;
69619 return 1;
69620 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69621 index d4545f4..a9010a1 100644
69622 --- a/kernel/trace/trace_stack.c
69623 +++ b/kernel/trace/trace_stack.c
69624 @@ -53,7 +53,7 @@ static inline void check_stack(void)
69625 return;
69626
69627 /* we do not handle interrupt stacks yet */
69628 - if (!object_is_on_stack(&this_size))
69629 + if (!object_starts_on_stack(&this_size))
69630 return;
69631
69632 local_irq_save(flags);
69633 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69634 index 209b379..7f76423 100644
69635 --- a/kernel/trace/trace_workqueue.c
69636 +++ b/kernel/trace/trace_workqueue.c
69637 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69638 int cpu;
69639 pid_t pid;
69640 /* Can be inserted from interrupt or user context, need to be atomic */
69641 - atomic_t inserted;
69642 + atomic_unchecked_t inserted;
69643 /*
69644 * Don't need to be atomic, works are serialized in a single workqueue thread
69645 * on a single CPU.
69646 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69647 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69648 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69649 if (node->pid == wq_thread->pid) {
69650 - atomic_inc(&node->inserted);
69651 + atomic_inc_unchecked(&node->inserted);
69652 goto found;
69653 }
69654 }
69655 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69656 tsk = get_pid_task(pid, PIDTYPE_PID);
69657 if (tsk) {
69658 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69659 - atomic_read(&cws->inserted), cws->executed,
69660 + atomic_read_unchecked(&cws->inserted), cws->executed,
69661 tsk->comm);
69662 put_task_struct(tsk);
69663 }
69664 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69665 index 8745ac7..d144e37 100644
69666 --- a/lib/Kconfig.debug
69667 +++ b/lib/Kconfig.debug
69668 @@ -1103,6 +1103,7 @@ config LATENCYTOP
69669 depends on DEBUG_KERNEL
69670 depends on STACKTRACE_SUPPORT
69671 depends on PROC_FS
69672 + depends on !GRKERNSEC_HIDESYM
69673 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
69674 select KALLSYMS
69675 select KALLSYMS_ALL
69676 diff --git a/lib/bitmap.c b/lib/bitmap.c
69677 index 0d4a127..33a06c7 100644
69678 --- a/lib/bitmap.c
69679 +++ b/lib/bitmap.c
69680 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69681 {
69682 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69683 u32 chunk;
69684 - const char __user __force *ubuf = (const char __user __force *)buf;
69685 + const char __user *ubuf = (const char __force_user *)buf;
69686
69687 bitmap_zero(maskp, nmaskbits);
69688
69689 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
69690 {
69691 if (!access_ok(VERIFY_READ, ubuf, ulen))
69692 return -EFAULT;
69693 - return __bitmap_parse((const char __force *)ubuf,
69694 + return __bitmap_parse((const char __force_kernel *)ubuf,
69695 ulen, 1, maskp, nmaskbits);
69696
69697 }
69698 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69699 {
69700 unsigned a, b;
69701 int c, old_c, totaldigits;
69702 - const char __user __force *ubuf = (const char __user __force *)buf;
69703 + const char __user *ubuf = (const char __force_user *)buf;
69704 int exp_digit, in_range;
69705
69706 totaldigits = c = 0;
69707 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69708 {
69709 if (!access_ok(VERIFY_READ, ubuf, ulen))
69710 return -EFAULT;
69711 - return __bitmap_parselist((const char __force *)ubuf,
69712 + return __bitmap_parselist((const char __force_kernel *)ubuf,
69713 ulen, 1, maskp, nmaskbits);
69714 }
69715 EXPORT_SYMBOL(bitmap_parselist_user);
69716 diff --git a/lib/bug.c b/lib/bug.c
69717 index a28c141..2bd3d95 100644
69718 --- a/lib/bug.c
69719 +++ b/lib/bug.c
69720 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69721 return BUG_TRAP_TYPE_NONE;
69722
69723 bug = find_bug(bugaddr);
69724 + if (!bug)
69725 + return BUG_TRAP_TYPE_NONE;
69726
69727 file = NULL;
69728 line = 0;
69729 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69730 index 0ab9ae8..f01ceca 100644
69731 --- a/lib/debugobjects.c
69732 +++ b/lib/debugobjects.c
69733 @@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69734 if (limit > 4)
69735 return;
69736
69737 - is_on_stack = object_is_on_stack(addr);
69738 + is_on_stack = object_starts_on_stack(addr);
69739 if (is_on_stack == onstack)
69740 return;
69741
69742 diff --git a/lib/devres.c b/lib/devres.c
69743 index 9676617..5149e15 100644
69744 --- a/lib/devres.c
69745 +++ b/lib/devres.c
69746 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69747 void devm_iounmap(struct device *dev, void __iomem *addr)
69748 {
69749 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69750 - (void *)addr));
69751 + (void __force *)addr));
69752 iounmap(addr);
69753 }
69754 EXPORT_SYMBOL(devm_iounmap);
69755 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69756 {
69757 ioport_unmap(addr);
69758 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69759 - devm_ioport_map_match, (void *)addr));
69760 + devm_ioport_map_match, (void __force *)addr));
69761 }
69762 EXPORT_SYMBOL(devm_ioport_unmap);
69763
69764 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69765 index fea790a..ebb0e82 100644
69766 --- a/lib/dma-debug.c
69767 +++ b/lib/dma-debug.c
69768 @@ -925,7 +925,7 @@ out:
69769
69770 static void check_for_stack(struct device *dev, void *addr)
69771 {
69772 - if (object_is_on_stack(addr))
69773 + if (object_starts_on_stack(addr))
69774 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69775 "stack [addr=%p]\n", addr);
69776 }
69777 diff --git a/lib/extable.c b/lib/extable.c
69778 index 4cac81e..63e9b8f 100644
69779 --- a/lib/extable.c
69780 +++ b/lib/extable.c
69781 @@ -13,6 +13,7 @@
69782 #include <linux/init.h>
69783 #include <linux/sort.h>
69784 #include <asm/uaccess.h>
69785 +#include <asm/pgtable.h>
69786
69787 #ifndef ARCH_HAS_SORT_EXTABLE
69788 /*
69789 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69790 void sort_extable(struct exception_table_entry *start,
69791 struct exception_table_entry *finish)
69792 {
69793 + pax_open_kernel();
69794 sort(start, finish - start, sizeof(struct exception_table_entry),
69795 cmp_ex, NULL);
69796 + pax_close_kernel();
69797 }
69798
69799 #ifdef CONFIG_MODULES
69800 diff --git a/lib/inflate.c b/lib/inflate.c
69801 index 013a761..c28f3fc 100644
69802 --- a/lib/inflate.c
69803 +++ b/lib/inflate.c
69804 @@ -269,7 +269,7 @@ static void free(void *where)
69805 malloc_ptr = free_mem_ptr;
69806 }
69807 #else
69808 -#define malloc(a) kmalloc(a, GFP_KERNEL)
69809 +#define malloc(a) kmalloc((a), GFP_KERNEL)
69810 #define free(a) kfree(a)
69811 #endif
69812
69813 diff --git a/lib/ioremap.c b/lib/ioremap.c
69814 index da4e2ad..6373b5f 100644
69815 --- a/lib/ioremap.c
69816 +++ b/lib/ioremap.c
69817 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
69818 unsigned long next;
69819
69820 phys_addr -= addr;
69821 - pmd = pmd_alloc(&init_mm, pud, addr);
69822 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
69823 if (!pmd)
69824 return -ENOMEM;
69825 do {
69826 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
69827 unsigned long next;
69828
69829 phys_addr -= addr;
69830 - pud = pud_alloc(&init_mm, pgd, addr);
69831 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
69832 if (!pud)
69833 return -ENOMEM;
69834 do {
69835 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
69836 index bd2bea9..6b3c95e 100644
69837 --- a/lib/is_single_threaded.c
69838 +++ b/lib/is_single_threaded.c
69839 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
69840 struct task_struct *p, *t;
69841 bool ret;
69842
69843 + if (!mm)
69844 + return true;
69845 +
69846 if (atomic_read(&task->signal->live) != 1)
69847 return false;
69848
69849 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69850 index dc63d08..95ae14a 100644
69851 --- a/lib/radix-tree.c
69852 +++ b/lib/radix-tree.c
69853 @@ -78,7 +78,7 @@ struct radix_tree_preload {
69854 int nr;
69855 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69856 };
69857 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69858 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69859
69860 static inline void *ptr_to_indirect(void *ptr)
69861 {
69862 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69863 index 38e612e..4fb99a8 100644
69864 --- a/lib/vsprintf.c
69865 +++ b/lib/vsprintf.c
69866 @@ -16,6 +16,9 @@
69867 * - scnprintf and vscnprintf
69868 */
69869
69870 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69871 +#define __INCLUDED_BY_HIDESYM 1
69872 +#endif
69873 #include <stdarg.h>
69874 #include <linux/module.h>
69875 #include <linux/types.h>
69876 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
69877 char sym[KSYM_SYMBOL_LEN];
69878 if (ext == 'B')
69879 sprint_backtrace(sym, value);
69880 - else if (ext != 'f' && ext != 's')
69881 + else if (ext != 'f' && ext != 's' && ext != 'a')
69882 sprint_symbol(sym, value);
69883 else
69884 kallsyms_lookup(value, NULL, NULL, NULL, sym);
69885 @@ -789,7 +792,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
69886 return number(buf, end, *(const netdev_features_t *)addr, spec);
69887 }
69888
69889 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69890 +int kptr_restrict __read_mostly = 2;
69891 +#else
69892 int kptr_restrict __read_mostly;
69893 +#endif
69894
69895 /*
69896 * Show a '%p' thing. A kernel extension is that the '%p' is followed
69897 @@ -803,6 +810,8 @@ int kptr_restrict __read_mostly;
69898 * - 'S' For symbolic direct pointers with offset
69899 * - 's' For symbolic direct pointers without offset
69900 * - 'B' For backtraced symbolic direct pointers with offset
69901 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69902 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69903 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69904 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69905 * - 'M' For a 6-byte MAC address, it prints the address in the
69906 @@ -848,12 +857,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69907 {
69908 if (!ptr && *fmt != 'K') {
69909 /*
69910 - * Print (null) with the same width as a pointer so it makes
69911 + * Print (nil) with the same width as a pointer so it makes
69912 * tabular output look nice.
69913 */
69914 if (spec.field_width == -1)
69915 spec.field_width = 2 * sizeof(void *);
69916 - return string(buf, end, "(null)", spec);
69917 + return string(buf, end, "(nil)", spec);
69918 }
69919
69920 switch (*fmt) {
69921 @@ -863,6 +872,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69922 /* Fallthrough */
69923 case 'S':
69924 case 's':
69925 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69926 + break;
69927 +#else
69928 + return symbol_string(buf, end, ptr, spec, *fmt);
69929 +#endif
69930 + case 'A':
69931 + case 'a':
69932 case 'B':
69933 return symbol_string(buf, end, ptr, spec, *fmt);
69934 case 'R':
69935 @@ -1633,11 +1649,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69936 typeof(type) value; \
69937 if (sizeof(type) == 8) { \
69938 args = PTR_ALIGN(args, sizeof(u32)); \
69939 - *(u32 *)&value = *(u32 *)args; \
69940 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69941 + *(u32 *)&value = *(const u32 *)args; \
69942 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69943 } else { \
69944 args = PTR_ALIGN(args, sizeof(type)); \
69945 - value = *(typeof(type) *)args; \
69946 + value = *(const typeof(type) *)args; \
69947 } \
69948 args += sizeof(type); \
69949 value; \
69950 @@ -1700,7 +1716,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69951 case FORMAT_TYPE_STR: {
69952 const char *str_arg = args;
69953 args += strlen(str_arg) + 1;
69954 - str = string(str, end, (char *)str_arg, spec);
69955 + str = string(str, end, str_arg, spec);
69956 break;
69957 }
69958
69959 diff --git a/localversion-grsec b/localversion-grsec
69960 new file mode 100644
69961 index 0000000..7cd6065
69962 --- /dev/null
69963 +++ b/localversion-grsec
69964 @@ -0,0 +1 @@
69965 +-grsec
69966 diff --git a/mm/Kconfig b/mm/Kconfig
69967 index e338407..49b5b7a 100644
69968 --- a/mm/Kconfig
69969 +++ b/mm/Kconfig
69970 @@ -247,10 +247,10 @@ config KSM
69971 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69972
69973 config DEFAULT_MMAP_MIN_ADDR
69974 - int "Low address space to protect from user allocation"
69975 + int "Low address space to protect from user allocation"
69976 depends on MMU
69977 - default 4096
69978 - help
69979 + default 65536
69980 + help
69981 This is the portion of low virtual memory which should be protected
69982 from userspace allocation. Keeping a user from writing to low pages
69983 can help reduce the impact of kernel NULL pointer bugs.
69984 diff --git a/mm/filemap.c b/mm/filemap.c
69985 index b662757..3081ddd 100644
69986 --- a/mm/filemap.c
69987 +++ b/mm/filemap.c
69988 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69989 struct address_space *mapping = file->f_mapping;
69990
69991 if (!mapping->a_ops->readpage)
69992 - return -ENOEXEC;
69993 + return -ENODEV;
69994 file_accessed(file);
69995 vma->vm_ops = &generic_file_vm_ops;
69996 vma->vm_flags |= VM_CAN_NONLINEAR;
69997 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69998 *pos = i_size_read(inode);
69999
70000 if (limit != RLIM_INFINITY) {
70001 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
70002 if (*pos >= limit) {
70003 send_sig(SIGXFSZ, current, 0);
70004 return -EFBIG;
70005 diff --git a/mm/fremap.c b/mm/fremap.c
70006 index 9ed4fd4..c42648d 100644
70007 --- a/mm/fremap.c
70008 +++ b/mm/fremap.c
70009 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
70010 retry:
70011 vma = find_vma(mm, start);
70012
70013 +#ifdef CONFIG_PAX_SEGMEXEC
70014 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
70015 + goto out;
70016 +#endif
70017 +
70018 /*
70019 * Make sure the vma is shared, that it supports prefaulting,
70020 * and that the remapped range is valid and fully within
70021 diff --git a/mm/highmem.c b/mm/highmem.c
70022 index 57d82c6..e9e0552 100644
70023 --- a/mm/highmem.c
70024 +++ b/mm/highmem.c
70025 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
70026 * So no dangers, even with speculative execution.
70027 */
70028 page = pte_page(pkmap_page_table[i]);
70029 + pax_open_kernel();
70030 pte_clear(&init_mm, (unsigned long)page_address(page),
70031 &pkmap_page_table[i]);
70032 -
70033 + pax_close_kernel();
70034 set_page_address(page, NULL);
70035 need_flush = 1;
70036 }
70037 @@ -186,9 +187,11 @@ start:
70038 }
70039 }
70040 vaddr = PKMAP_ADDR(last_pkmap_nr);
70041 +
70042 + pax_open_kernel();
70043 set_pte_at(&init_mm, vaddr,
70044 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
70045 -
70046 + pax_close_kernel();
70047 pkmap_count[last_pkmap_nr] = 1;
70048 set_page_address(page, (void *)vaddr);
70049
70050 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
70051 index 8f7fc39..69bf1e9 100644
70052 --- a/mm/huge_memory.c
70053 +++ b/mm/huge_memory.c
70054 @@ -733,7 +733,7 @@ out:
70055 * run pte_offset_map on the pmd, if an huge pmd could
70056 * materialize from under us from a different thread.
70057 */
70058 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
70059 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70060 return VM_FAULT_OOM;
70061 /* if an huge pmd materialized from under us just retry later */
70062 if (unlikely(pmd_trans_huge(*pmd)))
70063 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
70064 index fece520..7fad868 100644
70065 --- a/mm/hugetlb.c
70066 +++ b/mm/hugetlb.c
70067 @@ -2146,6 +2146,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
70068 kref_get(&reservations->refs);
70069 }
70070
70071 +static void resv_map_put(struct vm_area_struct *vma)
70072 +{
70073 + struct resv_map *reservations = vma_resv_map(vma);
70074 +
70075 + if (!reservations)
70076 + return;
70077 + kref_put(&reservations->refs, resv_map_release);
70078 +}
70079 +
70080 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
70081 {
70082 struct hstate *h = hstate_vma(vma);
70083 @@ -2162,7 +2171,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
70084 reserve = (end - start) -
70085 region_count(&reservations->regions, start, end);
70086
70087 - kref_put(&reservations->refs, resv_map_release);
70088 + resv_map_put(vma);
70089
70090 if (reserve) {
70091 hugetlb_acct_memory(h, -reserve);
70092 @@ -2425,6 +2434,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
70093 return 1;
70094 }
70095
70096 +#ifdef CONFIG_PAX_SEGMEXEC
70097 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
70098 +{
70099 + struct mm_struct *mm = vma->vm_mm;
70100 + struct vm_area_struct *vma_m;
70101 + unsigned long address_m;
70102 + pte_t *ptep_m;
70103 +
70104 + vma_m = pax_find_mirror_vma(vma);
70105 + if (!vma_m)
70106 + return;
70107 +
70108 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70109 + address_m = address + SEGMEXEC_TASK_SIZE;
70110 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
70111 + get_page(page_m);
70112 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
70113 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
70114 +}
70115 +#endif
70116 +
70117 /*
70118 * Hugetlb_cow() should be called with page lock of the original hugepage held.
70119 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
70120 @@ -2537,6 +2567,11 @@ retry_avoidcopy:
70121 make_huge_pte(vma, new_page, 1));
70122 page_remove_rmap(old_page);
70123 hugepage_add_new_anon_rmap(new_page, vma, address);
70124 +
70125 +#ifdef CONFIG_PAX_SEGMEXEC
70126 + pax_mirror_huge_pte(vma, address, new_page);
70127 +#endif
70128 +
70129 /* Make the old page be freed below */
70130 new_page = old_page;
70131 mmu_notifier_invalidate_range_end(mm,
70132 @@ -2691,6 +2726,10 @@ retry:
70133 && (vma->vm_flags & VM_SHARED)));
70134 set_huge_pte_at(mm, address, ptep, new_pte);
70135
70136 +#ifdef CONFIG_PAX_SEGMEXEC
70137 + pax_mirror_huge_pte(vma, address, page);
70138 +#endif
70139 +
70140 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
70141 /* Optimization, do the COW without a second fault */
70142 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
70143 @@ -2720,6 +2759,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70144 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
70145 struct hstate *h = hstate_vma(vma);
70146
70147 +#ifdef CONFIG_PAX_SEGMEXEC
70148 + struct vm_area_struct *vma_m;
70149 +#endif
70150 +
70151 address &= huge_page_mask(h);
70152
70153 ptep = huge_pte_offset(mm, address);
70154 @@ -2733,6 +2776,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70155 VM_FAULT_SET_HINDEX(h - hstates);
70156 }
70157
70158 +#ifdef CONFIG_PAX_SEGMEXEC
70159 + vma_m = pax_find_mirror_vma(vma);
70160 + if (vma_m) {
70161 + unsigned long address_m;
70162 +
70163 + if (vma->vm_start > vma_m->vm_start) {
70164 + address_m = address;
70165 + address -= SEGMEXEC_TASK_SIZE;
70166 + vma = vma_m;
70167 + h = hstate_vma(vma);
70168 + } else
70169 + address_m = address + SEGMEXEC_TASK_SIZE;
70170 +
70171 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
70172 + return VM_FAULT_OOM;
70173 + address_m &= HPAGE_MASK;
70174 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
70175 + }
70176 +#endif
70177 +
70178 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
70179 if (!ptep)
70180 return VM_FAULT_OOM;
70181 @@ -2978,12 +3041,16 @@ int hugetlb_reserve_pages(struct inode *inode,
70182 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
70183 }
70184
70185 - if (chg < 0)
70186 - return chg;
70187 + if (chg < 0) {
70188 + ret = chg;
70189 + goto out_err;
70190 + }
70191
70192 /* There must be enough pages in the subpool for the mapping */
70193 - if (hugepage_subpool_get_pages(spool, chg))
70194 - return -ENOSPC;
70195 + if (hugepage_subpool_get_pages(spool, chg)) {
70196 + ret = -ENOSPC;
70197 + goto out_err;
70198 + }
70199
70200 /*
70201 * Check enough hugepages are available for the reservation.
70202 @@ -2992,7 +3059,7 @@ int hugetlb_reserve_pages(struct inode *inode,
70203 ret = hugetlb_acct_memory(h, chg);
70204 if (ret < 0) {
70205 hugepage_subpool_put_pages(spool, chg);
70206 - return ret;
70207 + goto out_err;
70208 }
70209
70210 /*
70211 @@ -3009,6 +3076,9 @@ int hugetlb_reserve_pages(struct inode *inode,
70212 if (!vma || vma->vm_flags & VM_MAYSHARE)
70213 region_add(&inode->i_mapping->private_list, from, to);
70214 return 0;
70215 +out_err:
70216 + resv_map_put(vma);
70217 + return ret;
70218 }
70219
70220 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
70221 diff --git a/mm/internal.h b/mm/internal.h
70222 index 2189af4..f2ca332 100644
70223 --- a/mm/internal.h
70224 +++ b/mm/internal.h
70225 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
70226 * in mm/page_alloc.c
70227 */
70228 extern void __free_pages_bootmem(struct page *page, unsigned int order);
70229 +extern void free_compound_page(struct page *page);
70230 extern void prep_compound_page(struct page *page, unsigned long order);
70231 #ifdef CONFIG_MEMORY_FAILURE
70232 extern bool is_free_buddy_page(struct page *page);
70233 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
70234 index 45eb621..6ccd8ea 100644
70235 --- a/mm/kmemleak.c
70236 +++ b/mm/kmemleak.c
70237 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
70238
70239 for (i = 0; i < object->trace_len; i++) {
70240 void *ptr = (void *)object->trace[i];
70241 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
70242 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
70243 }
70244 }
70245
70246 diff --git a/mm/maccess.c b/mm/maccess.c
70247 index d53adf9..03a24bf 100644
70248 --- a/mm/maccess.c
70249 +++ b/mm/maccess.c
70250 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
70251 set_fs(KERNEL_DS);
70252 pagefault_disable();
70253 ret = __copy_from_user_inatomic(dst,
70254 - (__force const void __user *)src, size);
70255 + (const void __force_user *)src, size);
70256 pagefault_enable();
70257 set_fs(old_fs);
70258
70259 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
70260
70261 set_fs(KERNEL_DS);
70262 pagefault_disable();
70263 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
70264 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
70265 pagefault_enable();
70266 set_fs(old_fs);
70267
70268 diff --git a/mm/madvise.c b/mm/madvise.c
70269 index 74bf193..feb6fd3 100644
70270 --- a/mm/madvise.c
70271 +++ b/mm/madvise.c
70272 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
70273 pgoff_t pgoff;
70274 unsigned long new_flags = vma->vm_flags;
70275
70276 +#ifdef CONFIG_PAX_SEGMEXEC
70277 + struct vm_area_struct *vma_m;
70278 +#endif
70279 +
70280 switch (behavior) {
70281 case MADV_NORMAL:
70282 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
70283 @@ -110,6 +114,13 @@ success:
70284 /*
70285 * vm_flags is protected by the mmap_sem held in write mode.
70286 */
70287 +
70288 +#ifdef CONFIG_PAX_SEGMEXEC
70289 + vma_m = pax_find_mirror_vma(vma);
70290 + if (vma_m)
70291 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
70292 +#endif
70293 +
70294 vma->vm_flags = new_flags;
70295
70296 out:
70297 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70298 struct vm_area_struct ** prev,
70299 unsigned long start, unsigned long end)
70300 {
70301 +
70302 +#ifdef CONFIG_PAX_SEGMEXEC
70303 + struct vm_area_struct *vma_m;
70304 +#endif
70305 +
70306 *prev = vma;
70307 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
70308 return -EINVAL;
70309 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70310 zap_page_range(vma, start, end - start, &details);
70311 } else
70312 zap_page_range(vma, start, end - start, NULL);
70313 +
70314 +#ifdef CONFIG_PAX_SEGMEXEC
70315 + vma_m = pax_find_mirror_vma(vma);
70316 + if (vma_m) {
70317 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
70318 + struct zap_details details = {
70319 + .nonlinear_vma = vma_m,
70320 + .last_index = ULONG_MAX,
70321 + };
70322 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70323 + } else
70324 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70325 + }
70326 +#endif
70327 +
70328 return 0;
70329 }
70330
70331 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
70332 if (end < start)
70333 goto out;
70334
70335 +#ifdef CONFIG_PAX_SEGMEXEC
70336 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70337 + if (end > SEGMEXEC_TASK_SIZE)
70338 + goto out;
70339 + } else
70340 +#endif
70341 +
70342 + if (end > TASK_SIZE)
70343 + goto out;
70344 +
70345 error = 0;
70346 if (end == start)
70347 goto out;
70348 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
70349 index 56080ea..115071e 100644
70350 --- a/mm/memory-failure.c
70351 +++ b/mm/memory-failure.c
70352 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
70353
70354 int sysctl_memory_failure_recovery __read_mostly = 1;
70355
70356 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70357 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70358
70359 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70360
70361 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
70362 si.si_signo = SIGBUS;
70363 si.si_errno = 0;
70364 si.si_code = BUS_MCEERR_AO;
70365 - si.si_addr = (void *)addr;
70366 + si.si_addr = (void __user *)addr;
70367 #ifdef __ARCH_SI_TRAPNO
70368 si.si_trapno = trapno;
70369 #endif
70370 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70371 }
70372
70373 nr_pages = 1 << compound_trans_order(hpage);
70374 - atomic_long_add(nr_pages, &mce_bad_pages);
70375 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
70376
70377 /*
70378 * We need/can do nothing about count=0 pages.
70379 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70380 if (!PageHWPoison(hpage)
70381 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
70382 || (p != hpage && TestSetPageHWPoison(hpage))) {
70383 - atomic_long_sub(nr_pages, &mce_bad_pages);
70384 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70385 return 0;
70386 }
70387 set_page_hwpoison_huge_page(hpage);
70388 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70389 }
70390 if (hwpoison_filter(p)) {
70391 if (TestClearPageHWPoison(p))
70392 - atomic_long_sub(nr_pages, &mce_bad_pages);
70393 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70394 unlock_page(hpage);
70395 put_page(hpage);
70396 return 0;
70397 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
70398 return 0;
70399 }
70400 if (TestClearPageHWPoison(p))
70401 - atomic_long_sub(nr_pages, &mce_bad_pages);
70402 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70403 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
70404 return 0;
70405 }
70406 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
70407 */
70408 if (TestClearPageHWPoison(page)) {
70409 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
70410 - atomic_long_sub(nr_pages, &mce_bad_pages);
70411 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70412 freeit = 1;
70413 if (PageHuge(page))
70414 clear_page_hwpoison_huge_page(page);
70415 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
70416 }
70417 done:
70418 if (!PageHWPoison(hpage))
70419 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
70420 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
70421 set_page_hwpoison_huge_page(hpage);
70422 dequeue_hwpoisoned_huge_page(hpage);
70423 /* keep elevated page count for bad page */
70424 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
70425 return ret;
70426
70427 done:
70428 - atomic_long_add(1, &mce_bad_pages);
70429 + atomic_long_add_unchecked(1, &mce_bad_pages);
70430 SetPageHWPoison(page);
70431 /* keep elevated page count for bad page */
70432 return ret;
70433 diff --git a/mm/memory.c b/mm/memory.c
70434 index 10b4dda..06857f3 100644
70435 --- a/mm/memory.c
70436 +++ b/mm/memory.c
70437 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
70438 return;
70439
70440 pmd = pmd_offset(pud, start);
70441 +
70442 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
70443 pud_clear(pud);
70444 pmd_free_tlb(tlb, pmd, start);
70445 +#endif
70446 +
70447 }
70448
70449 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70450 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70451 if (end - 1 > ceiling - 1)
70452 return;
70453
70454 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
70455 pud = pud_offset(pgd, start);
70456 pgd_clear(pgd);
70457 pud_free_tlb(tlb, pud, start);
70458 +#endif
70459 +
70460 }
70461
70462 /*
70463 @@ -1593,12 +1600,6 @@ no_page_table:
70464 return page;
70465 }
70466
70467 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
70468 -{
70469 - return stack_guard_page_start(vma, addr) ||
70470 - stack_guard_page_end(vma, addr+PAGE_SIZE);
70471 -}
70472 -
70473 /**
70474 * __get_user_pages() - pin user pages in memory
70475 * @tsk: task_struct of target task
70476 @@ -1671,10 +1672,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70477 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
70478 i = 0;
70479
70480 - do {
70481 + while (nr_pages) {
70482 struct vm_area_struct *vma;
70483
70484 - vma = find_extend_vma(mm, start);
70485 + vma = find_vma(mm, start);
70486 if (!vma && in_gate_area(mm, start)) {
70487 unsigned long pg = start & PAGE_MASK;
70488 pgd_t *pgd;
70489 @@ -1722,7 +1723,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70490 goto next_page;
70491 }
70492
70493 - if (!vma ||
70494 + if (!vma || start < vma->vm_start ||
70495 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
70496 !(vm_flags & vma->vm_flags))
70497 return i ? : -EFAULT;
70498 @@ -1749,11 +1750,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70499 int ret;
70500 unsigned int fault_flags = 0;
70501
70502 - /* For mlock, just skip the stack guard page. */
70503 - if (foll_flags & FOLL_MLOCK) {
70504 - if (stack_guard_page(vma, start))
70505 - goto next_page;
70506 - }
70507 if (foll_flags & FOLL_WRITE)
70508 fault_flags |= FAULT_FLAG_WRITE;
70509 if (nonblocking)
70510 @@ -1827,7 +1823,7 @@ next_page:
70511 start += PAGE_SIZE;
70512 nr_pages--;
70513 } while (nr_pages && start < vma->vm_end);
70514 - } while (nr_pages);
70515 + }
70516 return i;
70517 }
70518 EXPORT_SYMBOL(__get_user_pages);
70519 @@ -2034,6 +2030,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
70520 page_add_file_rmap(page);
70521 set_pte_at(mm, addr, pte, mk_pte(page, prot));
70522
70523 +#ifdef CONFIG_PAX_SEGMEXEC
70524 + pax_mirror_file_pte(vma, addr, page, ptl);
70525 +#endif
70526 +
70527 retval = 0;
70528 pte_unmap_unlock(pte, ptl);
70529 return retval;
70530 @@ -2068,10 +2068,22 @@ out:
70531 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
70532 struct page *page)
70533 {
70534 +
70535 +#ifdef CONFIG_PAX_SEGMEXEC
70536 + struct vm_area_struct *vma_m;
70537 +#endif
70538 +
70539 if (addr < vma->vm_start || addr >= vma->vm_end)
70540 return -EFAULT;
70541 if (!page_count(page))
70542 return -EINVAL;
70543 +
70544 +#ifdef CONFIG_PAX_SEGMEXEC
70545 + vma_m = pax_find_mirror_vma(vma);
70546 + if (vma_m)
70547 + vma_m->vm_flags |= VM_INSERTPAGE;
70548 +#endif
70549 +
70550 vma->vm_flags |= VM_INSERTPAGE;
70551 return insert_page(vma, addr, page, vma->vm_page_prot);
70552 }
70553 @@ -2157,6 +2169,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
70554 unsigned long pfn)
70555 {
70556 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70557 + BUG_ON(vma->vm_mirror);
70558
70559 if (addr < vma->vm_start || addr >= vma->vm_end)
70560 return -EFAULT;
70561 @@ -2364,7 +2377,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
70562
70563 BUG_ON(pud_huge(*pud));
70564
70565 - pmd = pmd_alloc(mm, pud, addr);
70566 + pmd = (mm == &init_mm) ?
70567 + pmd_alloc_kernel(mm, pud, addr) :
70568 + pmd_alloc(mm, pud, addr);
70569 if (!pmd)
70570 return -ENOMEM;
70571 do {
70572 @@ -2384,7 +2399,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
70573 unsigned long next;
70574 int err;
70575
70576 - pud = pud_alloc(mm, pgd, addr);
70577 + pud = (mm == &init_mm) ?
70578 + pud_alloc_kernel(mm, pgd, addr) :
70579 + pud_alloc(mm, pgd, addr);
70580 if (!pud)
70581 return -ENOMEM;
70582 do {
70583 @@ -2472,6 +2489,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70584 copy_user_highpage(dst, src, va, vma);
70585 }
70586
70587 +#ifdef CONFIG_PAX_SEGMEXEC
70588 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70589 +{
70590 + struct mm_struct *mm = vma->vm_mm;
70591 + spinlock_t *ptl;
70592 + pte_t *pte, entry;
70593 +
70594 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70595 + entry = *pte;
70596 + if (!pte_present(entry)) {
70597 + if (!pte_none(entry)) {
70598 + BUG_ON(pte_file(entry));
70599 + free_swap_and_cache(pte_to_swp_entry(entry));
70600 + pte_clear_not_present_full(mm, address, pte, 0);
70601 + }
70602 + } else {
70603 + struct page *page;
70604 +
70605 + flush_cache_page(vma, address, pte_pfn(entry));
70606 + entry = ptep_clear_flush(vma, address, pte);
70607 + BUG_ON(pte_dirty(entry));
70608 + page = vm_normal_page(vma, address, entry);
70609 + if (page) {
70610 + update_hiwater_rss(mm);
70611 + if (PageAnon(page))
70612 + dec_mm_counter_fast(mm, MM_ANONPAGES);
70613 + else
70614 + dec_mm_counter_fast(mm, MM_FILEPAGES);
70615 + page_remove_rmap(page);
70616 + page_cache_release(page);
70617 + }
70618 + }
70619 + pte_unmap_unlock(pte, ptl);
70620 +}
70621 +
70622 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
70623 + *
70624 + * the ptl of the lower mapped page is held on entry and is not released on exit
70625 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70626 + */
70627 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70628 +{
70629 + struct mm_struct *mm = vma->vm_mm;
70630 + unsigned long address_m;
70631 + spinlock_t *ptl_m;
70632 + struct vm_area_struct *vma_m;
70633 + pmd_t *pmd_m;
70634 + pte_t *pte_m, entry_m;
70635 +
70636 + BUG_ON(!page_m || !PageAnon(page_m));
70637 +
70638 + vma_m = pax_find_mirror_vma(vma);
70639 + if (!vma_m)
70640 + return;
70641 +
70642 + BUG_ON(!PageLocked(page_m));
70643 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70644 + address_m = address + SEGMEXEC_TASK_SIZE;
70645 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70646 + pte_m = pte_offset_map(pmd_m, address_m);
70647 + ptl_m = pte_lockptr(mm, pmd_m);
70648 + if (ptl != ptl_m) {
70649 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70650 + if (!pte_none(*pte_m))
70651 + goto out;
70652 + }
70653 +
70654 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70655 + page_cache_get(page_m);
70656 + page_add_anon_rmap(page_m, vma_m, address_m);
70657 + inc_mm_counter_fast(mm, MM_ANONPAGES);
70658 + set_pte_at(mm, address_m, pte_m, entry_m);
70659 + update_mmu_cache(vma_m, address_m, entry_m);
70660 +out:
70661 + if (ptl != ptl_m)
70662 + spin_unlock(ptl_m);
70663 + pte_unmap(pte_m);
70664 + unlock_page(page_m);
70665 +}
70666 +
70667 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70668 +{
70669 + struct mm_struct *mm = vma->vm_mm;
70670 + unsigned long address_m;
70671 + spinlock_t *ptl_m;
70672 + struct vm_area_struct *vma_m;
70673 + pmd_t *pmd_m;
70674 + pte_t *pte_m, entry_m;
70675 +
70676 + BUG_ON(!page_m || PageAnon(page_m));
70677 +
70678 + vma_m = pax_find_mirror_vma(vma);
70679 + if (!vma_m)
70680 + return;
70681 +
70682 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70683 + address_m = address + SEGMEXEC_TASK_SIZE;
70684 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70685 + pte_m = pte_offset_map(pmd_m, address_m);
70686 + ptl_m = pte_lockptr(mm, pmd_m);
70687 + if (ptl != ptl_m) {
70688 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70689 + if (!pte_none(*pte_m))
70690 + goto out;
70691 + }
70692 +
70693 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70694 + page_cache_get(page_m);
70695 + page_add_file_rmap(page_m);
70696 + inc_mm_counter_fast(mm, MM_FILEPAGES);
70697 + set_pte_at(mm, address_m, pte_m, entry_m);
70698 + update_mmu_cache(vma_m, address_m, entry_m);
70699 +out:
70700 + if (ptl != ptl_m)
70701 + spin_unlock(ptl_m);
70702 + pte_unmap(pte_m);
70703 +}
70704 +
70705 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70706 +{
70707 + struct mm_struct *mm = vma->vm_mm;
70708 + unsigned long address_m;
70709 + spinlock_t *ptl_m;
70710 + struct vm_area_struct *vma_m;
70711 + pmd_t *pmd_m;
70712 + pte_t *pte_m, entry_m;
70713 +
70714 + vma_m = pax_find_mirror_vma(vma);
70715 + if (!vma_m)
70716 + return;
70717 +
70718 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70719 + address_m = address + SEGMEXEC_TASK_SIZE;
70720 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70721 + pte_m = pte_offset_map(pmd_m, address_m);
70722 + ptl_m = pte_lockptr(mm, pmd_m);
70723 + if (ptl != ptl_m) {
70724 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70725 + if (!pte_none(*pte_m))
70726 + goto out;
70727 + }
70728 +
70729 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70730 + set_pte_at(mm, address_m, pte_m, entry_m);
70731 +out:
70732 + if (ptl != ptl_m)
70733 + spin_unlock(ptl_m);
70734 + pte_unmap(pte_m);
70735 +}
70736 +
70737 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70738 +{
70739 + struct page *page_m;
70740 + pte_t entry;
70741 +
70742 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70743 + goto out;
70744 +
70745 + entry = *pte;
70746 + page_m = vm_normal_page(vma, address, entry);
70747 + if (!page_m)
70748 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70749 + else if (PageAnon(page_m)) {
70750 + if (pax_find_mirror_vma(vma)) {
70751 + pte_unmap_unlock(pte, ptl);
70752 + lock_page(page_m);
70753 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70754 + if (pte_same(entry, *pte))
70755 + pax_mirror_anon_pte(vma, address, page_m, ptl);
70756 + else
70757 + unlock_page(page_m);
70758 + }
70759 + } else
70760 + pax_mirror_file_pte(vma, address, page_m, ptl);
70761 +
70762 +out:
70763 + pte_unmap_unlock(pte, ptl);
70764 +}
70765 +#endif
70766 +
70767 /*
70768 * This routine handles present pages, when users try to write
70769 * to a shared page. It is done by copying the page to a new address
70770 @@ -2683,6 +2880,12 @@ gotten:
70771 */
70772 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70773 if (likely(pte_same(*page_table, orig_pte))) {
70774 +
70775 +#ifdef CONFIG_PAX_SEGMEXEC
70776 + if (pax_find_mirror_vma(vma))
70777 + BUG_ON(!trylock_page(new_page));
70778 +#endif
70779 +
70780 if (old_page) {
70781 if (!PageAnon(old_page)) {
70782 dec_mm_counter_fast(mm, MM_FILEPAGES);
70783 @@ -2734,6 +2937,10 @@ gotten:
70784 page_remove_rmap(old_page);
70785 }
70786
70787 +#ifdef CONFIG_PAX_SEGMEXEC
70788 + pax_mirror_anon_pte(vma, address, new_page, ptl);
70789 +#endif
70790 +
70791 /* Free the old page.. */
70792 new_page = old_page;
70793 ret |= VM_FAULT_WRITE;
70794 @@ -3013,6 +3220,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70795 swap_free(entry);
70796 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70797 try_to_free_swap(page);
70798 +
70799 +#ifdef CONFIG_PAX_SEGMEXEC
70800 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70801 +#endif
70802 +
70803 unlock_page(page);
70804 if (swapcache) {
70805 /*
70806 @@ -3036,6 +3248,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70807
70808 /* No need to invalidate - it was non-present before */
70809 update_mmu_cache(vma, address, page_table);
70810 +
70811 +#ifdef CONFIG_PAX_SEGMEXEC
70812 + pax_mirror_anon_pte(vma, address, page, ptl);
70813 +#endif
70814 +
70815 unlock:
70816 pte_unmap_unlock(page_table, ptl);
70817 out:
70818 @@ -3055,40 +3272,6 @@ out_release:
70819 }
70820
70821 /*
70822 - * This is like a special single-page "expand_{down|up}wards()",
70823 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
70824 - * doesn't hit another vma.
70825 - */
70826 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70827 -{
70828 - address &= PAGE_MASK;
70829 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70830 - struct vm_area_struct *prev = vma->vm_prev;
70831 -
70832 - /*
70833 - * Is there a mapping abutting this one below?
70834 - *
70835 - * That's only ok if it's the same stack mapping
70836 - * that has gotten split..
70837 - */
70838 - if (prev && prev->vm_end == address)
70839 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70840 -
70841 - expand_downwards(vma, address - PAGE_SIZE);
70842 - }
70843 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70844 - struct vm_area_struct *next = vma->vm_next;
70845 -
70846 - /* As VM_GROWSDOWN but s/below/above/ */
70847 - if (next && next->vm_start == address + PAGE_SIZE)
70848 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70849 -
70850 - expand_upwards(vma, address + PAGE_SIZE);
70851 - }
70852 - return 0;
70853 -}
70854 -
70855 -/*
70856 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70857 * but allow concurrent faults), and pte mapped but not yet locked.
70858 * We return with mmap_sem still held, but pte unmapped and unlocked.
70859 @@ -3097,27 +3280,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70860 unsigned long address, pte_t *page_table, pmd_t *pmd,
70861 unsigned int flags)
70862 {
70863 - struct page *page;
70864 + struct page *page = NULL;
70865 spinlock_t *ptl;
70866 pte_t entry;
70867
70868 - pte_unmap(page_table);
70869 -
70870 - /* Check if we need to add a guard page to the stack */
70871 - if (check_stack_guard_page(vma, address) < 0)
70872 - return VM_FAULT_SIGBUS;
70873 -
70874 - /* Use the zero-page for reads */
70875 if (!(flags & FAULT_FLAG_WRITE)) {
70876 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70877 vma->vm_page_prot));
70878 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70879 + ptl = pte_lockptr(mm, pmd);
70880 + spin_lock(ptl);
70881 if (!pte_none(*page_table))
70882 goto unlock;
70883 goto setpte;
70884 }
70885
70886 /* Allocate our own private page. */
70887 + pte_unmap(page_table);
70888 +
70889 if (unlikely(anon_vma_prepare(vma)))
70890 goto oom;
70891 page = alloc_zeroed_user_highpage_movable(vma, address);
70892 @@ -3136,6 +3315,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70893 if (!pte_none(*page_table))
70894 goto release;
70895
70896 +#ifdef CONFIG_PAX_SEGMEXEC
70897 + if (pax_find_mirror_vma(vma))
70898 + BUG_ON(!trylock_page(page));
70899 +#endif
70900 +
70901 inc_mm_counter_fast(mm, MM_ANONPAGES);
70902 page_add_new_anon_rmap(page, vma, address);
70903 setpte:
70904 @@ -3143,6 +3327,12 @@ setpte:
70905
70906 /* No need to invalidate - it was non-present before */
70907 update_mmu_cache(vma, address, page_table);
70908 +
70909 +#ifdef CONFIG_PAX_SEGMEXEC
70910 + if (page)
70911 + pax_mirror_anon_pte(vma, address, page, ptl);
70912 +#endif
70913 +
70914 unlock:
70915 pte_unmap_unlock(page_table, ptl);
70916 return 0;
70917 @@ -3286,6 +3476,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70918 */
70919 /* Only go through if we didn't race with anybody else... */
70920 if (likely(pte_same(*page_table, orig_pte))) {
70921 +
70922 +#ifdef CONFIG_PAX_SEGMEXEC
70923 + if (anon && pax_find_mirror_vma(vma))
70924 + BUG_ON(!trylock_page(page));
70925 +#endif
70926 +
70927 flush_icache_page(vma, page);
70928 entry = mk_pte(page, vma->vm_page_prot);
70929 if (flags & FAULT_FLAG_WRITE)
70930 @@ -3305,6 +3501,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70931
70932 /* no need to invalidate: a not-present page won't be cached */
70933 update_mmu_cache(vma, address, page_table);
70934 +
70935 +#ifdef CONFIG_PAX_SEGMEXEC
70936 + if (anon)
70937 + pax_mirror_anon_pte(vma, address, page, ptl);
70938 + else
70939 + pax_mirror_file_pte(vma, address, page, ptl);
70940 +#endif
70941 +
70942 } else {
70943 if (cow_page)
70944 mem_cgroup_uncharge_page(cow_page);
70945 @@ -3458,6 +3662,12 @@ int handle_pte_fault(struct mm_struct *mm,
70946 if (flags & FAULT_FLAG_WRITE)
70947 flush_tlb_fix_spurious_fault(vma, address);
70948 }
70949 +
70950 +#ifdef CONFIG_PAX_SEGMEXEC
70951 + pax_mirror_pte(vma, address, pte, pmd, ptl);
70952 + return 0;
70953 +#endif
70954 +
70955 unlock:
70956 pte_unmap_unlock(pte, ptl);
70957 return 0;
70958 @@ -3474,6 +3684,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70959 pmd_t *pmd;
70960 pte_t *pte;
70961
70962 +#ifdef CONFIG_PAX_SEGMEXEC
70963 + struct vm_area_struct *vma_m;
70964 +#endif
70965 +
70966 __set_current_state(TASK_RUNNING);
70967
70968 count_vm_event(PGFAULT);
70969 @@ -3485,6 +3699,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70970 if (unlikely(is_vm_hugetlb_page(vma)))
70971 return hugetlb_fault(mm, vma, address, flags);
70972
70973 +#ifdef CONFIG_PAX_SEGMEXEC
70974 + vma_m = pax_find_mirror_vma(vma);
70975 + if (vma_m) {
70976 + unsigned long address_m;
70977 + pgd_t *pgd_m;
70978 + pud_t *pud_m;
70979 + pmd_t *pmd_m;
70980 +
70981 + if (vma->vm_start > vma_m->vm_start) {
70982 + address_m = address;
70983 + address -= SEGMEXEC_TASK_SIZE;
70984 + vma = vma_m;
70985 + } else
70986 + address_m = address + SEGMEXEC_TASK_SIZE;
70987 +
70988 + pgd_m = pgd_offset(mm, address_m);
70989 + pud_m = pud_alloc(mm, pgd_m, address_m);
70990 + if (!pud_m)
70991 + return VM_FAULT_OOM;
70992 + pmd_m = pmd_alloc(mm, pud_m, address_m);
70993 + if (!pmd_m)
70994 + return VM_FAULT_OOM;
70995 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70996 + return VM_FAULT_OOM;
70997 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70998 + }
70999 +#endif
71000 +
71001 pgd = pgd_offset(mm, address);
71002 pud = pud_alloc(mm, pgd, address);
71003 if (!pud)
71004 @@ -3514,7 +3756,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71005 * run pte_offset_map on the pmd, if an huge pmd could
71006 * materialize from under us from a different thread.
71007 */
71008 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
71009 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71010 return VM_FAULT_OOM;
71011 /* if an huge pmd materialized from under us just retry later */
71012 if (unlikely(pmd_trans_huge(*pmd)))
71013 @@ -3551,6 +3793,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
71014 spin_unlock(&mm->page_table_lock);
71015 return 0;
71016 }
71017 +
71018 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
71019 +{
71020 + pud_t *new = pud_alloc_one(mm, address);
71021 + if (!new)
71022 + return -ENOMEM;
71023 +
71024 + smp_wmb(); /* See comment in __pte_alloc */
71025 +
71026 + spin_lock(&mm->page_table_lock);
71027 + if (pgd_present(*pgd)) /* Another has populated it */
71028 + pud_free(mm, new);
71029 + else
71030 + pgd_populate_kernel(mm, pgd, new);
71031 + spin_unlock(&mm->page_table_lock);
71032 + return 0;
71033 +}
71034 #endif /* __PAGETABLE_PUD_FOLDED */
71035
71036 #ifndef __PAGETABLE_PMD_FOLDED
71037 @@ -3581,6 +3840,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
71038 spin_unlock(&mm->page_table_lock);
71039 return 0;
71040 }
71041 +
71042 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
71043 +{
71044 + pmd_t *new = pmd_alloc_one(mm, address);
71045 + if (!new)
71046 + return -ENOMEM;
71047 +
71048 + smp_wmb(); /* See comment in __pte_alloc */
71049 +
71050 + spin_lock(&mm->page_table_lock);
71051 +#ifndef __ARCH_HAS_4LEVEL_HACK
71052 + if (pud_present(*pud)) /* Another has populated it */
71053 + pmd_free(mm, new);
71054 + else
71055 + pud_populate_kernel(mm, pud, new);
71056 +#else
71057 + if (pgd_present(*pud)) /* Another has populated it */
71058 + pmd_free(mm, new);
71059 + else
71060 + pgd_populate_kernel(mm, pud, new);
71061 +#endif /* __ARCH_HAS_4LEVEL_HACK */
71062 + spin_unlock(&mm->page_table_lock);
71063 + return 0;
71064 +}
71065 #endif /* __PAGETABLE_PMD_FOLDED */
71066
71067 int make_pages_present(unsigned long addr, unsigned long end)
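
The __pud_alloc_kernel() and __pmd_alloc_kernel() helpers added above follow the same race-tolerant pattern as their user-space counterparts: allocate the new table outside the lock, order its initialisation with smp_wmb(), then publish it under mm->page_table_lock only if nobody else populated the slot first, freeing it otherwise. A user-space sketch of that pattern, with a pthread mutex standing in for page_table_lock and a single pointer standing in for the pgd/pud entry (names are illustrative):

/* "Allocate outside the lock, publish under the lock, free on lost race." */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;                      /* models the pgd/pud entry */

static int populate_slot(void)
{
	void *new = calloc(1, 4096);    /* allocate before taking the lock */
	if (!new)
		return -1;

	pthread_mutex_lock(&table_lock);
	if (slot)                       /* another thread populated it */
		free(new);
	else
		slot = new;             /* publish our table */
	pthread_mutex_unlock(&table_lock);
	return 0;
}

int main(void)
{
	if (populate_slot() == 0 && populate_slot() == 0)
		printf("slot populated once at %p\n", slot);
	free(slot);
	return 0;
}
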
71068 @@ -3618,7 +3901,7 @@ static int __init gate_vma_init(void)
71069 gate_vma.vm_start = FIXADDR_USER_START;
71070 gate_vma.vm_end = FIXADDR_USER_END;
71071 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
71072 - gate_vma.vm_page_prot = __P101;
71073 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
71074 /*
71075 * Make sure the vDSO gets into every core dump.
71076 * Dumping its contents makes post-mortem fully interpretable later
71077 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
71078 index 0a37570..2048346 100644
71079 --- a/mm/mempolicy.c
71080 +++ b/mm/mempolicy.c
71081 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71082 unsigned long vmstart;
71083 unsigned long vmend;
71084
71085 +#ifdef CONFIG_PAX_SEGMEXEC
71086 + struct vm_area_struct *vma_m;
71087 +#endif
71088 +
71089 vma = find_vma(mm, start);
71090 if (!vma || vma->vm_start > start)
71091 return -EFAULT;
71092 @@ -679,6 +683,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71093 err = policy_vma(vma, new_pol);
71094 if (err)
71095 goto out;
71096 +
71097 +#ifdef CONFIG_PAX_SEGMEXEC
71098 + vma_m = pax_find_mirror_vma(vma);
71099 + if (vma_m) {
71100 + err = policy_vma(vma_m, new_pol);
71101 + if (err)
71102 + goto out;
71103 + }
71104 +#endif
71105 +
71106 }
71107
71108 out:
71109 @@ -1112,6 +1126,17 @@ static long do_mbind(unsigned long start, unsigned long len,
71110
71111 if (end < start)
71112 return -EINVAL;
71113 +
71114 +#ifdef CONFIG_PAX_SEGMEXEC
71115 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71116 + if (end > SEGMEXEC_TASK_SIZE)
71117 + return -EINVAL;
71118 + } else
71119 +#endif
71120 +
71121 + if (end > TASK_SIZE)
71122 + return -EINVAL;
71123 +
71124 if (end == start)
71125 return 0;
71126
71127 @@ -1330,6 +1355,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71128 if (!mm)
71129 goto out;
71130
71131 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71132 + if (mm != current->mm &&
71133 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71134 + err = -EPERM;
71135 + goto out;
71136 + }
71137 +#endif
71138 +
71139 /*
71140 * Check if this process has the right to modify the specified
71141 * process. The right exists if the process has administrative
71142 @@ -1339,8 +1372,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71143 rcu_read_lock();
71144 tcred = __task_cred(task);
71145 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71146 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
71147 - !capable(CAP_SYS_NICE)) {
71148 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71149 rcu_read_unlock();
71150 err = -EPERM;
71151 goto out;
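
The mempolicy.c changes above make do_mbind() reject ranges that extend past the usable address space (SEGMEXEC_TASK_SIZE when the mm runs with SEGMEXEC, TASK_SIZE otherwise), propagate a new policy to the mirror vma in mbind_range(), and tighten the migrate_pages() permission check. A sketch of the added range check, with illustrative 32-bit constants rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define TASK_SIZE          0xC0000000UL   /* 3GB user space, i386 model */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)

static bool mbind_range_ok(unsigned long start, unsigned long end, bool segmexec)
{
	unsigned long limit = segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE;

	return end >= start && end <= limit;
}

int main(void)
{
	printf("plain mm:    %d\n", mbind_range_ok(0x40000000, 0x80000000, false));
	printf("segmexec mm: %d\n", mbind_range_ok(0x40000000, 0x80000000, true));
	return 0;
}
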
71152 diff --git a/mm/migrate.c b/mm/migrate.c
71153 index 1503b6b..156c672 100644
71154 --- a/mm/migrate.c
71155 +++ b/mm/migrate.c
71156 @@ -1370,6 +1370,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71157 if (!mm)
71158 return -EINVAL;
71159
71160 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71161 + if (mm != current->mm &&
71162 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71163 + err = -EPERM;
71164 + goto out;
71165 + }
71166 +#endif
71167 +
71168 /*
71169 * Check if this process has the right to modify the specified
71170 * process. The right exists if the process has administrative
71171 @@ -1379,8 +1387,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71172 rcu_read_lock();
71173 tcred = __task_cred(task);
71174 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71175 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
71176 - !capable(CAP_SYS_NICE)) {
71177 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71178 rcu_read_unlock();
71179 err = -EPERM;
71180 goto out;
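
Both the migrate_pages() and move_pages() hunks drop the "caller's real uid equals target's real uid" clause from the credential test and, under CONFIG_GRKERNSEC_PROC_MEMMAP, refuse to operate on another process that runs with RANDMMAP or SEGMEXEC. A sketch of the resulting predicate; the struct below is a simplification, not the kernel's cred layout:

#include <stdbool.h>
#include <stdio.h>

struct creds {
	unsigned uid;   /* real uid */
	unsigned euid;  /* effective uid */
	unsigned suid;  /* saved uid */
};

static bool may_migrate(const struct creds *caller, const struct creds *target,
			bool cap_sys_nice)
{
	return caller->euid == target->suid || caller->euid == target->uid ||
	       caller->uid  == target->suid || cap_sys_nice;
}

int main(void)
{
	/* Both processes were started by uid 1000, but each runs setuid to a
	 * different user; a bare real-uid match no longer grants access. */
	struct creds caller = { .uid = 1000, .euid = 2000, .suid = 2000 };
	struct creds target = { .uid = 1000, .euid = 3000, .suid = 3000 };

	printf("allowed: %d\n", may_migrate(&caller, &target, false));  /* 0 */
	return 0;
}
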
71181 diff --git a/mm/mlock.c b/mm/mlock.c
71182 index ef726e8..13e0901 100644
71183 --- a/mm/mlock.c
71184 +++ b/mm/mlock.c
71185 @@ -13,6 +13,7 @@
71186 #include <linux/pagemap.h>
71187 #include <linux/mempolicy.h>
71188 #include <linux/syscalls.h>
71189 +#include <linux/security.h>
71190 #include <linux/sched.h>
71191 #include <linux/export.h>
71192 #include <linux/rmap.h>
71193 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
71194 return -EINVAL;
71195 if (end == start)
71196 return 0;
71197 + if (end > TASK_SIZE)
71198 + return -EINVAL;
71199 +
71200 vma = find_vma(current->mm, start);
71201 if (!vma || vma->vm_start > start)
71202 return -ENOMEM;
71203 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
71204 for (nstart = start ; ; ) {
71205 vm_flags_t newflags;
71206
71207 +#ifdef CONFIG_PAX_SEGMEXEC
71208 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71209 + break;
71210 +#endif
71211 +
71212 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
71213
71214 newflags = vma->vm_flags | VM_LOCKED;
71215 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
71216 lock_limit >>= PAGE_SHIFT;
71217
71218 /* check against resource limits */
71219 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
71220 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
71221 error = do_mlock(start, len, 1);
71222 up_write(&current->mm->mmap_sem);
71223 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
71224 static int do_mlockall(int flags)
71225 {
71226 struct vm_area_struct * vma, * prev = NULL;
71227 - unsigned int def_flags = 0;
71228
71229 if (flags & MCL_FUTURE)
71230 - def_flags = VM_LOCKED;
71231 - current->mm->def_flags = def_flags;
71232 + current->mm->def_flags |= VM_LOCKED;
71233 + else
71234 + current->mm->def_flags &= ~VM_LOCKED;
71235 if (flags == MCL_FUTURE)
71236 goto out;
71237
71238 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
71239 vm_flags_t newflags;
71240
71241 +#ifdef CONFIG_PAX_SEGMEXEC
71242 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71243 + break;
71244 +#endif
71245 +
71246 + BUG_ON(vma->vm_end > TASK_SIZE);
71247 newflags = vma->vm_flags | VM_LOCKED;
71248 if (!(flags & MCL_CURRENT))
71249 newflags &= ~VM_LOCKED;
71250 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
71251 lock_limit >>= PAGE_SHIFT;
71252
71253 ret = -ENOMEM;
71254 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
71255 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
71256 capable(CAP_IPC_LOCK))
71257 ret = do_mlockall(flags);
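
The mlock.c changes add resource-limit learning hooks, bound do_mlock() by TASK_SIZE, skip SEGMEXEC mirror vmas in the walk loops, and make do_mlockall() toggle only the VM_LOCKED bit instead of overwriting mm->def_flags wholesale, so other default flags survive an mlockall()/munlockall() cycle. A sketch of that last change; flag values are illustrative:

#include <stdio.h>

#define VM_LOCKED   0x00002000UL
#define MCL_FUTURE  2

static unsigned long update_def_flags(unsigned long def_flags, int flags)
{
	if (flags & MCL_FUTURE)
		def_flags |= VM_LOCKED;     /* old code: def_flags = VM_LOCKED */
	else
		def_flags &= ~VM_LOCKED;    /* old code: def_flags = 0 */
	return def_flags;
}

int main(void)
{
	unsigned long def_flags = 0x00100000UL;  /* some unrelated default bit */

	def_flags = update_def_flags(def_flags, MCL_FUTURE);
	def_flags = update_def_flags(def_flags, 0);
	printf("unrelated bit preserved: %d\n", (def_flags & 0x00100000UL) != 0);
	return 0;
}
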
71258 diff --git a/mm/mmap.c b/mm/mmap.c
71259 index da15a79..314aef3 100644
71260 --- a/mm/mmap.c
71261 +++ b/mm/mmap.c
71262 @@ -46,6 +46,16 @@
71263 #define arch_rebalance_pgtables(addr, len) (addr)
71264 #endif
71265
71266 +static inline void verify_mm_writelocked(struct mm_struct *mm)
71267 +{
71268 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
71269 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71270 + up_read(&mm->mmap_sem);
71271 + BUG();
71272 + }
71273 +#endif
71274 +}
71275 +
71276 static void unmap_region(struct mm_struct *mm,
71277 struct vm_area_struct *vma, struct vm_area_struct *prev,
71278 unsigned long start, unsigned long end);
71279 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
71280 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
71281 *
71282 */
71283 -pgprot_t protection_map[16] = {
71284 +pgprot_t protection_map[16] __read_only = {
71285 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
71286 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
71287 };
71288
71289 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
71290 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
71291 {
71292 - return __pgprot(pgprot_val(protection_map[vm_flags &
71293 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
71294 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
71295 pgprot_val(arch_vm_get_page_prot(vm_flags)));
71296 +
71297 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71298 + if (!(__supported_pte_mask & _PAGE_NX) &&
71299 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
71300 + (vm_flags & (VM_READ | VM_WRITE)))
71301 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
71302 +#endif
71303 +
71304 + return prot;
71305 }
71306 EXPORT_SYMBOL(vm_get_page_prot);
71307
71308 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
71309 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
71310 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
71311 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
71312 /*
71313 * Make sure vm_committed_as in one cacheline and not cacheline shared with
71314 * other variables. It can be updated by several CPUs frequently.
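
The hunk above marks protection_map read-only, introduces the sysctl_heap_stack_gap default of 64KB, and extends vm_get_page_prot() so that on x86-32 PAGEEXEC kernels without hardware NX the exec permission is stripped from VM_PAGEEXEC mappings. The baseline behaviour it builds on is a 16-entry table indexed by the low READ/WRITE/EXEC/SHARED bits; a sketch of that lookup, where the table contents are just labels rather than real pgprot values:

#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

static const char *protection_map[16] = {
	"---p", "r--p", "-w-p", "rw-p", "--xp", "r-xp", "-wxp", "rwxp",
	"---s", "r--s", "-w-s", "rw-s", "--xs", "r-xs", "-wxs", "rwxs",
};

static const char *vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
	printf("private rw-: %s\n", vm_get_page_prot(VM_READ | VM_WRITE));
	printf("shared  r-x: %s\n", vm_get_page_prot(VM_READ | VM_EXEC | VM_SHARED));
	return 0;
}
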
71315 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
71316 struct vm_area_struct *next = vma->vm_next;
71317
71318 might_sleep();
71319 + BUG_ON(vma->vm_mirror);
71320 if (vma->vm_ops && vma->vm_ops->close)
71321 vma->vm_ops->close(vma);
71322 if (vma->vm_file) {
71323 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
71324 * not page aligned -Ram Gupta
71325 */
71326 rlim = rlimit(RLIMIT_DATA);
71327 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
71328 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
71329 (mm->end_data - mm->start_data) > rlim)
71330 goto out;
71331 @@ -689,6 +711,12 @@ static int
71332 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
71333 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71334 {
71335 +
71336 +#ifdef CONFIG_PAX_SEGMEXEC
71337 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
71338 + return 0;
71339 +#endif
71340 +
71341 if (is_mergeable_vma(vma, file, vm_flags) &&
71342 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71343 if (vma->vm_pgoff == vm_pgoff)
71344 @@ -708,6 +736,12 @@ static int
71345 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71346 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71347 {
71348 +
71349 +#ifdef CONFIG_PAX_SEGMEXEC
71350 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
71351 + return 0;
71352 +#endif
71353 +
71354 if (is_mergeable_vma(vma, file, vm_flags) &&
71355 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71356 pgoff_t vm_pglen;
71357 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71358 struct vm_area_struct *vma_merge(struct mm_struct *mm,
71359 struct vm_area_struct *prev, unsigned long addr,
71360 unsigned long end, unsigned long vm_flags,
71361 - struct anon_vma *anon_vma, struct file *file,
71362 + struct anon_vma *anon_vma, struct file *file,
71363 pgoff_t pgoff, struct mempolicy *policy)
71364 {
71365 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
71366 struct vm_area_struct *area, *next;
71367 int err;
71368
71369 +#ifdef CONFIG_PAX_SEGMEXEC
71370 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
71371 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
71372 +
71373 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
71374 +#endif
71375 +
71376 /*
71377 * We later require that vma->vm_flags == vm_flags,
71378 * so this tests vma->vm_flags & VM_SPECIAL, too.
71379 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71380 if (next && next->vm_end == end) /* cases 6, 7, 8 */
71381 next = next->vm_next;
71382
71383 +#ifdef CONFIG_PAX_SEGMEXEC
71384 + if (prev)
71385 + prev_m = pax_find_mirror_vma(prev);
71386 + if (area)
71387 + area_m = pax_find_mirror_vma(area);
71388 + if (next)
71389 + next_m = pax_find_mirror_vma(next);
71390 +#endif
71391 +
71392 /*
71393 * Can it merge with the predecessor?
71394 */
71395 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71396 /* cases 1, 6 */
71397 err = vma_adjust(prev, prev->vm_start,
71398 next->vm_end, prev->vm_pgoff, NULL);
71399 - } else /* cases 2, 5, 7 */
71400 +
71401 +#ifdef CONFIG_PAX_SEGMEXEC
71402 + if (!err && prev_m)
71403 + err = vma_adjust(prev_m, prev_m->vm_start,
71404 + next_m->vm_end, prev_m->vm_pgoff, NULL);
71405 +#endif
71406 +
71407 + } else { /* cases 2, 5, 7 */
71408 err = vma_adjust(prev, prev->vm_start,
71409 end, prev->vm_pgoff, NULL);
71410 +
71411 +#ifdef CONFIG_PAX_SEGMEXEC
71412 + if (!err && prev_m)
71413 + err = vma_adjust(prev_m, prev_m->vm_start,
71414 + end_m, prev_m->vm_pgoff, NULL);
71415 +#endif
71416 +
71417 + }
71418 if (err)
71419 return NULL;
71420 khugepaged_enter_vma_merge(prev);
71421 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71422 mpol_equal(policy, vma_policy(next)) &&
71423 can_vma_merge_before(next, vm_flags,
71424 anon_vma, file, pgoff+pglen)) {
71425 - if (prev && addr < prev->vm_end) /* case 4 */
71426 + if (prev && addr < prev->vm_end) { /* case 4 */
71427 err = vma_adjust(prev, prev->vm_start,
71428 addr, prev->vm_pgoff, NULL);
71429 - else /* cases 3, 8 */
71430 +
71431 +#ifdef CONFIG_PAX_SEGMEXEC
71432 + if (!err && prev_m)
71433 + err = vma_adjust(prev_m, prev_m->vm_start,
71434 + addr_m, prev_m->vm_pgoff, NULL);
71435 +#endif
71436 +
71437 + } else { /* cases 3, 8 */
71438 err = vma_adjust(area, addr, next->vm_end,
71439 next->vm_pgoff - pglen, NULL);
71440 +
71441 +#ifdef CONFIG_PAX_SEGMEXEC
71442 + if (!err && area_m)
71443 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
71444 + next_m->vm_pgoff - pglen, NULL);
71445 +#endif
71446 +
71447 + }
71448 if (err)
71449 return NULL;
71450 khugepaged_enter_vma_merge(area);
71451 @@ -921,14 +1001,11 @@ none:
71452 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
71453 struct file *file, long pages)
71454 {
71455 - const unsigned long stack_flags
71456 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
71457 -
71458 if (file) {
71459 mm->shared_vm += pages;
71460 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
71461 mm->exec_vm += pages;
71462 - } else if (flags & stack_flags)
71463 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
71464 mm->stack_vm += pages;
71465 if (flags & (VM_RESERVED|VM_IO))
71466 mm->reserved_vm += pages;
71467 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71468 * (the exception is when the underlying filesystem is noexec
71469 * mounted, in which case we dont add PROT_EXEC.)
71470 */
71471 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71472 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71473 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
71474 prot |= PROT_EXEC;
71475
71476 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71477 /* Obtain the address to map to. we verify (or select) it and ensure
71478 * that it represents a valid section of the address space.
71479 */
71480 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
71481 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
71482 if (addr & ~PAGE_MASK)
71483 return addr;
71484
71485 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71486 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
71487 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
71488
71489 +#ifdef CONFIG_PAX_MPROTECT
71490 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71491 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71492 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
71493 + gr_log_rwxmmap(file);
71494 +
71495 +#ifdef CONFIG_PAX_EMUPLT
71496 + vm_flags &= ~VM_EXEC;
71497 +#else
71498 + return -EPERM;
71499 +#endif
71500 +
71501 + }
71502 +
71503 + if (!(vm_flags & VM_EXEC))
71504 + vm_flags &= ~VM_MAYEXEC;
71505 +#else
71506 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71507 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71508 +#endif
71509 + else
71510 + vm_flags &= ~VM_MAYWRITE;
71511 + }
71512 +#endif
71513 +
71514 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71515 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
71516 + vm_flags &= ~VM_PAGEEXEC;
71517 +#endif
71518 +
71519 if (flags & MAP_LOCKED)
71520 if (!can_do_mlock())
71521 return -EPERM;
71522 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71523 locked += mm->locked_vm;
71524 lock_limit = rlimit(RLIMIT_MEMLOCK);
71525 lock_limit >>= PAGE_SHIFT;
71526 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71527 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
71528 return -EAGAIN;
71529 }
71530 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71531 if (error)
71532 return error;
71533
71534 + if (!gr_acl_handle_mmap(file, prot))
71535 + return -EACCES;
71536 +
71537 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
71538 }
71539 EXPORT_SYMBOL(do_mmap_pgoff);
71540 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
71541 vm_flags_t vm_flags = vma->vm_flags;
71542
71543 /* If it was private or non-writable, the write bit is already clear */
71544 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
71545 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
71546 return 0;
71547
71548 /* The backer wishes to know when pages are first written to? */
71549 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
71550 unsigned long charged = 0;
71551 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
71552
71553 +#ifdef CONFIG_PAX_SEGMEXEC
71554 + struct vm_area_struct *vma_m = NULL;
71555 +#endif
71556 +
71557 + /*
71558 + * mm->mmap_sem is required to protect against another thread
71559 + * changing the mappings in case we sleep.
71560 + */
71561 + verify_mm_writelocked(mm);
71562 +
71563 /* Clear old maps */
71564 error = -ENOMEM;
71565 -munmap_back:
71566 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71567 if (vma && vma->vm_start < addr + len) {
71568 if (do_munmap(mm, addr, len))
71569 return -ENOMEM;
71570 - goto munmap_back;
71571 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71572 + BUG_ON(vma && vma->vm_start < addr + len);
71573 }
71574
71575 /* Check against address space limit. */
71576 @@ -1258,6 +1379,16 @@ munmap_back:
71577 goto unacct_error;
71578 }
71579
71580 +#ifdef CONFIG_PAX_SEGMEXEC
71581 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71582 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71583 + if (!vma_m) {
71584 + error = -ENOMEM;
71585 + goto free_vma;
71586 + }
71587 + }
71588 +#endif
71589 +
71590 vma->vm_mm = mm;
71591 vma->vm_start = addr;
71592 vma->vm_end = addr + len;
71593 @@ -1282,6 +1413,19 @@ munmap_back:
71594 error = file->f_op->mmap(file, vma);
71595 if (error)
71596 goto unmap_and_free_vma;
71597 +
71598 +#ifdef CONFIG_PAX_SEGMEXEC
71599 + if (vma_m && (vm_flags & VM_EXECUTABLE))
71600 + added_exe_file_vma(mm);
71601 +#endif
71602 +
71603 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71604 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71605 + vma->vm_flags |= VM_PAGEEXEC;
71606 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71607 + }
71608 +#endif
71609 +
71610 if (vm_flags & VM_EXECUTABLE)
71611 added_exe_file_vma(mm);
71612
71613 @@ -1319,6 +1463,11 @@ munmap_back:
71614 vma_link(mm, vma, prev, rb_link, rb_parent);
71615 file = vma->vm_file;
71616
71617 +#ifdef CONFIG_PAX_SEGMEXEC
71618 + if (vma_m)
71619 + BUG_ON(pax_mirror_vma(vma_m, vma));
71620 +#endif
71621 +
71622 /* Once vma denies write, undo our temporary denial count */
71623 if (correct_wcount)
71624 atomic_inc(&inode->i_writecount);
71625 @@ -1327,6 +1476,7 @@ out:
71626
71627 mm->total_vm += len >> PAGE_SHIFT;
71628 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71629 + track_exec_limit(mm, addr, addr + len, vm_flags);
71630 if (vm_flags & VM_LOCKED) {
71631 if (!mlock_vma_pages_range(vma, addr, addr + len))
71632 mm->locked_vm += (len >> PAGE_SHIFT);
71633 @@ -1344,6 +1494,12 @@ unmap_and_free_vma:
71634 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71635 charged = 0;
71636 free_vma:
71637 +
71638 +#ifdef CONFIG_PAX_SEGMEXEC
71639 + if (vma_m)
71640 + kmem_cache_free(vm_area_cachep, vma_m);
71641 +#endif
71642 +
71643 kmem_cache_free(vm_area_cachep, vma);
71644 unacct_error:
71645 if (charged)
71646 @@ -1351,6 +1507,44 @@ unacct_error:
71647 return error;
71648 }
71649
71650 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71651 +{
71652 + if (!vma) {
71653 +#ifdef CONFIG_STACK_GROWSUP
71654 + if (addr > sysctl_heap_stack_gap)
71655 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71656 + else
71657 + vma = find_vma(current->mm, 0);
71658 + if (vma && (vma->vm_flags & VM_GROWSUP))
71659 + return false;
71660 +#endif
71661 + return true;
71662 + }
71663 +
71664 + if (addr + len > vma->vm_start)
71665 + return false;
71666 +
71667 + if (vma->vm_flags & VM_GROWSDOWN)
71668 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71669 +#ifdef CONFIG_STACK_GROWSUP
71670 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71671 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71672 +#endif
71673 +
71674 + return true;
71675 +}
71676 +
71677 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71678 +{
71679 + if (vma->vm_start < len)
71680 + return -ENOMEM;
71681 + if (!(vma->vm_flags & VM_GROWSDOWN))
71682 + return vma->vm_start - len;
71683 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
71684 + return vma->vm_start - len - sysctl_heap_stack_gap;
71685 + return -ENOMEM;
71686 +}
71687 +
71688 /* Get an address range which is currently unmapped.
71689 * For shmat() with addr=0.
71690 *
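
check_heap_stack_gap() and skip_heap_stack_gap(), added just above, are used by every get_unmapped_area variant below: a candidate range is rejected if it would overlap the next vma or come within sysctl_heap_stack_gap bytes of a stack vma, and the top-down allocator uses skip_heap_stack_gap() to jump past such a gap. A user-space model of the placement check, with a simplified vma struct standing in for the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN 0x1UL

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
};

static unsigned long heap_stack_gap = 64 * 1024;   /* sysctl default in the patch */

/* 'vma' is the first mapping at or above 'addr', or NULL if none. */
static bool check_heap_stack_gap(const struct vma *vma,
				 unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;
	if (addr + len > vma->vm_start)             /* would overlap */
		return false;
	if (vma->vm_flags & VM_GROWSDOWN)           /* keep a gap below a stack */
		return vma->vm_start - (addr + len) >= heap_stack_gap;
	return true;
}

int main(void)
{
	struct vma stack = { 0xbf800000UL, 0xbfff0000UL, VM_GROWSDOWN };

	printf("right under the stack: %d\n",
	       check_heap_stack_gap(&stack, 0xbf7ff000UL, 0x1000));   /* 0 */
	printf("64KB below the stack:  %d\n",
	       check_heap_stack_gap(&stack, 0xbf7e0000UL, 0x10000));  /* 1 */
	return 0;
}
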
71691 @@ -1377,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71692 if (flags & MAP_FIXED)
71693 return addr;
71694
71695 +#ifdef CONFIG_PAX_RANDMMAP
71696 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71697 +#endif
71698 +
71699 if (addr) {
71700 addr = PAGE_ALIGN(addr);
71701 - vma = find_vma(mm, addr);
71702 - if (TASK_SIZE - len >= addr &&
71703 - (!vma || addr + len <= vma->vm_start))
71704 - return addr;
71705 + if (TASK_SIZE - len >= addr) {
71706 + vma = find_vma(mm, addr);
71707 + if (check_heap_stack_gap(vma, addr, len))
71708 + return addr;
71709 + }
71710 }
71711 if (len > mm->cached_hole_size) {
71712 - start_addr = addr = mm->free_area_cache;
71713 + start_addr = addr = mm->free_area_cache;
71714 } else {
71715 - start_addr = addr = TASK_UNMAPPED_BASE;
71716 - mm->cached_hole_size = 0;
71717 + start_addr = addr = mm->mmap_base;
71718 + mm->cached_hole_size = 0;
71719 }
71720
71721 full_search:
71722 @@ -1399,34 +1598,40 @@ full_search:
71723 * Start a new search - just in case we missed
71724 * some holes.
71725 */
71726 - if (start_addr != TASK_UNMAPPED_BASE) {
71727 - addr = TASK_UNMAPPED_BASE;
71728 - start_addr = addr;
71729 + if (start_addr != mm->mmap_base) {
71730 + start_addr = addr = mm->mmap_base;
71731 mm->cached_hole_size = 0;
71732 goto full_search;
71733 }
71734 return -ENOMEM;
71735 }
71736 - if (!vma || addr + len <= vma->vm_start) {
71737 - /*
71738 - * Remember the place where we stopped the search:
71739 - */
71740 - mm->free_area_cache = addr + len;
71741 - return addr;
71742 - }
71743 + if (check_heap_stack_gap(vma, addr, len))
71744 + break;
71745 if (addr + mm->cached_hole_size < vma->vm_start)
71746 mm->cached_hole_size = vma->vm_start - addr;
71747 addr = vma->vm_end;
71748 }
71749 +
71750 + /*
71751 + * Remember the place where we stopped the search:
71752 + */
71753 + mm->free_area_cache = addr + len;
71754 + return addr;
71755 }
71756 #endif
71757
71758 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71759 {
71760 +
71761 +#ifdef CONFIG_PAX_SEGMEXEC
71762 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71763 + return;
71764 +#endif
71765 +
71766 /*
71767 * Is this a new hole at the lowest possible address?
71768 */
71769 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
71770 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
71771 mm->free_area_cache = addr;
71772 mm->cached_hole_size = ~0UL;
71773 }
71774 @@ -1444,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71775 {
71776 struct vm_area_struct *vma;
71777 struct mm_struct *mm = current->mm;
71778 - unsigned long addr = addr0;
71779 + unsigned long base = mm->mmap_base, addr = addr0;
71780
71781 /* requested length too big for entire address space */
71782 if (len > TASK_SIZE)
71783 @@ -1453,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71784 if (flags & MAP_FIXED)
71785 return addr;
71786
71787 +#ifdef CONFIG_PAX_RANDMMAP
71788 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71789 +#endif
71790 +
71791 /* requesting a specific address */
71792 if (addr) {
71793 addr = PAGE_ALIGN(addr);
71794 - vma = find_vma(mm, addr);
71795 - if (TASK_SIZE - len >= addr &&
71796 - (!vma || addr + len <= vma->vm_start))
71797 - return addr;
71798 + if (TASK_SIZE - len >= addr) {
71799 + vma = find_vma(mm, addr);
71800 + if (check_heap_stack_gap(vma, addr, len))
71801 + return addr;
71802 + }
71803 }
71804
71805 /* check if free_area_cache is useful for us */
71806 @@ -1474,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71807 /* make sure it can fit in the remaining address space */
71808 if (addr > len) {
71809 vma = find_vma(mm, addr-len);
71810 - if (!vma || addr <= vma->vm_start)
71811 + if (check_heap_stack_gap(vma, addr - len, len))
71812 /* remember the address as a hint for next time */
71813 return (mm->free_area_cache = addr-len);
71814 }
71815 @@ -1491,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71816 * return with success:
71817 */
71818 vma = find_vma(mm, addr);
71819 - if (!vma || addr+len <= vma->vm_start)
71820 + if (check_heap_stack_gap(vma, addr, len))
71821 /* remember the address as a hint for next time */
71822 return (mm->free_area_cache = addr);
71823
71824 @@ -1500,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71825 mm->cached_hole_size = vma->vm_start - addr;
71826
71827 /* try just below the current vma->vm_start */
71828 - addr = vma->vm_start-len;
71829 - } while (len < vma->vm_start);
71830 + addr = skip_heap_stack_gap(vma, len);
71831 + } while (!IS_ERR_VALUE(addr));
71832
71833 bottomup:
71834 /*
71835 @@ -1510,13 +1720,21 @@ bottomup:
71836 * can happen with large stack limits and large mmap()
71837 * allocations.
71838 */
71839 + mm->mmap_base = TASK_UNMAPPED_BASE;
71840 +
71841 +#ifdef CONFIG_PAX_RANDMMAP
71842 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71843 + mm->mmap_base += mm->delta_mmap;
71844 +#endif
71845 +
71846 + mm->free_area_cache = mm->mmap_base;
71847 mm->cached_hole_size = ~0UL;
71848 - mm->free_area_cache = TASK_UNMAPPED_BASE;
71849 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71850 /*
71851 * Restore the topdown base:
71852 */
71853 - mm->free_area_cache = mm->mmap_base;
71854 + mm->mmap_base = base;
71855 + mm->free_area_cache = base;
71856 mm->cached_hole_size = ~0UL;
71857
71858 return addr;
71859 @@ -1525,6 +1743,12 @@ bottomup:
71860
71861 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71862 {
71863 +
71864 +#ifdef CONFIG_PAX_SEGMEXEC
71865 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71866 + return;
71867 +#endif
71868 +
71869 /*
71870 * Is this a new hole at the highest possible address?
71871 */
71872 @@ -1532,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71873 mm->free_area_cache = addr;
71874
71875 /* dont allow allocations above current base */
71876 - if (mm->free_area_cache > mm->mmap_base)
71877 + if (mm->free_area_cache > mm->mmap_base) {
71878 mm->free_area_cache = mm->mmap_base;
71879 + mm->cached_hole_size = ~0UL;
71880 + }
71881 }
71882
71883 unsigned long
71884 @@ -1629,6 +1855,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
71885 return vma;
71886 }
71887
71888 +#ifdef CONFIG_PAX_SEGMEXEC
71889 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71890 +{
71891 + struct vm_area_struct *vma_m;
71892 +
71893 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71894 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71895 + BUG_ON(vma->vm_mirror);
71896 + return NULL;
71897 + }
71898 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71899 + vma_m = vma->vm_mirror;
71900 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71901 + BUG_ON(vma->vm_file != vma_m->vm_file);
71902 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71903 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71904 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71905 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71906 + return vma_m;
71907 +}
71908 +#endif
71909 +
71910 /*
71911 * Verify that the stack growth is acceptable and
71912 * update accounting. This is shared with both the
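
pax_find_mirror_vma(), defined in the hunk above, returns the executable twin of a SEGMEXEC vma and BUG()s if the bookkeeping is inconsistent: the two vmas must be cross-linked, equal in length and file offset, share the file and anon_vma root, and differ only in a whitelist of flags. A simplified consistency checker; types and the flag mask below are illustrative, not the kernel's full set:

#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE    0x2UL
#define VM_MAYWRITE 0x20UL
#define VM_MIRROR_IGNORE (VM_WRITE | VM_MAYWRITE)   /* subset of the real mask */

struct vma {
	unsigned long vm_start, vm_end, vm_pgoff, vm_flags;
	struct vma *vm_mirror;
};

static bool mirror_consistent(const struct vma *vma)
{
	const struct vma *m = vma->vm_mirror;

	if (!m || m->vm_mirror != vma)
		return false;
	if (vma->vm_end - vma->vm_start != m->vm_end - m->vm_start)
		return false;
	if (vma->vm_pgoff != m->vm_pgoff)
		return false;
	return ((vma->vm_flags ^ m->vm_flags) & ~VM_MIRROR_IGNORE) == 0;
}

int main(void)
{
	struct vma text   = { 0x08048000, 0x08050000, 0, VM_WRITE, NULL };
	struct vma mirror = { 0x68048000, 0x68050000, 0, 0,        &text };

	text.vm_mirror = &mirror;
	printf("consistent: %d\n", mirror_consistent(&text));   /* 1 */
	return 0;
}
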
71913 @@ -1645,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71914 return -ENOMEM;
71915
71916 /* Stack limit test */
71917 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
71918 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71919 return -ENOMEM;
71920
71921 @@ -1655,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71922 locked = mm->locked_vm + grow;
71923 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71924 limit >>= PAGE_SHIFT;
71925 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71926 if (locked > limit && !capable(CAP_IPC_LOCK))
71927 return -ENOMEM;
71928 }
71929 @@ -1685,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71930 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71931 * vma is the last one with address > vma->vm_end. Have to extend vma.
71932 */
71933 +#ifndef CONFIG_IA64
71934 +static
71935 +#endif
71936 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71937 {
71938 int error;
71939 + bool locknext;
71940
71941 if (!(vma->vm_flags & VM_GROWSUP))
71942 return -EFAULT;
71943
71944 + /* Also guard against wrapping around to address 0. */
71945 + if (address < PAGE_ALIGN(address+1))
71946 + address = PAGE_ALIGN(address+1);
71947 + else
71948 + return -ENOMEM;
71949 +
71950 /*
71951 * We must make sure the anon_vma is allocated
71952 * so that the anon_vma locking is not a noop.
71953 */
71954 if (unlikely(anon_vma_prepare(vma)))
71955 return -ENOMEM;
71956 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71957 + if (locknext && anon_vma_prepare(vma->vm_next))
71958 + return -ENOMEM;
71959 vma_lock_anon_vma(vma);
71960 + if (locknext)
71961 + vma_lock_anon_vma(vma->vm_next);
71962
71963 /*
71964 * vma->vm_start/vm_end cannot change under us because the caller
71965 * is required to hold the mmap_sem in read mode. We need the
71966 - * anon_vma lock to serialize against concurrent expand_stacks.
71967 - * Also guard against wrapping around to address 0.
71968 + * anon_vma locks to serialize against concurrent expand_stacks
71969 + * and expand_upwards.
71970 */
71971 - if (address < PAGE_ALIGN(address+4))
71972 - address = PAGE_ALIGN(address+4);
71973 - else {
71974 - vma_unlock_anon_vma(vma);
71975 - return -ENOMEM;
71976 - }
71977 error = 0;
71978
71979 /* Somebody else might have raced and expanded it already */
71980 - if (address > vma->vm_end) {
71981 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71982 + error = -ENOMEM;
71983 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71984 unsigned long size, grow;
71985
71986 size = address - vma->vm_start;
71987 @@ -1730,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71988 }
71989 }
71990 }
71991 + if (locknext)
71992 + vma_unlock_anon_vma(vma->vm_next);
71993 vma_unlock_anon_vma(vma);
71994 khugepaged_enter_vma_merge(vma);
71995 return error;
71996 @@ -1743,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
71997 unsigned long address)
71998 {
71999 int error;
72000 + bool lockprev = false;
72001 + struct vm_area_struct *prev;
72002
72003 /*
72004 * We must make sure the anon_vma is allocated
72005 @@ -1756,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
72006 if (error)
72007 return error;
72008
72009 + prev = vma->vm_prev;
72010 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
72011 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
72012 +#endif
72013 + if (lockprev && anon_vma_prepare(prev))
72014 + return -ENOMEM;
72015 + if (lockprev)
72016 + vma_lock_anon_vma(prev);
72017 +
72018 vma_lock_anon_vma(vma);
72019
72020 /*
72021 @@ -1765,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
72022 */
72023
72024 /* Somebody else might have raced and expanded it already */
72025 - if (address < vma->vm_start) {
72026 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
72027 + error = -ENOMEM;
72028 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
72029 unsigned long size, grow;
72030
72031 +#ifdef CONFIG_PAX_SEGMEXEC
72032 + struct vm_area_struct *vma_m;
72033 +
72034 + vma_m = pax_find_mirror_vma(vma);
72035 +#endif
72036 +
72037 size = vma->vm_end - address;
72038 grow = (vma->vm_start - address) >> PAGE_SHIFT;
72039
72040 @@ -1777,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
72041 if (!error) {
72042 vma->vm_start = address;
72043 vma->vm_pgoff -= grow;
72044 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
72045 +
72046 +#ifdef CONFIG_PAX_SEGMEXEC
72047 + if (vma_m) {
72048 + vma_m->vm_start -= grow << PAGE_SHIFT;
72049 + vma_m->vm_pgoff -= grow;
72050 + }
72051 +#endif
72052 +
72053 perf_event_mmap(vma);
72054 }
72055 }
72056 }
72057 vma_unlock_anon_vma(vma);
72058 + if (lockprev)
72059 + vma_unlock_anon_vma(prev);
72060 khugepaged_enter_vma_merge(vma);
72061 return error;
72062 }
72063 @@ -1851,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
72064 do {
72065 long nrpages = vma_pages(vma);
72066
72067 +#ifdef CONFIG_PAX_SEGMEXEC
72068 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
72069 + vma = remove_vma(vma);
72070 + continue;
72071 + }
72072 +#endif
72073 +
72074 mm->total_vm -= nrpages;
72075 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
72076 vma = remove_vma(vma);
72077 @@ -1896,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
72078 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
72079 vma->vm_prev = NULL;
72080 do {
72081 +
72082 +#ifdef CONFIG_PAX_SEGMEXEC
72083 + if (vma->vm_mirror) {
72084 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
72085 + vma->vm_mirror->vm_mirror = NULL;
72086 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
72087 + vma->vm_mirror = NULL;
72088 + }
72089 +#endif
72090 +
72091 rb_erase(&vma->vm_rb, &mm->mm_rb);
72092 mm->map_count--;
72093 tail_vma = vma;
72094 @@ -1924,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72095 struct vm_area_struct *new;
72096 int err = -ENOMEM;
72097
72098 +#ifdef CONFIG_PAX_SEGMEXEC
72099 + struct vm_area_struct *vma_m, *new_m = NULL;
72100 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
72101 +#endif
72102 +
72103 if (is_vm_hugetlb_page(vma) && (addr &
72104 ~(huge_page_mask(hstate_vma(vma)))))
72105 return -EINVAL;
72106
72107 +#ifdef CONFIG_PAX_SEGMEXEC
72108 + vma_m = pax_find_mirror_vma(vma);
72109 +#endif
72110 +
72111 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72112 if (!new)
72113 goto out_err;
72114
72115 +#ifdef CONFIG_PAX_SEGMEXEC
72116 + if (vma_m) {
72117 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72118 + if (!new_m) {
72119 + kmem_cache_free(vm_area_cachep, new);
72120 + goto out_err;
72121 + }
72122 + }
72123 +#endif
72124 +
72125 /* most fields are the same, copy all, and then fixup */
72126 *new = *vma;
72127
72128 @@ -1944,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72129 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
72130 }
72131
72132 +#ifdef CONFIG_PAX_SEGMEXEC
72133 + if (vma_m) {
72134 + *new_m = *vma_m;
72135 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
72136 + new_m->vm_mirror = new;
72137 + new->vm_mirror = new_m;
72138 +
72139 + if (new_below)
72140 + new_m->vm_end = addr_m;
72141 + else {
72142 + new_m->vm_start = addr_m;
72143 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
72144 + }
72145 + }
72146 +#endif
72147 +
72148 pol = mpol_dup(vma_policy(vma));
72149 if (IS_ERR(pol)) {
72150 err = PTR_ERR(pol);
72151 @@ -1969,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72152 else
72153 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
72154
72155 +#ifdef CONFIG_PAX_SEGMEXEC
72156 + if (!err && vma_m) {
72157 + if (anon_vma_clone(new_m, vma_m))
72158 + goto out_free_mpol;
72159 +
72160 + mpol_get(pol);
72161 + vma_set_policy(new_m, pol);
72162 +
72163 + if (new_m->vm_file) {
72164 + get_file(new_m->vm_file);
72165 + if (vma_m->vm_flags & VM_EXECUTABLE)
72166 + added_exe_file_vma(mm);
72167 + }
72168 +
72169 + if (new_m->vm_ops && new_m->vm_ops->open)
72170 + new_m->vm_ops->open(new_m);
72171 +
72172 + if (new_below)
72173 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
72174 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
72175 + else
72176 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
72177 +
72178 + if (err) {
72179 + if (new_m->vm_ops && new_m->vm_ops->close)
72180 + new_m->vm_ops->close(new_m);
72181 + if (new_m->vm_file) {
72182 + if (vma_m->vm_flags & VM_EXECUTABLE)
72183 + removed_exe_file_vma(mm);
72184 + fput(new_m->vm_file);
72185 + }
72186 + mpol_put(pol);
72187 + }
72188 + }
72189 +#endif
72190 +
72191 /* Success. */
72192 if (!err)
72193 return 0;
72194 @@ -1981,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72195 removed_exe_file_vma(mm);
72196 fput(new->vm_file);
72197 }
72198 - unlink_anon_vmas(new);
72199 out_free_mpol:
72200 mpol_put(pol);
72201 out_free_vma:
72202 +
72203 +#ifdef CONFIG_PAX_SEGMEXEC
72204 + if (new_m) {
72205 + unlink_anon_vmas(new_m);
72206 + kmem_cache_free(vm_area_cachep, new_m);
72207 + }
72208 +#endif
72209 +
72210 + unlink_anon_vmas(new);
72211 kmem_cache_free(vm_area_cachep, new);
72212 out_err:
72213 return err;
72214 @@ -1997,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72215 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72216 unsigned long addr, int new_below)
72217 {
72218 +
72219 +#ifdef CONFIG_PAX_SEGMEXEC
72220 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72221 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
72222 + if (mm->map_count >= sysctl_max_map_count-1)
72223 + return -ENOMEM;
72224 + } else
72225 +#endif
72226 +
72227 if (mm->map_count >= sysctl_max_map_count)
72228 return -ENOMEM;
72229
72230 @@ -2008,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72231 * work. This now handles partial unmappings.
72232 * Jeremy Fitzhardinge <jeremy@goop.org>
72233 */
72234 +#ifdef CONFIG_PAX_SEGMEXEC
72235 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72236 {
72237 + int ret = __do_munmap(mm, start, len);
72238 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
72239 + return ret;
72240 +
72241 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
72242 +}
72243 +
72244 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72245 +#else
72246 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72247 +#endif
72248 +{
72249 unsigned long end;
72250 struct vm_area_struct *vma, *prev, *last;
72251
72252 + /*
72253 + * mm->mmap_sem is required to protect against another thread
72254 + * changing the mappings in case we sleep.
72255 + */
72256 + verify_mm_writelocked(mm);
72257 +
72258 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
72259 return -EINVAL;
72260
72261 @@ -2087,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72262 /* Fix up all other VM information */
72263 remove_vma_list(mm, vma);
72264
72265 + track_exec_limit(mm, start, end, 0UL);
72266 +
72267 return 0;
72268 }
72269
72270 @@ -2099,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
72271
72272 profile_munmap(addr);
72273
72274 +#ifdef CONFIG_PAX_SEGMEXEC
72275 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
72276 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
72277 + return -EINVAL;
72278 +#endif
72279 +
72280 down_write(&mm->mmap_sem);
72281 ret = do_munmap(mm, addr, len);
72282 up_write(&mm->mmap_sem);
72283 return ret;
72284 }
72285
72286 -static inline void verify_mm_writelocked(struct mm_struct *mm)
72287 -{
72288 -#ifdef CONFIG_DEBUG_VM
72289 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72290 - WARN_ON(1);
72291 - up_read(&mm->mmap_sem);
72292 - }
72293 -#endif
72294 -}
72295 -
72296 /*
72297 * this is really a simplified "do_mmap". it only handles
72298 * anonymous maps. eventually we may be able to do some
72299 @@ -2128,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72300 struct rb_node ** rb_link, * rb_parent;
72301 pgoff_t pgoff = addr >> PAGE_SHIFT;
72302 int error;
72303 + unsigned long charged;
72304
72305 len = PAGE_ALIGN(len);
72306 if (!len)
72307 @@ -2139,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72308
72309 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
72310
72311 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
72312 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
72313 + flags &= ~VM_EXEC;
72314 +
72315 +#ifdef CONFIG_PAX_MPROTECT
72316 + if (mm->pax_flags & MF_PAX_MPROTECT)
72317 + flags &= ~VM_MAYEXEC;
72318 +#endif
72319 +
72320 + }
72321 +#endif
72322 +
72323 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
72324 if (error & ~PAGE_MASK)
72325 return error;
72326
72327 + charged = len >> PAGE_SHIFT;
72328 +
72329 /*
72330 * mlock MCL_FUTURE?
72331 */
72332 if (mm->def_flags & VM_LOCKED) {
72333 unsigned long locked, lock_limit;
72334 - locked = len >> PAGE_SHIFT;
72335 + locked = charged;
72336 locked += mm->locked_vm;
72337 lock_limit = rlimit(RLIMIT_MEMLOCK);
72338 lock_limit >>= PAGE_SHIFT;
72339 @@ -2165,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72340 /*
72341 * Clear old maps. this also does some error checking for us
72342 */
72343 - munmap_back:
72344 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72345 if (vma && vma->vm_start < addr + len) {
72346 if (do_munmap(mm, addr, len))
72347 return -ENOMEM;
72348 - goto munmap_back;
72349 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72350 + BUG_ON(vma && vma->vm_start < addr + len);
72351 }
72352
72353 /* Check against address space limits *after* clearing old maps... */
72354 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
72355 + if (!may_expand_vm(mm, charged))
72356 return -ENOMEM;
72357
72358 if (mm->map_count > sysctl_max_map_count)
72359 return -ENOMEM;
72360
72361 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
72362 + if (security_vm_enough_memory(charged))
72363 return -ENOMEM;
72364
72365 /* Can we just expand an old private anonymous mapping? */
72366 @@ -2194,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72367 */
72368 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72369 if (!vma) {
72370 - vm_unacct_memory(len >> PAGE_SHIFT);
72371 + vm_unacct_memory(charged);
72372 return -ENOMEM;
72373 }
72374
72375 @@ -2208,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72376 vma_link(mm, vma, prev, rb_link, rb_parent);
72377 out:
72378 perf_event_mmap(vma);
72379 - mm->total_vm += len >> PAGE_SHIFT;
72380 + mm->total_vm += charged;
72381 if (flags & VM_LOCKED) {
72382 if (!mlock_vma_pages_range(vma, addr, addr + len))
72383 - mm->locked_vm += (len >> PAGE_SHIFT);
72384 + mm->locked_vm += charged;
72385 }
72386 + track_exec_limit(mm, addr, addr + len, flags);
72387 return addr;
72388 }
72389
72390 @@ -2259,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
72391 * Walk the list again, actually closing and freeing it,
72392 * with preemption enabled, without holding any MM locks.
72393 */
72394 - while (vma)
72395 + while (vma) {
72396 + vma->vm_mirror = NULL;
72397 vma = remove_vma(vma);
72398 + }
72399
72400 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
72401 }
72402 @@ -2274,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72403 struct vm_area_struct * __vma, * prev;
72404 struct rb_node ** rb_link, * rb_parent;
72405
72406 +#ifdef CONFIG_PAX_SEGMEXEC
72407 + struct vm_area_struct *vma_m = NULL;
72408 +#endif
72409 +
72410 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
72411 + return -EPERM;
72412 +
72413 /*
72414 * The vm_pgoff of a purely anonymous vma should be irrelevant
72415 * until its first write fault, when page's anon_vma and index
72416 @@ -2296,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72417 if ((vma->vm_flags & VM_ACCOUNT) &&
72418 security_vm_enough_memory_mm(mm, vma_pages(vma)))
72419 return -ENOMEM;
72420 +
72421 +#ifdef CONFIG_PAX_SEGMEXEC
72422 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
72423 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72424 + if (!vma_m)
72425 + return -ENOMEM;
72426 + }
72427 +#endif
72428 +
72429 vma_link(mm, vma, prev, rb_link, rb_parent);
72430 +
72431 +#ifdef CONFIG_PAX_SEGMEXEC
72432 + if (vma_m)
72433 + BUG_ON(pax_mirror_vma(vma_m, vma));
72434 +#endif
72435 +
72436 return 0;
72437 }
72438
72439 @@ -2315,6 +2770,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72440 struct mempolicy *pol;
72441 bool faulted_in_anon_vma = true;
72442
72443 + BUG_ON(vma->vm_mirror);
72444 +
72445 /*
72446 * If anonymous vma has not yet been faulted, update new pgoff
72447 * to match new location, to increase its chance of merging.
72448 @@ -2382,6 +2839,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72449 return NULL;
72450 }
72451
72452 +#ifdef CONFIG_PAX_SEGMEXEC
72453 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
72454 +{
72455 + struct vm_area_struct *prev_m;
72456 + struct rb_node **rb_link_m, *rb_parent_m;
72457 + struct mempolicy *pol_m;
72458 +
72459 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
72460 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
72461 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
72462 + *vma_m = *vma;
72463 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
72464 + if (anon_vma_clone(vma_m, vma))
72465 + return -ENOMEM;
72466 + pol_m = vma_policy(vma_m);
72467 + mpol_get(pol_m);
72468 + vma_set_policy(vma_m, pol_m);
72469 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
72470 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
72471 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
72472 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
72473 + if (vma_m->vm_file)
72474 + get_file(vma_m->vm_file);
72475 + if (vma_m->vm_ops && vma_m->vm_ops->open)
72476 + vma_m->vm_ops->open(vma_m);
72477 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
72478 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
72479 + vma_m->vm_mirror = vma;
72480 + vma->vm_mirror = vma_m;
72481 + return 0;
72482 +}
72483 +#endif
72484 +
72485 /*
72486 * Return true if the calling process may expand its vm space by the passed
72487 * number of pages
72488 @@ -2393,6 +2883,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
72489
72490 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
72491
72492 +#ifdef CONFIG_PAX_RANDMMAP
72493 + if (mm->pax_flags & MF_PAX_RANDMMAP)
72494 + cur -= mm->brk_gap;
72495 +#endif
72496 +
72497 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
72498 if (cur + npages > lim)
72499 return 0;
72500 return 1;
72501 @@ -2463,6 +2959,22 @@ int install_special_mapping(struct mm_struct *mm,
72502 vma->vm_start = addr;
72503 vma->vm_end = addr + len;
72504
72505 +#ifdef CONFIG_PAX_MPROTECT
72506 + if (mm->pax_flags & MF_PAX_MPROTECT) {
72507 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
72508 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
72509 + return -EPERM;
72510 + if (!(vm_flags & VM_EXEC))
72511 + vm_flags &= ~VM_MAYEXEC;
72512 +#else
72513 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72514 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72515 +#endif
72516 + else
72517 + vm_flags &= ~VM_MAYWRITE;
72518 + }
72519 +#endif
72520 +
72521 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
72522 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72523
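
Both do_mmap_pgoff() and install_special_mapping() in this file apply the same MPROTECT policy when MF_PAX_MPROTECT is set: a mapping that asks for both write and exec is refused (or, with EMUPLT in do_mmap_pgoff(), has VM_EXEC stripped), non-executable mappings lose VM_MAYEXEC, and executable ones lose VM_MAYWRITE, so mprotect() cannot re-add the missing permission later. A sketch of the non-COMPAT policy with the EMUPLT fallback omitted; flag values are illustrative:

#include <stdio.h>

#define VM_WRITE    0x2UL
#define VM_EXEC     0x4UL
#define VM_MAYWRITE 0x20UL
#define VM_MAYEXEC  0x40UL

/* Returns the adjusted flags, or 0 to signal -EPERM. */
static unsigned long mprotect_policy(unsigned long vm_flags)
{
	if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
		return 0;                        /* writable+executable refused */
	if (!(vm_flags & VM_EXEC))
		vm_flags &= ~VM_MAYEXEC;         /* data can never become code */
	else
		vm_flags &= ~VM_MAYWRITE;        /* code can never become writable */
	return vm_flags;
}

int main(void)
{
	printf("rw request keeps MAYEXEC? %d\n",
	       (mprotect_policy(VM_WRITE | VM_MAYWRITE | VM_MAYEXEC) & VM_MAYEXEC) != 0);
	printf("rwx request allowed?     %d\n",
	       mprotect_policy(VM_WRITE | VM_EXEC) != 0);
	return 0;
}
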
72524 diff --git a/mm/mprotect.c b/mm/mprotect.c
72525 index f437d05..e3763f6 100644
72526 --- a/mm/mprotect.c
72527 +++ b/mm/mprotect.c
72528 @@ -23,10 +23,16 @@
72529 #include <linux/mmu_notifier.h>
72530 #include <linux/migrate.h>
72531 #include <linux/perf_event.h>
72532 +
72533 +#ifdef CONFIG_PAX_MPROTECT
72534 +#include <linux/elf.h>
72535 +#endif
72536 +
72537 #include <asm/uaccess.h>
72538 #include <asm/pgtable.h>
72539 #include <asm/cacheflush.h>
72540 #include <asm/tlbflush.h>
72541 +#include <asm/mmu_context.h>
72542
72543 #ifndef pgprot_modify
72544 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
72545 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
72546 flush_tlb_range(vma, start, end);
72547 }
72548
72549 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72550 +/* called while holding the mmap semaphore for writing except stack expansion */
72551 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
72552 +{
72553 + unsigned long oldlimit, newlimit = 0UL;
72554 +
72555 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
72556 + return;
72557 +
72558 + spin_lock(&mm->page_table_lock);
72559 + oldlimit = mm->context.user_cs_limit;
72560 + if ((prot & VM_EXEC) && oldlimit < end)
72561 + /* USER_CS limit moved up */
72562 + newlimit = end;
72563 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
72564 + /* USER_CS limit moved down */
72565 + newlimit = start;
72566 +
72567 + if (newlimit) {
72568 + mm->context.user_cs_limit = newlimit;
72569 +
72570 +#ifdef CONFIG_SMP
72571 + wmb();
72572 + cpus_clear(mm->context.cpu_user_cs_mask);
72573 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
72574 +#endif
72575 +
72576 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
72577 + }
72578 + spin_unlock(&mm->page_table_lock);
72579 + if (newlimit == end) {
72580 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
72581 +
72582 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
72583 + if (is_vm_hugetlb_page(vma))
72584 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72585 + else
72586 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72587 + }
72588 +}
72589 +#endif
72590 +
72591 int
72592 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72593 unsigned long start, unsigned long end, unsigned long newflags)
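
track_exec_limit(), added above under CONFIG_ARCH_TRACK_EXEC_LIMIT, implements the CS-limit approximation of PAGEEXEC on x86-32 CPUs without NX: the mm keeps a single "highest executable address" in context.user_cs_limit, raised when an executable mapping ends above it and lowered to the start of a range whose exec rights are removed across it, after which USER_CS is reloaded and straddling vmas get their protections refreshed. A model of just the limit-update rule, with segment reloading and the per-cpu mask omitted:

#include <stdbool.h>
#include <stdio.h>

static unsigned long update_exec_limit(unsigned long limit,
				       unsigned long start, unsigned long end,
				       bool exec)
{
	if (exec && limit < end)
		return end;     /* USER_CS limit moves up */
	if (!exec && start < limit && limit <= end)
		return start;   /* USER_CS limit moves down */
	return limit;           /* unchanged */
}

int main(void)
{
	unsigned long limit = 0x08050000UL;            /* end of the main text */

	limit = update_exec_limit(limit, 0x40000000UL, 0x40020000UL, true);
	printf("after exec mmap:   %#lx\n", limit);    /* 0x40020000 */
	limit = update_exec_limit(limit, 0x40000000UL, 0x40020000UL, false);
	printf("after mprotect -x: %#lx\n", limit);    /* 0x40000000 */
	return 0;
}
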
72594 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72595 int error;
72596 int dirty_accountable = 0;
72597
72598 +#ifdef CONFIG_PAX_SEGMEXEC
72599 + struct vm_area_struct *vma_m = NULL;
72600 + unsigned long start_m, end_m;
72601 +
72602 + start_m = start + SEGMEXEC_TASK_SIZE;
72603 + end_m = end + SEGMEXEC_TASK_SIZE;
72604 +#endif
72605 +
72606 if (newflags == oldflags) {
72607 *pprev = vma;
72608 return 0;
72609 }
72610
72611 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72612 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72613 +
72614 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72615 + return -ENOMEM;
72616 +
72617 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72618 + return -ENOMEM;
72619 + }
72620 +
72621 /*
72622 * If we make a private mapping writable we increase our commit;
72623 * but (without finer accounting) cannot reduce our commit if we
72624 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72625 }
72626 }
72627
72628 +#ifdef CONFIG_PAX_SEGMEXEC
72629 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72630 + if (start != vma->vm_start) {
72631 + error = split_vma(mm, vma, start, 1);
72632 + if (error)
72633 + goto fail;
72634 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72635 + *pprev = (*pprev)->vm_next;
72636 + }
72637 +
72638 + if (end != vma->vm_end) {
72639 + error = split_vma(mm, vma, end, 0);
72640 + if (error)
72641 + goto fail;
72642 + }
72643 +
72644 + if (pax_find_mirror_vma(vma)) {
72645 + error = __do_munmap(mm, start_m, end_m - start_m);
72646 + if (error)
72647 + goto fail;
72648 + } else {
72649 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72650 + if (!vma_m) {
72651 + error = -ENOMEM;
72652 + goto fail;
72653 + }
72654 + vma->vm_flags = newflags;
72655 + error = pax_mirror_vma(vma_m, vma);
72656 + if (error) {
72657 + vma->vm_flags = oldflags;
72658 + goto fail;
72659 + }
72660 + }
72661 + }
72662 +#endif
72663 +
72664 /*
72665 * First try to merge with previous and/or next vma.
72666 */
72667 @@ -204,9 +306,21 @@ success:
72668 * vm_flags and vm_page_prot are protected by the mmap_sem
72669 * held in write mode.
72670 */
72671 +
72672 +#ifdef CONFIG_PAX_SEGMEXEC
72673 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72674 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72675 +#endif
72676 +
72677 vma->vm_flags = newflags;
72678 +
72679 +#ifdef CONFIG_PAX_MPROTECT
72680 + if (mm->binfmt && mm->binfmt->handle_mprotect)
72681 + mm->binfmt->handle_mprotect(vma, newflags);
72682 +#endif
72683 +
72684 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72685 - vm_get_page_prot(newflags));
72686 + vm_get_page_prot(vma->vm_flags));
72687
72688 if (vma_wants_writenotify(vma)) {
72689 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
72690 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72691 end = start + len;
72692 if (end <= start)
72693 return -ENOMEM;
72694 +
72695 +#ifdef CONFIG_PAX_SEGMEXEC
72696 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72697 + if (end > SEGMEXEC_TASK_SIZE)
72698 + return -EINVAL;
72699 + } else
72700 +#endif
72701 +
72702 + if (end > TASK_SIZE)
72703 + return -EINVAL;
72704 +
72705 if (!arch_validate_prot(prot))
72706 return -EINVAL;
72707
72708 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72709 /*
72710 * Does the application expect PROT_READ to imply PROT_EXEC:
72711 */
72712 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72713 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72714 prot |= PROT_EXEC;
72715
72716 vm_flags = calc_vm_prot_bits(prot);
72717 @@ -288,6 +413,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72718 if (start > vma->vm_start)
72719 prev = vma;
72720
72721 +#ifdef CONFIG_PAX_MPROTECT
72722 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72723 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
72724 +#endif
72725 +
72726 for (nstart = start ; ; ) {
72727 unsigned long newflags;
72728
72729 @@ -297,6 +427,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72730
72731 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72732 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72733 + if (prot & (PROT_WRITE | PROT_EXEC))
72734 + gr_log_rwxmprotect(vma->vm_file);
72735 +
72736 + error = -EACCES;
72737 + goto out;
72738 + }
72739 +
72740 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72741 error = -EACCES;
72742 goto out;
72743 }
72744 @@ -311,6 +449,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72745 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72746 if (error)
72747 goto out;
72748 +
72749 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
72750 +
72751 nstart = tmp;
72752
72753 if (nstart < prev->vm_end)
72754 diff --git a/mm/mremap.c b/mm/mremap.c
72755 index 87bb839..c3bfadb 100644
72756 --- a/mm/mremap.c
72757 +++ b/mm/mremap.c
72758 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72759 continue;
72760 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72761 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72762 +
72763 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72764 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72765 + pte = pte_exprotect(pte);
72766 +#endif
72767 +
72768 set_pte_at(mm, new_addr, new_pte, pte);
72769 }
72770
72771 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72772 if (is_vm_hugetlb_page(vma))
72773 goto Einval;
72774
72775 +#ifdef CONFIG_PAX_SEGMEXEC
72776 + if (pax_find_mirror_vma(vma))
72777 + goto Einval;
72778 +#endif
72779 +
72780 /* We can't remap across vm area boundaries */
72781 if (old_len > vma->vm_end - addr)
72782 goto Efault;
72783 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
72784 unsigned long ret = -EINVAL;
72785 unsigned long charged = 0;
72786 unsigned long map_flags;
72787 + unsigned long pax_task_size = TASK_SIZE;
72788
72789 if (new_addr & ~PAGE_MASK)
72790 goto out;
72791
72792 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72793 +#ifdef CONFIG_PAX_SEGMEXEC
72794 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72795 + pax_task_size = SEGMEXEC_TASK_SIZE;
72796 +#endif
72797 +
72798 + pax_task_size -= PAGE_SIZE;
72799 +
72800 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72801 goto out;
72802
72803 /* Check if the location we're moving into overlaps the
72804 * old location at all, and fail if it does.
72805 */
72806 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
72807 - goto out;
72808 -
72809 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
72810 + if (addr + old_len > new_addr && new_addr + new_len > addr)
72811 goto out;
72812
72813 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72814 @@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
72815 struct vm_area_struct *vma;
72816 unsigned long ret = -EINVAL;
72817 unsigned long charged = 0;
72818 + unsigned long pax_task_size = TASK_SIZE;
72819
72820 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72821 goto out;
72822 @@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
72823 if (!new_len)
72824 goto out;
72825
72826 +#ifdef CONFIG_PAX_SEGMEXEC
72827 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72828 + pax_task_size = SEGMEXEC_TASK_SIZE;
72829 +#endif
72830 +
72831 + pax_task_size -= PAGE_SIZE;
72832 +
72833 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72834 + old_len > pax_task_size || addr > pax_task_size-old_len)
72835 + goto out;
72836 +
72837 if (flags & MREMAP_FIXED) {
72838 if (flags & MREMAP_MAYMOVE)
72839 ret = mremap_to(addr, old_len, new_addr, new_len);
72840 @@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
72841 addr + new_len);
72842 }
72843 ret = addr;
72844 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72845 goto out;
72846 }
72847 }
72848 @@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
72849 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72850 if (ret)
72851 goto out;
72852 +
72853 + map_flags = vma->vm_flags;
72854 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72855 + if (!(ret & ~PAGE_MASK)) {
72856 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72857 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72858 + }
72859 }
72860 out:
72861 if (ret & ~PAGE_MASK)
72862 diff --git a/mm/nommu.c b/mm/nommu.c
72863 index f59e170..34e2a2b 100644
72864 --- a/mm/nommu.c
72865 +++ b/mm/nommu.c
72866 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72867 int sysctl_overcommit_ratio = 50; /* default is 50% */
72868 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72869 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72870 -int heap_stack_gap = 0;
72871
72872 atomic_long_t mmap_pages_allocated;
72873
72874 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72875 EXPORT_SYMBOL(find_vma);
72876
72877 /*
72878 - * find a VMA
72879 - * - we don't extend stack VMAs under NOMMU conditions
72880 - */
72881 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72882 -{
72883 - return find_vma(mm, addr);
72884 -}
72885 -
72886 -/*
72887 * expand a stack to a given address
72888 * - not supported under NOMMU conditions
72889 */
72890 @@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72891
72892 /* most fields are the same, copy all, and then fixup */
72893 *new = *vma;
72894 + INIT_LIST_HEAD(&new->anon_vma_chain);
72895 *region = *vma->vm_region;
72896 new->vm_region = region;
72897
72898 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72899 index a13ded1..b949d15 100644
72900 --- a/mm/page_alloc.c
72901 +++ b/mm/page_alloc.c
72902 @@ -335,7 +335,7 @@ out:
72903 * This usage means that zero-order pages may not be compound.
72904 */
72905
72906 -static void free_compound_page(struct page *page)
72907 +void free_compound_page(struct page *page)
72908 {
72909 __free_pages_ok(page, compound_order(page));
72910 }
72911 @@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72912 int i;
72913 int bad = 0;
72914
72915 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72916 + unsigned long index = 1UL << order;
72917 +#endif
72918 +
72919 trace_mm_page_free(page, order);
72920 kmemcheck_free_shadow(page, order);
72921
72922 @@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72923 debug_check_no_obj_freed(page_address(page),
72924 PAGE_SIZE << order);
72925 }
72926 +
72927 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72928 + for (; index; --index)
72929 + sanitize_highpage(page + index - 1);
72930 +#endif
72931 +
72932 arch_free_page(page, order);
72933 kernel_map_pages(page, 1 << order, 0);
72934
72935 @@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72936 arch_alloc_page(page, order);
72937 kernel_map_pages(page, 1 << order, 1);
72938
72939 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
72940 if (gfp_flags & __GFP_ZERO)
72941 prep_zero_page(page, order, gfp_flags);
72942 +#endif
72943
72944 if (order && (gfp_flags & __GFP_COMP))
72945 prep_compound_page(page, order);
72946 @@ -3468,7 +3480,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72947 unsigned long pfn;
72948
72949 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72950 +#ifdef CONFIG_X86_32
72951 + /* boot failures seen in VMware 8 on 32-bit vanilla kernels since
72952 + this change */
72953 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72954 +#else
72955 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72956 +#endif
72957 return 1;
72958 }
72959 return 0;
72960 diff --git a/mm/percpu.c b/mm/percpu.c
72961 index 317de23..1ba7776 100644
72962 --- a/mm/percpu.c
72963 +++ b/mm/percpu.c
72964 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
72965 static unsigned int pcpu_high_unit_cpu __read_mostly;
72966
72967 /* the address of the first chunk which starts with the kernel static area */
72968 -void *pcpu_base_addr __read_mostly;
72969 +void *pcpu_base_addr __read_only;
72970 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72971
72972 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72973 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
72974 index c20ff48..137702a 100644
72975 --- a/mm/process_vm_access.c
72976 +++ b/mm/process_vm_access.c
72977 @@ -13,6 +13,7 @@
72978 #include <linux/uio.h>
72979 #include <linux/sched.h>
72980 #include <linux/highmem.h>
72981 +#include <linux/security.h>
72982 #include <linux/ptrace.h>
72983 #include <linux/slab.h>
72984 #include <linux/syscalls.h>
72985 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72986 size_t iov_l_curr_offset = 0;
72987 ssize_t iov_len;
72988
72989 + return -ENOSYS; // PaX: until properly audited
72990 +
72991 /*
72992 * Work out how many pages of struct pages we're going to need
72993 * when eventually calling get_user_pages
72994 */
72995 for (i = 0; i < riovcnt; i++) {
72996 iov_len = rvec[i].iov_len;
72997 - if (iov_len > 0) {
72998 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
72999 - + iov_len)
73000 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
73001 - / PAGE_SIZE + 1;
73002 - nr_pages = max(nr_pages, nr_pages_iov);
73003 - }
73004 + if (iov_len <= 0)
73005 + continue;
73006 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
73007 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
73008 + nr_pages = max(nr_pages, nr_pages_iov);
73009 }
73010
73011 if (nr_pages == 0)
73012 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73013 goto free_proc_pages;
73014 }
73015
73016 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
73017 + rc = -EPERM;
73018 + goto put_task_struct;
73019 + }
73020 +
73021 mm = mm_access(task, PTRACE_MODE_ATTACH);
73022 if (!mm || IS_ERR(mm)) {
73023 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
73024 diff --git a/mm/rmap.c b/mm/rmap.c
73025 index c8454e0..b04f3a2 100644
73026 --- a/mm/rmap.c
73027 +++ b/mm/rmap.c
73028 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73029 struct anon_vma *anon_vma = vma->anon_vma;
73030 struct anon_vma_chain *avc;
73031
73032 +#ifdef CONFIG_PAX_SEGMEXEC
73033 + struct anon_vma_chain *avc_m = NULL;
73034 +#endif
73035 +
73036 might_sleep();
73037 if (unlikely(!anon_vma)) {
73038 struct mm_struct *mm = vma->vm_mm;
73039 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73040 if (!avc)
73041 goto out_enomem;
73042
73043 +#ifdef CONFIG_PAX_SEGMEXEC
73044 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
73045 + if (!avc_m)
73046 + goto out_enomem_free_avc;
73047 +#endif
73048 +
73049 anon_vma = find_mergeable_anon_vma(vma);
73050 allocated = NULL;
73051 if (!anon_vma) {
73052 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73053 /* page_table_lock to protect against threads */
73054 spin_lock(&mm->page_table_lock);
73055 if (likely(!vma->anon_vma)) {
73056 +
73057 +#ifdef CONFIG_PAX_SEGMEXEC
73058 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
73059 +
73060 + if (vma_m) {
73061 + BUG_ON(vma_m->anon_vma);
73062 + vma_m->anon_vma = anon_vma;
73063 + avc_m->anon_vma = anon_vma;
73064 + avc_m->vma = vma;
73065 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
73066 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
73067 + avc_m = NULL;
73068 + }
73069 +#endif
73070 +
73071 vma->anon_vma = anon_vma;
73072 avc->anon_vma = anon_vma;
73073 avc->vma = vma;
73074 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73075
73076 if (unlikely(allocated))
73077 put_anon_vma(allocated);
73078 +
73079 +#ifdef CONFIG_PAX_SEGMEXEC
73080 + if (unlikely(avc_m))
73081 + anon_vma_chain_free(avc_m);
73082 +#endif
73083 +
73084 if (unlikely(avc))
73085 anon_vma_chain_free(avc);
73086 }
73087 return 0;
73088
73089 out_enomem_free_avc:
73090 +
73091 +#ifdef CONFIG_PAX_SEGMEXEC
73092 + if (avc_m)
73093 + anon_vma_chain_free(avc_m);
73094 +#endif
73095 +
73096 anon_vma_chain_free(avc);
73097 out_enomem:
73098 return -ENOMEM;
73099 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
73100 * Attach the anon_vmas from src to dst.
73101 * Returns 0 on success, -ENOMEM on failure.
73102 */
73103 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
73104 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
73105 {
73106 struct anon_vma_chain *avc, *pavc;
73107 struct anon_vma *root = NULL;
73108 @@ -321,7 +358,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
73109 * the corresponding VMA in the parent process is attached to.
73110 * Returns 0 on success, non-zero on failure.
73111 */
73112 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
73113 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
73114 {
73115 struct anon_vma_chain *avc;
73116 struct anon_vma *anon_vma;
73117 diff --git a/mm/shmem.c b/mm/shmem.c
73118 index 269d049..a9d2b50 100644
73119 --- a/mm/shmem.c
73120 +++ b/mm/shmem.c
73121 @@ -31,7 +31,7 @@
73122 #include <linux/export.h>
73123 #include <linux/swap.h>
73124
73125 -static struct vfsmount *shm_mnt;
73126 +struct vfsmount *shm_mnt;
73127
73128 #ifdef CONFIG_SHMEM
73129 /*
73130 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
73131 #define BOGO_DIRENT_SIZE 20
73132
73133 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
73134 -#define SHORT_SYMLINK_LEN 128
73135 +#define SHORT_SYMLINK_LEN 64
73136
73137 struct shmem_xattr {
73138 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
73139 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
73140 int err = -ENOMEM;
73141
73142 /* Round up to L1_CACHE_BYTES to resist false sharing */
73143 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
73144 - L1_CACHE_BYTES), GFP_KERNEL);
73145 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
73146 if (!sbinfo)
73147 return -ENOMEM;
73148
73149 diff --git a/mm/slab.c b/mm/slab.c
73150 index f0bd785..348b96a 100644
73151 --- a/mm/slab.c
73152 +++ b/mm/slab.c
73153 @@ -153,7 +153,7 @@
73154
73155 /* Legal flag mask for kmem_cache_create(). */
73156 #if DEBUG
73157 -# define CREATE_MASK (SLAB_RED_ZONE | \
73158 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
73159 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
73160 SLAB_CACHE_DMA | \
73161 SLAB_STORE_USER | \
73162 @@ -161,7 +161,7 @@
73163 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73164 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
73165 #else
73166 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
73167 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
73168 SLAB_CACHE_DMA | \
73169 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
73170 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73171 @@ -290,7 +290,7 @@ struct kmem_list3 {
73172 * Need this for bootstrapping a per node allocator.
73173 */
73174 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
73175 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
73176 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
73177 #define CACHE_CACHE 0
73178 #define SIZE_AC MAX_NUMNODES
73179 #define SIZE_L3 (2 * MAX_NUMNODES)
73180 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
73181 if ((x)->max_freeable < i) \
73182 (x)->max_freeable = i; \
73183 } while (0)
73184 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
73185 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
73186 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
73187 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
73188 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
73189 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
73190 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
73191 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
73192 #else
73193 #define STATS_INC_ACTIVE(x) do { } while (0)
73194 #define STATS_DEC_ACTIVE(x) do { } while (0)
73195 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
73196 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
73197 */
73198 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
73199 - const struct slab *slab, void *obj)
73200 + const struct slab *slab, const void *obj)
73201 {
73202 u32 offset = (obj - slab->s_mem);
73203 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
73204 @@ -568,7 +568,7 @@ struct cache_names {
73205 static struct cache_names __initdata cache_names[] = {
73206 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
73207 #include <linux/kmalloc_sizes.h>
73208 - {NULL,}
73209 + {NULL}
73210 #undef CACHE
73211 };
73212
73213 @@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
73214 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
73215 sizes[INDEX_AC].cs_size,
73216 ARCH_KMALLOC_MINALIGN,
73217 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73218 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73219 NULL);
73220
73221 if (INDEX_AC != INDEX_L3) {
73222 @@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
73223 kmem_cache_create(names[INDEX_L3].name,
73224 sizes[INDEX_L3].cs_size,
73225 ARCH_KMALLOC_MINALIGN,
73226 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73227 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73228 NULL);
73229 }
73230
73231 @@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
73232 sizes->cs_cachep = kmem_cache_create(names->name,
73233 sizes->cs_size,
73234 ARCH_KMALLOC_MINALIGN,
73235 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73236 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73237 NULL);
73238 }
73239 #ifdef CONFIG_ZONE_DMA
73240 @@ -4339,10 +4339,10 @@ static int s_show(struct seq_file *m, void *p)
73241 }
73242 /* cpu stats */
73243 {
73244 - unsigned long allochit = atomic_read(&cachep->allochit);
73245 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73246 - unsigned long freehit = atomic_read(&cachep->freehit);
73247 - unsigned long freemiss = atomic_read(&cachep->freemiss);
73248 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73249 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73250 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73251 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73252
73253 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
73254 allochit, allocmiss, freehit, freemiss);
73255 @@ -4601,13 +4601,62 @@ static int __init slab_proc_init(void)
73256 {
73257 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
73258 #ifdef CONFIG_DEBUG_SLAB_LEAK
73259 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
73260 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
73261 #endif
73262 return 0;
73263 }
73264 module_init(slab_proc_init);
73265 #endif
73266
73267 +void check_object_size(const void *ptr, unsigned long n, bool to)
73268 +{
73269 +
73270 +#ifdef CONFIG_PAX_USERCOPY
73271 + struct page *page;
73272 + struct kmem_cache *cachep = NULL;
73273 + struct slab *slabp;
73274 + unsigned int objnr;
73275 + unsigned long offset;
73276 + const char *type;
73277 +
73278 + if (!n)
73279 + return;
73280 +
73281 + type = "<null>";
73282 + if (ZERO_OR_NULL_PTR(ptr))
73283 + goto report;
73284 +
73285 + if (!virt_addr_valid(ptr))
73286 + return;
73287 +
73288 + page = virt_to_head_page(ptr);
73289 +
73290 + type = "<process stack>";
73291 + if (!PageSlab(page)) {
73292 + if (object_is_on_stack(ptr, n) == -1)
73293 + goto report;
73294 + return;
73295 + }
73296 +
73297 + cachep = page_get_cache(page);
73298 + type = cachep->name;
73299 + if (!(cachep->flags & SLAB_USERCOPY))
73300 + goto report;
73301 +
73302 + slabp = page_get_slab(page);
73303 + objnr = obj_to_index(cachep, slabp, ptr);
73304 + BUG_ON(objnr >= cachep->num);
73305 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
73306 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
73307 + return;
73308 +
73309 +report:
73310 + pax_report_usercopy(ptr, n, to, type);
73311 +#endif
73312 +
73313 +}
73314 +EXPORT_SYMBOL(check_object_size);
73315 +
73316 /**
73317 * ksize - get the actual amount of memory allocated for a given object
73318 * @objp: Pointer to the object
73319 diff --git a/mm/slob.c b/mm/slob.c
73320 index 8105be4..e045f96 100644
73321 --- a/mm/slob.c
73322 +++ b/mm/slob.c
73323 @@ -29,7 +29,7 @@
73324 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
73325 * alloc_pages() directly, allocating compound pages so the page order
73326 * does not have to be separately tracked, and also stores the exact
73327 - * allocation size in page->private so that it can be used to accurately
73328 + * allocation size in slob_page->size so that it can be used to accurately
73329 * provide ksize(). These objects are detected in kfree() because slob_page()
73330 * is false for them.
73331 *
73332 @@ -58,6 +58,7 @@
73333 */
73334
73335 #include <linux/kernel.h>
73336 +#include <linux/sched.h>
73337 #include <linux/slab.h>
73338 #include <linux/mm.h>
73339 #include <linux/swap.h> /* struct reclaim_state */
73340 @@ -102,7 +103,8 @@ struct slob_page {
73341 unsigned long flags; /* mandatory */
73342 atomic_t _count; /* mandatory */
73343 slobidx_t units; /* free units left in page */
73344 - unsigned long pad[2];
73345 + unsigned long pad[1];
73346 + unsigned long size; /* size when >=PAGE_SIZE */
73347 slob_t *free; /* first free slob_t in page */
73348 struct list_head list; /* linked list of free pages */
73349 };
73350 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
73351 */
73352 static inline int is_slob_page(struct slob_page *sp)
73353 {
73354 - return PageSlab((struct page *)sp);
73355 + return PageSlab((struct page *)sp) && !sp->size;
73356 }
73357
73358 static inline void set_slob_page(struct slob_page *sp)
73359 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
73360
73361 static inline struct slob_page *slob_page(const void *addr)
73362 {
73363 - return (struct slob_page *)virt_to_page(addr);
73364 + return (struct slob_page *)virt_to_head_page(addr);
73365 }
73366
73367 /*
73368 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
73369 /*
73370 * Return the size of a slob block.
73371 */
73372 -static slobidx_t slob_units(slob_t *s)
73373 +static slobidx_t slob_units(const slob_t *s)
73374 {
73375 if (s->units > 0)
73376 return s->units;
73377 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
73378 /*
73379 * Return the next free slob block pointer after this one.
73380 */
73381 -static slob_t *slob_next(slob_t *s)
73382 +static slob_t *slob_next(const slob_t *s)
73383 {
73384 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
73385 slobidx_t next;
73386 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
73387 /*
73388 * Returns true if s is the last free block in its page.
73389 */
73390 -static int slob_last(slob_t *s)
73391 +static int slob_last(const slob_t *s)
73392 {
73393 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
73394 }
73395 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
73396 if (!page)
73397 return NULL;
73398
73399 + set_slob_page(page);
73400 return page_address(page);
73401 }
73402
73403 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
73404 if (!b)
73405 return NULL;
73406 sp = slob_page(b);
73407 - set_slob_page(sp);
73408
73409 spin_lock_irqsave(&slob_lock, flags);
73410 sp->units = SLOB_UNITS(PAGE_SIZE);
73411 sp->free = b;
73412 + sp->size = 0;
73413 INIT_LIST_HEAD(&sp->list);
73414 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
73415 set_slob_page_free(sp, slob_list);
73416 @@ -476,10 +479,9 @@ out:
73417 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
73418 */
73419
73420 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73421 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
73422 {
73423 - unsigned int *m;
73424 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73425 + slob_t *m;
73426 void *ret;
73427
73428 gfp &= gfp_allowed_mask;
73429 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73430
73431 if (!m)
73432 return NULL;
73433 - *m = size;
73434 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
73435 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
73436 + m[0].units = size;
73437 + m[1].units = align;
73438 ret = (void *)m + align;
73439
73440 trace_kmalloc_node(_RET_IP_, ret,
73441 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73442 gfp |= __GFP_COMP;
73443 ret = slob_new_pages(gfp, order, node);
73444 if (ret) {
73445 - struct page *page;
73446 - page = virt_to_page(ret);
73447 - page->private = size;
73448 + struct slob_page *sp;
73449 + sp = slob_page(ret);
73450 + sp->size = size;
73451 }
73452
73453 trace_kmalloc_node(_RET_IP_, ret,
73454 size, PAGE_SIZE << order, gfp, node);
73455 }
73456
73457 - kmemleak_alloc(ret, size, 1, gfp);
73458 + return ret;
73459 +}
73460 +
73461 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73462 +{
73463 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73464 + void *ret = __kmalloc_node_align(size, gfp, node, align);
73465 +
73466 + if (!ZERO_OR_NULL_PTR(ret))
73467 + kmemleak_alloc(ret, size, 1, gfp);
73468 return ret;
73469 }
73470 EXPORT_SYMBOL(__kmalloc_node);
73471 @@ -533,13 +547,92 @@ void kfree(const void *block)
73472 sp = slob_page(block);
73473 if (is_slob_page(sp)) {
73474 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73475 - unsigned int *m = (unsigned int *)(block - align);
73476 - slob_free(m, *m + align);
73477 - } else
73478 + slob_t *m = (slob_t *)(block - align);
73479 + slob_free(m, m[0].units + align);
73480 + } else {
73481 + clear_slob_page(sp);
73482 + free_slob_page(sp);
73483 + sp->size = 0;
73484 put_page(&sp->page);
73485 + }
73486 }
73487 EXPORT_SYMBOL(kfree);
73488
73489 +void check_object_size(const void *ptr, unsigned long n, bool to)
73490 +{
73491 +
73492 +#ifdef CONFIG_PAX_USERCOPY
73493 + struct slob_page *sp;
73494 + const slob_t *free;
73495 + const void *base;
73496 + unsigned long flags;
73497 + const char *type;
73498 +
73499 + if (!n)
73500 + return;
73501 +
73502 + type = "<null>";
73503 + if (ZERO_OR_NULL_PTR(ptr))
73504 + goto report;
73505 +
73506 + if (!virt_addr_valid(ptr))
73507 + return;
73508 +
73509 + type = "<process stack>";
73510 + sp = slob_page(ptr);
73511 + if (!PageSlab((struct page *)sp)) {
73512 + if (object_is_on_stack(ptr, n) == -1)
73513 + goto report;
73514 + return;
73515 + }
73516 +
73517 + type = "<slob>";
73518 + if (sp->size) {
73519 + base = page_address(&sp->page);
73520 + if (base <= ptr && n <= sp->size - (ptr - base))
73521 + return;
73522 + goto report;
73523 + }
73524 +
73525 + /* walk the free list and the allocated chunks between free blocks to find the chunk containing ptr */
73526 + spin_lock_irqsave(&slob_lock, flags);
73527 + base = (void *)((unsigned long)ptr & PAGE_MASK);
73528 + free = sp->free;
73529 +
73530 + while (!slob_last(free) && (void *)free <= ptr) {
73531 + base = free + slob_units(free);
73532 + free = slob_next(free);
73533 + }
73534 +
73535 + while (base < (void *)free) {
73536 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
73537 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
73538 + int offset;
73539 +
73540 + if (ptr < base + align)
73541 + break;
73542 +
73543 + offset = ptr - base - align;
73544 + if (offset >= m) {
73545 + base += size;
73546 + continue;
73547 + }
73548 +
73549 + if (n > m - offset)
73550 + break;
73551 +
73552 + spin_unlock_irqrestore(&slob_lock, flags);
73553 + return;
73554 + }
73555 +
73556 + spin_unlock_irqrestore(&slob_lock, flags);
73557 +report:
73558 + pax_report_usercopy(ptr, n, to, type);
73559 +#endif
73560 +
73561 +}
73562 +EXPORT_SYMBOL(check_object_size);
73563 +
73564 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
73565 size_t ksize(const void *block)
73566 {
73567 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
73568 sp = slob_page(block);
73569 if (is_slob_page(sp)) {
73570 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73571 - unsigned int *m = (unsigned int *)(block - align);
73572 - return SLOB_UNITS(*m) * SLOB_UNIT;
73573 + slob_t *m = (slob_t *)(block - align);
73574 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
73575 } else
73576 - return sp->page.private;
73577 + return sp->size;
73578 }
73579 EXPORT_SYMBOL(ksize);
73580
73581 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73582 {
73583 struct kmem_cache *c;
73584
73585 +#ifdef CONFIG_PAX_USERCOPY
73586 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
73587 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73588 +#else
73589 c = slob_alloc(sizeof(struct kmem_cache),
73590 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73591 +#endif
73592
73593 if (c) {
73594 c->name = name;
73595 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
73596
73597 lockdep_trace_alloc(flags);
73598
73599 +#ifdef CONFIG_PAX_USERCOPY
73600 + b = __kmalloc_node_align(c->size, flags, node, c->align);
73601 +#else
73602 if (c->size < PAGE_SIZE) {
73603 b = slob_alloc(c->size, flags, c->align, node);
73604 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73605 SLOB_UNITS(c->size) * SLOB_UNIT,
73606 flags, node);
73607 } else {
73608 + struct slob_page *sp;
73609 +
73610 b = slob_new_pages(flags, get_order(c->size), node);
73611 + sp = slob_page(b);
73612 + sp->size = c->size;
73613 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73614 PAGE_SIZE << get_order(c->size),
73615 flags, node);
73616 }
73617 +#endif
73618
73619 if (c->ctor)
73620 c->ctor(b);
73621 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
73622
73623 static void __kmem_cache_free(void *b, int size)
73624 {
73625 - if (size < PAGE_SIZE)
73626 + struct slob_page *sp = slob_page(b);
73627 +
73628 + if (is_slob_page(sp))
73629 slob_free(b, size);
73630 - else
73631 + else {
73632 + clear_slob_page(sp);
73633 + free_slob_page(sp);
73634 + sp->size = 0;
73635 slob_free_pages(b, get_order(size));
73636 + }
73637 }
73638
73639 static void kmem_rcu_free(struct rcu_head *head)
73640 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
73641
73642 void kmem_cache_free(struct kmem_cache *c, void *b)
73643 {
73644 + int size = c->size;
73645 +
73646 +#ifdef CONFIG_PAX_USERCOPY
73647 + if (size + c->align < PAGE_SIZE) {
73648 + size += c->align;
73649 + b -= c->align;
73650 + }
73651 +#endif
73652 +
73653 kmemleak_free_recursive(b, c->flags);
73654 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73655 struct slob_rcu *slob_rcu;
73656 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73657 - slob_rcu->size = c->size;
73658 + slob_rcu = b + (size - sizeof(struct slob_rcu));
73659 + slob_rcu->size = size;
73660 call_rcu(&slob_rcu->head, kmem_rcu_free);
73661 } else {
73662 - __kmem_cache_free(b, c->size);
73663 + __kmem_cache_free(b, size);
73664 }
73665
73666 +#ifdef CONFIG_PAX_USERCOPY
73667 + trace_kfree(_RET_IP_, b);
73668 +#else
73669 trace_kmem_cache_free(_RET_IP_, b);
73670 +#endif
73671 +
73672 }
73673 EXPORT_SYMBOL(kmem_cache_free);
73674
73675 diff --git a/mm/slub.c b/mm/slub.c
73676 index 0342a5d..8180ae9 100644
73677 --- a/mm/slub.c
73678 +++ b/mm/slub.c
73679 @@ -208,7 +208,7 @@ struct track {
73680
73681 enum track_item { TRACK_ALLOC, TRACK_FREE };
73682
73683 -#ifdef CONFIG_SYSFS
73684 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73685 static int sysfs_slab_add(struct kmem_cache *);
73686 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73687 static void sysfs_slab_remove(struct kmem_cache *);
73688 @@ -532,7 +532,7 @@ static void print_track(const char *s, struct track *t)
73689 if (!t->addr)
73690 return;
73691
73692 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73693 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73694 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73695 #ifdef CONFIG_STACKTRACE
73696 {
73697 @@ -2571,6 +2571,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73698
73699 page = virt_to_head_page(x);
73700
73701 + BUG_ON(!PageSlab(page));
73702 +
73703 slab_free(s, page, x, _RET_IP_);
73704
73705 trace_kmem_cache_free(_RET_IP_, x);
73706 @@ -2604,7 +2606,7 @@ static int slub_min_objects;
73707 * Merge control. If this is set then no merging of slab caches will occur.
73708 * (Could be removed. This was introduced to pacify the merge skeptics.)
73709 */
73710 -static int slub_nomerge;
73711 +static int slub_nomerge = 1;
73712
73713 /*
73714 * Calculate the order of allocation given an slab object size.
73715 @@ -3057,7 +3059,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73716 else
73717 s->cpu_partial = 30;
73718
73719 - s->refcount = 1;
73720 + atomic_set(&s->refcount, 1);
73721 #ifdef CONFIG_NUMA
73722 s->remote_node_defrag_ratio = 1000;
73723 #endif
73724 @@ -3161,8 +3163,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73725 void kmem_cache_destroy(struct kmem_cache *s)
73726 {
73727 down_write(&slub_lock);
73728 - s->refcount--;
73729 - if (!s->refcount) {
73730 + if (atomic_dec_and_test(&s->refcount)) {
73731 list_del(&s->list);
73732 up_write(&slub_lock);
73733 if (kmem_cache_close(s)) {
73734 @@ -3373,6 +3374,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73735 EXPORT_SYMBOL(__kmalloc_node);
73736 #endif
73737
73738 +void check_object_size(const void *ptr, unsigned long n, bool to)
73739 +{
73740 +
73741 +#ifdef CONFIG_PAX_USERCOPY
73742 + struct page *page;
73743 + struct kmem_cache *s = NULL;
73744 + unsigned long offset;
73745 + const char *type;
73746 +
73747 + if (!n)
73748 + return;
73749 +
73750 + type = "<null>";
73751 + if (ZERO_OR_NULL_PTR(ptr))
73752 + goto report;
73753 +
73754 + if (!virt_addr_valid(ptr))
73755 + return;
73756 +
73757 + page = virt_to_head_page(ptr);
73758 +
73759 + type = "<process stack>";
73760 + if (!PageSlab(page)) {
73761 + if (object_is_on_stack(ptr, n) == -1)
73762 + goto report;
73763 + return;
73764 + }
73765 +
73766 + s = page->slab;
73767 + type = s->name;
73768 + if (!(s->flags & SLAB_USERCOPY))
73769 + goto report;
73770 +
73771 + offset = (ptr - page_address(page)) % s->size;
73772 + if (offset <= s->objsize && n <= s->objsize - offset)
73773 + return;
73774 +
73775 +report:
73776 + pax_report_usercopy(ptr, n, to, type);
73777 +#endif
73778 +
73779 +}
73780 +EXPORT_SYMBOL(check_object_size);
73781 +
73782 size_t ksize(const void *object)
73783 {
73784 struct page *page;
73785 @@ -3647,7 +3692,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73786 int node;
73787
73788 list_add(&s->list, &slab_caches);
73789 - s->refcount = -1;
73790 + atomic_set(&s->refcount, -1);
73791
73792 for_each_node_state(node, N_NORMAL_MEMORY) {
73793 struct kmem_cache_node *n = get_node(s, node);
73794 @@ -3767,17 +3812,17 @@ void __init kmem_cache_init(void)
73795
73796 /* Caches that are not of the two-to-the-power-of size */
73797 if (KMALLOC_MIN_SIZE <= 32) {
73798 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73799 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73800 caches++;
73801 }
73802
73803 if (KMALLOC_MIN_SIZE <= 64) {
73804 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73805 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73806 caches++;
73807 }
73808
73809 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73810 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73811 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73812 caches++;
73813 }
73814
73815 @@ -3845,7 +3890,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73816 /*
73817 * We may have set a slab to be unmergeable during bootstrap.
73818 */
73819 - if (s->refcount < 0)
73820 + if (atomic_read(&s->refcount) < 0)
73821 return 1;
73822
73823 return 0;
73824 @@ -3904,7 +3949,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73825 down_write(&slub_lock);
73826 s = find_mergeable(size, align, flags, name, ctor);
73827 if (s) {
73828 - s->refcount++;
73829 + atomic_inc(&s->refcount);
73830 /*
73831 * Adjust the object sizes so that we clear
73832 * the complete object on kzalloc.
73833 @@ -3913,7 +3958,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73834 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73835
73836 if (sysfs_slab_alias(s, name)) {
73837 - s->refcount--;
73838 + atomic_dec(&s->refcount);
73839 goto err;
73840 }
73841 up_write(&slub_lock);
73842 @@ -4042,7 +4087,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73843 }
73844 #endif
73845
73846 -#ifdef CONFIG_SYSFS
73847 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73848 static int count_inuse(struct page *page)
73849 {
73850 return page->inuse;
73851 @@ -4429,12 +4474,12 @@ static void resiliency_test(void)
73852 validate_slab_cache(kmalloc_caches[9]);
73853 }
73854 #else
73855 -#ifdef CONFIG_SYSFS
73856 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73857 static void resiliency_test(void) {};
73858 #endif
73859 #endif
73860
73861 -#ifdef CONFIG_SYSFS
73862 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73863 enum slab_stat_type {
73864 SL_ALL, /* All slabs */
73865 SL_PARTIAL, /* Only partially allocated slabs */
73866 @@ -4677,7 +4722,7 @@ SLAB_ATTR_RO(ctor);
73867
73868 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73869 {
73870 - return sprintf(buf, "%d\n", s->refcount - 1);
73871 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73872 }
73873 SLAB_ATTR_RO(aliases);
73874
73875 @@ -5244,6 +5289,7 @@ static char *create_unique_id(struct kmem_cache *s)
73876 return name;
73877 }
73878
73879 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73880 static int sysfs_slab_add(struct kmem_cache *s)
73881 {
73882 int err;
73883 @@ -5306,6 +5352,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73884 kobject_del(&s->kobj);
73885 kobject_put(&s->kobj);
73886 }
73887 +#endif
73888
73889 /*
73890 * Need to buffer aliases during bootup until sysfs becomes
73891 @@ -5319,6 +5366,7 @@ struct saved_alias {
73892
73893 static struct saved_alias *alias_list;
73894
73895 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73896 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73897 {
73898 struct saved_alias *al;
73899 @@ -5341,6 +5389,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73900 alias_list = al;
73901 return 0;
73902 }
73903 +#endif
73904
73905 static int __init slab_sysfs_init(void)
73906 {
73907 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
73908 index 1b7e22a..3fcd4f3 100644
73909 --- a/mm/sparse-vmemmap.c
73910 +++ b/mm/sparse-vmemmap.c
73911 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
73912 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73913 if (!p)
73914 return NULL;
73915 - pud_populate(&init_mm, pud, p);
73916 + pud_populate_kernel(&init_mm, pud, p);
73917 }
73918 return pud;
73919 }
73920 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
73921 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73922 if (!p)
73923 return NULL;
73924 - pgd_populate(&init_mm, pgd, p);
73925 + pgd_populate_kernel(&init_mm, pgd, p);
73926 }
73927 return pgd;
73928 }
73929 diff --git a/mm/swap.c b/mm/swap.c
73930 index 14380e9..e244704 100644
73931 --- a/mm/swap.c
73932 +++ b/mm/swap.c
73933 @@ -30,6 +30,7 @@
73934 #include <linux/backing-dev.h>
73935 #include <linux/memcontrol.h>
73936 #include <linux/gfp.h>
73937 +#include <linux/hugetlb.h>
73938
73939 #include "internal.h"
73940
73941 @@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
73942
73943 __page_cache_release(page);
73944 dtor = get_compound_page_dtor(page);
73945 + if (!PageHuge(page))
73946 + BUG_ON(dtor != free_compound_page);
73947 (*dtor)(page);
73948 }
73949
73950 diff --git a/mm/swapfile.c b/mm/swapfile.c
73951 index f31b29d..8bdcae2 100644
73952 --- a/mm/swapfile.c
73953 +++ b/mm/swapfile.c
73954 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
73955
73956 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73957 /* Activity counter to indicate that a swapon or swapoff has occurred */
73958 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
73959 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73960
73961 static inline unsigned char swap_count(unsigned char ent)
73962 {
73963 @@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73964 }
73965 filp_close(swap_file, NULL);
73966 err = 0;
73967 - atomic_inc(&proc_poll_event);
73968 + atomic_inc_unchecked(&proc_poll_event);
73969 wake_up_interruptible(&proc_poll_wait);
73970
73971 out_dput:
73972 @@ -1685,8 +1685,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73973
73974 poll_wait(file, &proc_poll_wait, wait);
73975
73976 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
73977 - seq->poll_event = atomic_read(&proc_poll_event);
73978 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73979 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73980 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73981 }
73982
73983 @@ -1784,7 +1784,7 @@ static int swaps_open(struct inode *inode, struct file *file)
73984 return ret;
73985
73986 seq = file->private_data;
73987 - seq->poll_event = atomic_read(&proc_poll_event);
73988 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73989 return 0;
73990 }
73991
73992 @@ -2122,7 +2122,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
73993 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73994
73995 mutex_unlock(&swapon_mutex);
73996 - atomic_inc(&proc_poll_event);
73997 + atomic_inc_unchecked(&proc_poll_event);
73998 wake_up_interruptible(&proc_poll_wait);
73999
74000 if (S_ISREG(inode->i_mode))
74001 diff --git a/mm/util.c b/mm/util.c
74002 index 136ac4f..f917fa9 100644
74003 --- a/mm/util.c
74004 +++ b/mm/util.c
74005 @@ -243,6 +243,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
74006 void arch_pick_mmap_layout(struct mm_struct *mm)
74007 {
74008 mm->mmap_base = TASK_UNMAPPED_BASE;
74009 +
74010 +#ifdef CONFIG_PAX_RANDMMAP
74011 + if (mm->pax_flags & MF_PAX_RANDMMAP)
74012 + mm->mmap_base += mm->delta_mmap;
74013 +#endif
74014 +
74015 mm->get_unmapped_area = arch_get_unmapped_area;
74016 mm->unmap_area = arch_unmap_area;
74017 }
74018 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
74019 index 86ce9a5..fc9fb61 100644
74020 --- a/mm/vmalloc.c
74021 +++ b/mm/vmalloc.c
74022 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
74023
74024 pte = pte_offset_kernel(pmd, addr);
74025 do {
74026 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74027 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74028 +
74029 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74030 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
74031 + BUG_ON(!pte_exec(*pte));
74032 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
74033 + continue;
74034 + }
74035 +#endif
74036 +
74037 + {
74038 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74039 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74040 + }
74041 } while (pte++, addr += PAGE_SIZE, addr != end);
74042 }
74043
74044 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74045 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
74046 {
74047 pte_t *pte;
74048 + int ret = -ENOMEM;
74049
74050 /*
74051 * nr is a running index into the array which helps higher level
74052 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74053 pte = pte_alloc_kernel(pmd, addr);
74054 if (!pte)
74055 return -ENOMEM;
74056 +
74057 + pax_open_kernel();
74058 do {
74059 struct page *page = pages[*nr];
74060
74061 - if (WARN_ON(!pte_none(*pte)))
74062 - return -EBUSY;
74063 - if (WARN_ON(!page))
74064 - return -ENOMEM;
74065 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74066 + if (pgprot_val(prot) & _PAGE_NX)
74067 +#endif
74068 +
74069 + if (WARN_ON(!pte_none(*pte))) {
74070 + ret = -EBUSY;
74071 + goto out;
74072 + }
74073 + if (WARN_ON(!page)) {
74074 + ret = -ENOMEM;
74075 + goto out;
74076 + }
74077 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
74078 (*nr)++;
74079 } while (pte++, addr += PAGE_SIZE, addr != end);
74080 - return 0;
74081 + ret = 0;
74082 +out:
74083 + pax_close_kernel();
74084 + return ret;
74085 }
74086
74087 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74088 @@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74089 pmd_t *pmd;
74090 unsigned long next;
74091
74092 - pmd = pmd_alloc(&init_mm, pud, addr);
74093 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
74094 if (!pmd)
74095 return -ENOMEM;
74096 do {
74097 @@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
74098 pud_t *pud;
74099 unsigned long next;
74100
74101 - pud = pud_alloc(&init_mm, pgd, addr);
74102 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
74103 if (!pud)
74104 return -ENOMEM;
74105 do {
74106 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
74107 * and fall back on vmalloc() if that fails. Others
74108 * just put it in the vmalloc space.
74109 */
74110 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
74111 +#ifdef CONFIG_MODULES
74112 +#ifdef MODULES_VADDR
74113 unsigned long addr = (unsigned long)x;
74114 if (addr >= MODULES_VADDR && addr < MODULES_END)
74115 return 1;
74116 #endif
74117 +
74118 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74119 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
74120 + return 1;
74121 +#endif
74122 +
74123 +#endif
74124 +
74125 return is_vmalloc_addr(x);
74126 }
74127
74128 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
74129
74130 if (!pgd_none(*pgd)) {
74131 pud_t *pud = pud_offset(pgd, addr);
74132 +#ifdef CONFIG_X86
74133 + if (!pud_large(*pud))
74134 +#endif
74135 if (!pud_none(*pud)) {
74136 pmd_t *pmd = pmd_offset(pud, addr);
74137 +#ifdef CONFIG_X86
74138 + if (!pmd_large(*pmd))
74139 +#endif
74140 if (!pmd_none(*pmd)) {
74141 pte_t *ptep, pte;
74142
74143 @@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
74144 static struct vmap_area *alloc_vmap_area(unsigned long size,
74145 unsigned long align,
74146 unsigned long vstart, unsigned long vend,
74147 + int node, gfp_t gfp_mask) __size_overflow(1);
74148 +static struct vmap_area *alloc_vmap_area(unsigned long size,
74149 + unsigned long align,
74150 + unsigned long vstart, unsigned long vend,
74151 int node, gfp_t gfp_mask)
74152 {
74153 struct vmap_area *va;
74154 @@ -1319,6 +1363,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
74155 struct vm_struct *area;
74156
74157 BUG_ON(in_interrupt());
74158 +
74159 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74160 + if (flags & VM_KERNEXEC) {
74161 + if (start != VMALLOC_START || end != VMALLOC_END)
74162 + return NULL;
74163 + start = (unsigned long)MODULES_EXEC_VADDR;
74164 + end = (unsigned long)MODULES_EXEC_END;
74165 + }
74166 +#endif
74167 +
74168 if (flags & VM_IOREMAP) {
74169 int bit = fls(size);
74170
74171 @@ -1551,6 +1605,11 @@ void *vmap(struct page **pages, unsigned int count,
74172 if (count > totalram_pages)
74173 return NULL;
74174
74175 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74176 + if (!(pgprot_val(prot) & _PAGE_NX))
74177 + flags |= VM_KERNEXEC;
74178 +#endif
74179 +
74180 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
74181 __builtin_return_address(0));
74182 if (!area)
74183 @@ -1652,6 +1711,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
74184 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
74185 goto fail;
74186
74187 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74188 + if (!(pgprot_val(prot) & _PAGE_NX))
74189 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
74190 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
74191 + else
74192 +#endif
74193 +
74194 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
74195 start, end, node, gfp_mask, caller);
74196 if (!area)
74197 @@ -1825,10 +1891,9 @@ EXPORT_SYMBOL(vzalloc_node);
74198 * For tight control over page level allocator and protection flags
74199 * use __vmalloc() instead.
74200 */
74201 -
74202 void *vmalloc_exec(unsigned long size)
74203 {
74204 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74205 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
74206 -1, __builtin_return_address(0));
74207 }
74208
74209 @@ -2123,6 +2188,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
74210 unsigned long uaddr = vma->vm_start;
74211 unsigned long usize = vma->vm_end - vma->vm_start;
74212
74213 + BUG_ON(vma->vm_mirror);
74214 +
74215 if ((PAGE_SIZE-1) & (unsigned long)addr)
74216 return -EINVAL;
74217
74218 diff --git a/mm/vmstat.c b/mm/vmstat.c
74219 index f600557..1459fc8 100644
74220 --- a/mm/vmstat.c
74221 +++ b/mm/vmstat.c
74222 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
74223 *
74224 * vm_stat contains the global counters
74225 */
74226 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74227 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74228 EXPORT_SYMBOL(vm_stat);
74229
74230 #ifdef CONFIG_SMP
74231 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
74232 v = p->vm_stat_diff[i];
74233 p->vm_stat_diff[i] = 0;
74234 local_irq_restore(flags);
74235 - atomic_long_add(v, &zone->vm_stat[i]);
74236 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
74237 global_diff[i] += v;
74238 #ifdef CONFIG_NUMA
74239 /* 3 seconds idle till flush */
74240 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
74241
74242 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
74243 if (global_diff[i])
74244 - atomic_long_add(global_diff[i], &vm_stat[i]);
74245 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
74246 }
74247
74248 #endif
74249 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
74250 start_cpu_timer(cpu);
74251 #endif
74252 #ifdef CONFIG_PROC_FS
74253 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74254 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
74255 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
74256 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
74257 + {
74258 + mode_t gr_mode = S_IRUGO;
74259 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74260 + gr_mode = S_IRUSR;
74261 +#endif
74262 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
74263 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
74264 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74265 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
74266 +#else
74267 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
74268 +#endif
74269 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
74270 + }
74271 #endif
74272 return 0;
74273 }
74274 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
74275 index efea35b..9c8dd0b 100644
74276 --- a/net/8021q/vlan.c
74277 +++ b/net/8021q/vlan.c
74278 @@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
74279 err = -EPERM;
74280 if (!capable(CAP_NET_ADMIN))
74281 break;
74282 - if ((args.u.name_type >= 0) &&
74283 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
74284 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
74285 struct vlan_net *vn;
74286
74287 vn = net_generic(net, vlan_net_id);
74288 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
74289 index fccae26..e7ece2f 100644
74290 --- a/net/9p/trans_fd.c
74291 +++ b/net/9p/trans_fd.c
74292 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
74293 oldfs = get_fs();
74294 set_fs(get_ds());
74295 /* The cast to a user pointer is valid due to the set_fs() */
74296 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
74297 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
74298 set_fs(oldfs);
74299
74300 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
74301 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
74302 index 876fbe8..8bbea9f 100644
74303 --- a/net/atm/atm_misc.c
74304 +++ b/net/atm/atm_misc.c
74305 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
74306 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74307 return 1;
74308 atm_return(vcc, truesize);
74309 - atomic_inc(&vcc->stats->rx_drop);
74310 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74311 return 0;
74312 }
74313 EXPORT_SYMBOL(atm_charge);
74314 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
74315 }
74316 }
74317 atm_return(vcc, guess);
74318 - atomic_inc(&vcc->stats->rx_drop);
74319 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74320 return NULL;
74321 }
74322 EXPORT_SYMBOL(atm_alloc_charge);
74323 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
74324
74325 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74326 {
74327 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74328 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74329 __SONET_ITEMS
74330 #undef __HANDLE_ITEM
74331 }
74332 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
74333
74334 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74335 {
74336 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74337 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74338 __SONET_ITEMS
74339 #undef __HANDLE_ITEM
74340 }
74341 diff --git a/net/atm/lec.h b/net/atm/lec.h
74342 index dfc0719..47c5322 100644
74343 --- a/net/atm/lec.h
74344 +++ b/net/atm/lec.h
74345 @@ -48,7 +48,7 @@ struct lane2_ops {
74346 const u8 *tlvs, u32 sizeoftlvs);
74347 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74348 const u8 *tlvs, u32 sizeoftlvs);
74349 -};
74350 +} __no_const;
74351
74352 /*
74353 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
74354 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74355 index 0919a88..a23d54e 100644
74356 --- a/net/atm/mpc.h
74357 +++ b/net/atm/mpc.h
74358 @@ -33,7 +33,7 @@ struct mpoa_client {
74359 struct mpc_parameters parameters; /* parameters for this client */
74360
74361 const struct net_device_ops *old_ops;
74362 - struct net_device_ops new_ops;
74363 + net_device_ops_no_const new_ops;
74364 };
74365
74366
74367 diff --git a/net/atm/proc.c b/net/atm/proc.c
74368 index 0d020de..011c7bb 100644
74369 --- a/net/atm/proc.c
74370 +++ b/net/atm/proc.c
74371 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
74372 const struct k_atm_aal_stats *stats)
74373 {
74374 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
74375 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74376 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74377 - atomic_read(&stats->rx_drop));
74378 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74379 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74380 + atomic_read_unchecked(&stats->rx_drop));
74381 }
74382
74383 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
74384 diff --git a/net/atm/resources.c b/net/atm/resources.c
74385 index 23f45ce..c748f1a 100644
74386 --- a/net/atm/resources.c
74387 +++ b/net/atm/resources.c
74388 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
74389 static void copy_aal_stats(struct k_atm_aal_stats *from,
74390 struct atm_aal_stats *to)
74391 {
74392 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74393 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74394 __AAL_STAT_ITEMS
74395 #undef __HANDLE_ITEM
74396 }
74397 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
74398 static void subtract_aal_stats(struct k_atm_aal_stats *from,
74399 struct atm_aal_stats *to)
74400 {
74401 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74402 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
74403 __AAL_STAT_ITEMS
74404 #undef __HANDLE_ITEM
74405 }
74406 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
74407 index 3512e25..2b33401 100644
74408 --- a/net/batman-adv/bat_iv_ogm.c
74409 +++ b/net/batman-adv/bat_iv_ogm.c
74410 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
74411
74412 /* change sequence number to network order */
74413 batman_ogm_packet->seqno =
74414 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
74415 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
74416
74417 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
74418 batman_ogm_packet->tt_crc = htons((uint16_t)
74419 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
74420 else
74421 batman_ogm_packet->gw_flags = NO_FLAGS;
74422
74423 - atomic_inc(&hard_iface->seqno);
74424 + atomic_inc_unchecked(&hard_iface->seqno);
74425
74426 slide_own_bcast_window(hard_iface);
74427 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
74428 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
74429 return;
74430
74431 /* could be changed by schedule_own_packet() */
74432 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
74433 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
74434
74435 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
74436
74437 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
74438 index 7704df4..beb4e16 100644
74439 --- a/net/batman-adv/hard-interface.c
74440 +++ b/net/batman-adv/hard-interface.c
74441 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
74442 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
74443 dev_add_pack(&hard_iface->batman_adv_ptype);
74444
74445 - atomic_set(&hard_iface->seqno, 1);
74446 - atomic_set(&hard_iface->frag_seqno, 1);
74447 + atomic_set_unchecked(&hard_iface->seqno, 1);
74448 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
74449 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
74450 hard_iface->net_dev->name);
74451
74452 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
74453 index 987c75a..20d6f36 100644
74454 --- a/net/batman-adv/soft-interface.c
74455 +++ b/net/batman-adv/soft-interface.c
74456 @@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
74457
74458 /* set broadcast sequence number */
74459 bcast_packet->seqno =
74460 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
74461 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
74462
74463 add_bcast_packet_to_list(bat_priv, skb, 1);
74464
74465 @@ -843,7 +843,7 @@ struct net_device *softif_create(const char *name)
74466 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
74467
74468 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
74469 - atomic_set(&bat_priv->bcast_seqno, 1);
74470 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
74471 atomic_set(&bat_priv->ttvn, 0);
74472 atomic_set(&bat_priv->tt_local_changes, 0);
74473 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
74474 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
74475 index e9eb043..d174eeb 100644
74476 --- a/net/batman-adv/types.h
74477 +++ b/net/batman-adv/types.h
74478 @@ -38,8 +38,8 @@ struct hard_iface {
74479 int16_t if_num;
74480 char if_status;
74481 struct net_device *net_dev;
74482 - atomic_t seqno;
74483 - atomic_t frag_seqno;
74484 + atomic_unchecked_t seqno;
74485 + atomic_unchecked_t frag_seqno;
74486 unsigned char *packet_buff;
74487 int packet_len;
74488 struct kobject *hardif_obj;
74489 @@ -154,7 +154,7 @@ struct bat_priv {
74490 atomic_t orig_interval; /* uint */
74491 atomic_t hop_penalty; /* uint */
74492 atomic_t log_level; /* uint */
74493 - atomic_t bcast_seqno;
74494 + atomic_unchecked_t bcast_seqno;
74495 atomic_t bcast_queue_left;
74496 atomic_t batman_queue_left;
74497 atomic_t ttvn; /* translation table version number */
74498 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
74499 index 07d1c1d..7e9bea9 100644
74500 --- a/net/batman-adv/unicast.c
74501 +++ b/net/batman-adv/unicast.c
74502 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
74503 frag1->flags = UNI_FRAG_HEAD | large_tail;
74504 frag2->flags = large_tail;
74505
74506 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
74507 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
74508 frag1->seqno = htons(seqno - 1);
74509 frag2->seqno = htons(seqno);
74510
74511 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
74512 index 280953b..cd219bb 100644
74513 --- a/net/bluetooth/hci_conn.c
74514 +++ b/net/bluetooth/hci_conn.c
74515 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
74516 memset(&cp, 0, sizeof(cp));
74517
74518 cp.handle = cpu_to_le16(conn->handle);
74519 - memcpy(cp.ltk, ltk, sizeof(ltk));
74520 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74521
74522 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
74523 }
74524 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
74525 index 32d338c..d24bcdb 100644
74526 --- a/net/bluetooth/l2cap_core.c
74527 +++ b/net/bluetooth/l2cap_core.c
74528 @@ -2418,8 +2418,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
74529 break;
74530
74531 case L2CAP_CONF_RFC:
74532 - if (olen == sizeof(rfc))
74533 - memcpy(&rfc, (void *)val, olen);
74534 + if (olen != sizeof(rfc))
74535 + break;
74536 +
74537 + memcpy(&rfc, (void *)val, olen);
74538
74539 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
74540 rfc.mode != chan->mode)
74541 @@ -2537,8 +2539,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
74542
74543 switch (type) {
74544 case L2CAP_CONF_RFC:
74545 - if (olen == sizeof(rfc))
74546 - memcpy(&rfc, (void *)val, olen);
74547 + if (olen != sizeof(rfc))
74548 + break;
74549 +
74550 + memcpy(&rfc, (void *)val, olen);
74551 goto done;
74552 }
74553 }
74554 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
74555 index 5fe2ff3..10968b5 100644
74556 --- a/net/bridge/netfilter/ebtables.c
74557 +++ b/net/bridge/netfilter/ebtables.c
74558 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
74559 tmp.valid_hooks = t->table->valid_hooks;
74560 }
74561 mutex_unlock(&ebt_mutex);
74562 - if (copy_to_user(user, &tmp, *len) != 0){
74563 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
74564 BUGPRINT("c2u Didn't work\n");
74565 ret = -EFAULT;
74566 break;
74567 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
74568 index a97d97a..6f679ed 100644
74569 --- a/net/caif/caif_socket.c
74570 +++ b/net/caif/caif_socket.c
74571 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
74572 #ifdef CONFIG_DEBUG_FS
74573 struct debug_fs_counter {
74574 atomic_t caif_nr_socks;
74575 - atomic_t caif_sock_create;
74576 - atomic_t num_connect_req;
74577 - atomic_t num_connect_resp;
74578 - atomic_t num_connect_fail_resp;
74579 - atomic_t num_disconnect;
74580 - atomic_t num_remote_shutdown_ind;
74581 - atomic_t num_tx_flow_off_ind;
74582 - atomic_t num_tx_flow_on_ind;
74583 - atomic_t num_rx_flow_off;
74584 - atomic_t num_rx_flow_on;
74585 + atomic_unchecked_t caif_sock_create;
74586 + atomic_unchecked_t num_connect_req;
74587 + atomic_unchecked_t num_connect_resp;
74588 + atomic_unchecked_t num_connect_fail_resp;
74589 + atomic_unchecked_t num_disconnect;
74590 + atomic_unchecked_t num_remote_shutdown_ind;
74591 + atomic_unchecked_t num_tx_flow_off_ind;
74592 + atomic_unchecked_t num_tx_flow_on_ind;
74593 + atomic_unchecked_t num_rx_flow_off;
74594 + atomic_unchecked_t num_rx_flow_on;
74595 };
74596 static struct debug_fs_counter cnt;
74597 #define dbfs_atomic_inc(v) atomic_inc_return(v)
74598 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
74599 #define dbfs_atomic_dec(v) atomic_dec_return(v)
74600 #else
74601 #define dbfs_atomic_inc(v) 0
74602 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74603 atomic_read(&cf_sk->sk.sk_rmem_alloc),
74604 sk_rcvbuf_lowwater(cf_sk));
74605 set_rx_flow_off(cf_sk);
74606 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74607 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74608 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74609 }
74610
74611 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74612 set_rx_flow_off(cf_sk);
74613 if (net_ratelimit())
74614 pr_debug("sending flow OFF due to rmem_schedule\n");
74615 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74616 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74617 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74618 }
74619 skb->dev = NULL;
74620 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
74621 switch (flow) {
74622 case CAIF_CTRLCMD_FLOW_ON_IND:
74623 /* OK from modem to start sending again */
74624 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
74625 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
74626 set_tx_flow_on(cf_sk);
74627 cf_sk->sk.sk_state_change(&cf_sk->sk);
74628 break;
74629
74630 case CAIF_CTRLCMD_FLOW_OFF_IND:
74631 /* Modem asks us to shut up */
74632 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
74633 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
74634 set_tx_flow_off(cf_sk);
74635 cf_sk->sk.sk_state_change(&cf_sk->sk);
74636 break;
74637 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74638 /* We're now connected */
74639 caif_client_register_refcnt(&cf_sk->layer,
74640 cfsk_hold, cfsk_put);
74641 - dbfs_atomic_inc(&cnt.num_connect_resp);
74642 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
74643 cf_sk->sk.sk_state = CAIF_CONNECTED;
74644 set_tx_flow_on(cf_sk);
74645 cf_sk->sk.sk_state_change(&cf_sk->sk);
74646 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74647
74648 case CAIF_CTRLCMD_INIT_FAIL_RSP:
74649 /* Connect request failed */
74650 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
74651 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
74652 cf_sk->sk.sk_err = ECONNREFUSED;
74653 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
74654 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74655 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74656
74657 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
74658 /* Modem has closed this connection, or device is down. */
74659 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
74660 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
74661 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74662 cf_sk->sk.sk_err = ECONNRESET;
74663 set_rx_flow_on(cf_sk);
74664 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
74665 return;
74666
74667 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
74668 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
74669 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
74670 set_rx_flow_on(cf_sk);
74671 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
74672 }
74673 @@ -856,7 +857,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
74674 /*ifindex = id of the interface.*/
74675 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
74676
74677 - dbfs_atomic_inc(&cnt.num_connect_req);
74678 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
74679 cf_sk->layer.receive = caif_sktrecv_cb;
74680
74681 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
74682 @@ -945,7 +946,7 @@ static int caif_release(struct socket *sock)
74683 spin_unlock_bh(&sk->sk_receive_queue.lock);
74684 sock->sk = NULL;
74685
74686 - dbfs_atomic_inc(&cnt.num_disconnect);
74687 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
74688
74689 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
74690 if (cf_sk->debugfs_socket_dir != NULL)
74691 @@ -1124,7 +1125,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
74692 cf_sk->conn_req.protocol = protocol;
74693 /* Increase the number of sockets created. */
74694 dbfs_atomic_inc(&cnt.caif_nr_socks);
74695 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
74696 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
74697 #ifdef CONFIG_DEBUG_FS
74698 if (!IS_ERR(debugfsdir)) {
74699
74700 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
74701 index 5cf5222..6f704ad 100644
74702 --- a/net/caif/cfctrl.c
74703 +++ b/net/caif/cfctrl.c
74704 @@ -9,6 +9,7 @@
74705 #include <linux/stddef.h>
74706 #include <linux/spinlock.h>
74707 #include <linux/slab.h>
74708 +#include <linux/sched.h>
74709 #include <net/caif/caif_layer.h>
74710 #include <net/caif/cfpkt.h>
74711 #include <net/caif/cfctrl.h>
74712 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
74713 memset(&dev_info, 0, sizeof(dev_info));
74714 dev_info.id = 0xff;
74715 cfsrvl_init(&this->serv, 0, &dev_info, false);
74716 - atomic_set(&this->req_seq_no, 1);
74717 - atomic_set(&this->rsp_seq_no, 1);
74718 + atomic_set_unchecked(&this->req_seq_no, 1);
74719 + atomic_set_unchecked(&this->rsp_seq_no, 1);
74720 this->serv.layer.receive = cfctrl_recv;
74721 sprintf(this->serv.layer.name, "ctrl");
74722 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
74723 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
74724 struct cfctrl_request_info *req)
74725 {
74726 spin_lock_bh(&ctrl->info_list_lock);
74727 - atomic_inc(&ctrl->req_seq_no);
74728 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
74729 + atomic_inc_unchecked(&ctrl->req_seq_no);
74730 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74731 list_add_tail(&req->list, &ctrl->list);
74732 spin_unlock_bh(&ctrl->info_list_lock);
74733 }
74734 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74735 if (p != first)
74736 pr_warn("Requests are not received in order\n");
74737
74738 - atomic_set(&ctrl->rsp_seq_no,
74739 + atomic_set_unchecked(&ctrl->rsp_seq_no,
74740 p->sequence_no);
74741 list_del(&p->list);
74742 goto out;
74743 diff --git a/net/can/gw.c b/net/can/gw.c
74744 index 3d79b12..8de85fa 100644
74745 --- a/net/can/gw.c
74746 +++ b/net/can/gw.c
74747 @@ -96,7 +96,7 @@ struct cf_mod {
74748 struct {
74749 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
74750 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
74751 - } csumfunc;
74752 + } __no_const csumfunc;
74753 };
74754
74755
74756 diff --git a/net/compat.c b/net/compat.c
74757 index 6def90e..c6992fa 100644
74758 --- a/net/compat.c
74759 +++ b/net/compat.c
74760 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
74761 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74762 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74763 return -EFAULT;
74764 - kmsg->msg_name = compat_ptr(tmp1);
74765 - kmsg->msg_iov = compat_ptr(tmp2);
74766 - kmsg->msg_control = compat_ptr(tmp3);
74767 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74768 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74769 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74770 return 0;
74771 }
74772
74773 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74774
74775 if (kern_msg->msg_namelen) {
74776 if (mode == VERIFY_READ) {
74777 - int err = move_addr_to_kernel(kern_msg->msg_name,
74778 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74779 kern_msg->msg_namelen,
74780 kern_address);
74781 if (err < 0)
74782 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74783 kern_msg->msg_name = NULL;
74784
74785 tot_len = iov_from_user_compat_to_kern(kern_iov,
74786 - (struct compat_iovec __user *)kern_msg->msg_iov,
74787 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
74788 kern_msg->msg_iovlen);
74789 if (tot_len >= 0)
74790 kern_msg->msg_iov = kern_iov;
74791 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74792
74793 #define CMSG_COMPAT_FIRSTHDR(msg) \
74794 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74795 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74796 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74797 (struct compat_cmsghdr __user *)NULL)
74798
74799 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74800 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74801 (ucmlen) <= (unsigned long) \
74802 ((mhdr)->msg_controllen - \
74803 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74804 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74805
74806 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74807 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74808 {
74809 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74810 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74811 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74812 msg->msg_controllen)
74813 return NULL;
74814 return (struct compat_cmsghdr __user *)ptr;
74815 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74816 {
74817 struct compat_timeval ctv;
74818 struct compat_timespec cts[3];
74819 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74820 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74821 struct compat_cmsghdr cmhdr;
74822 int cmlen;
74823
74824 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74825
74826 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74827 {
74828 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74829 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74830 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74831 int fdnum = scm->fp->count;
74832 struct file **fp = scm->fp->fp;
74833 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74834 return -EFAULT;
74835 old_fs = get_fs();
74836 set_fs(KERNEL_DS);
74837 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74838 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74839 set_fs(old_fs);
74840
74841 return err;
74842 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74843 len = sizeof(ktime);
74844 old_fs = get_fs();
74845 set_fs(KERNEL_DS);
74846 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74847 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74848 set_fs(old_fs);
74849
74850 if (!err) {
74851 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74852 case MCAST_JOIN_GROUP:
74853 case MCAST_LEAVE_GROUP:
74854 {
74855 - struct compat_group_req __user *gr32 = (void *)optval;
74856 + struct compat_group_req __user *gr32 = (void __user *)optval;
74857 struct group_req __user *kgr =
74858 compat_alloc_user_space(sizeof(struct group_req));
74859 u32 interface;
74860 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74861 case MCAST_BLOCK_SOURCE:
74862 case MCAST_UNBLOCK_SOURCE:
74863 {
74864 - struct compat_group_source_req __user *gsr32 = (void *)optval;
74865 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74866 struct group_source_req __user *kgsr = compat_alloc_user_space(
74867 sizeof(struct group_source_req));
74868 u32 interface;
74869 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74870 }
74871 case MCAST_MSFILTER:
74872 {
74873 - struct compat_group_filter __user *gf32 = (void *)optval;
74874 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74875 struct group_filter __user *kgf;
74876 u32 interface, fmode, numsrc;
74877
74878 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74879 char __user *optval, int __user *optlen,
74880 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74881 {
74882 - struct compat_group_filter __user *gf32 = (void *)optval;
74883 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74884 struct group_filter __user *kgf;
74885 int __user *koptlen;
74886 u32 interface, fmode, numsrc;
74887 diff --git a/net/core/datagram.c b/net/core/datagram.c
74888 index 68bbf9f..5ef0d12 100644
74889 --- a/net/core/datagram.c
74890 +++ b/net/core/datagram.c
74891 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74892 }
74893
74894 kfree_skb(skb);
74895 - atomic_inc(&sk->sk_drops);
74896 + atomic_inc_unchecked(&sk->sk_drops);
74897 sk_mem_reclaim_partial(sk);
74898
74899 return err;
74900 diff --git a/net/core/dev.c b/net/core/dev.c
74901 index 0336374..659088a 100644
74902 --- a/net/core/dev.c
74903 +++ b/net/core/dev.c
74904 @@ -1138,10 +1138,14 @@ void dev_load(struct net *net, const char *name)
74905 if (no_module && capable(CAP_NET_ADMIN))
74906 no_module = request_module("netdev-%s", name);
74907 if (no_module && capable(CAP_SYS_MODULE)) {
74908 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74909 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
74910 +#else
74911 if (!request_module("%s", name))
74912 pr_err("Loading kernel module for a network device "
74913 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
74914 "instead\n", name);
74915 +#endif
74916 }
74917 }
74918 EXPORT_SYMBOL(dev_load);
74919 @@ -1605,7 +1609,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74920 {
74921 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74922 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74923 - atomic_long_inc(&dev->rx_dropped);
74924 + atomic_long_inc_unchecked(&dev->rx_dropped);
74925 kfree_skb(skb);
74926 return NET_RX_DROP;
74927 }
74928 @@ -1615,7 +1619,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74929 nf_reset(skb);
74930
74931 if (unlikely(!is_skb_forwardable(dev, skb))) {
74932 - atomic_long_inc(&dev->rx_dropped);
74933 + atomic_long_inc_unchecked(&dev->rx_dropped);
74934 kfree_skb(skb);
74935 return NET_RX_DROP;
74936 }
74937 @@ -2077,7 +2081,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74938
74939 struct dev_gso_cb {
74940 void (*destructor)(struct sk_buff *skb);
74941 -};
74942 +} __no_const;
74943
74944 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74945
74946 @@ -2933,7 +2937,7 @@ enqueue:
74947
74948 local_irq_restore(flags);
74949
74950 - atomic_long_inc(&skb->dev->rx_dropped);
74951 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74952 kfree_skb(skb);
74953 return NET_RX_DROP;
74954 }
74955 @@ -3005,7 +3009,7 @@ int netif_rx_ni(struct sk_buff *skb)
74956 }
74957 EXPORT_SYMBOL(netif_rx_ni);
74958
74959 -static void net_tx_action(struct softirq_action *h)
74960 +static void net_tx_action(void)
74961 {
74962 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74963
74964 @@ -3293,7 +3297,7 @@ ncls:
74965 if (pt_prev) {
74966 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
74967 } else {
74968 - atomic_long_inc(&skb->dev->rx_dropped);
74969 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74970 kfree_skb(skb);
74971 /* Jamal, now you will not able to escape explaining
74972 * me how you were going to use this. :-)
74973 @@ -3853,7 +3857,7 @@ void netif_napi_del(struct napi_struct *napi)
74974 }
74975 EXPORT_SYMBOL(netif_napi_del);
74976
74977 -static void net_rx_action(struct softirq_action *h)
74978 +static void net_rx_action(void)
74979 {
74980 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74981 unsigned long time_limit = jiffies + 2;
74982 @@ -5878,7 +5882,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
74983 } else {
74984 netdev_stats_to_stats64(storage, &dev->stats);
74985 }
74986 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
74987 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
74988 return storage;
74989 }
74990 EXPORT_SYMBOL(dev_get_stats);
74991 diff --git a/net/core/flow.c b/net/core/flow.c
74992 index e318c7e..168b1d0 100644
74993 --- a/net/core/flow.c
74994 +++ b/net/core/flow.c
74995 @@ -61,7 +61,7 @@ struct flow_cache {
74996 struct timer_list rnd_timer;
74997 };
74998
74999 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
75000 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
75001 EXPORT_SYMBOL(flow_cache_genid);
75002 static struct flow_cache flow_cache_global;
75003 static struct kmem_cache *flow_cachep __read_mostly;
75004 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
75005
75006 static int flow_entry_valid(struct flow_cache_entry *fle)
75007 {
75008 - if (atomic_read(&flow_cache_genid) != fle->genid)
75009 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
75010 return 0;
75011 if (fle->object && !fle->object->ops->check(fle->object))
75012 return 0;
75013 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
75014 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
75015 fcp->hash_count++;
75016 }
75017 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
75018 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
75019 flo = fle->object;
75020 if (!flo)
75021 goto ret_object;
75022 @@ -280,7 +280,7 @@ nocache:
75023 }
75024 flo = resolver(net, key, family, dir, flo, ctx);
75025 if (fle) {
75026 - fle->genid = atomic_read(&flow_cache_genid);
75027 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
75028 if (!IS_ERR(flo))
75029 fle->object = flo;
75030 else
75031 diff --git a/net/core/iovec.c b/net/core/iovec.c
75032 index c40f27e..7f49254 100644
75033 --- a/net/core/iovec.c
75034 +++ b/net/core/iovec.c
75035 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75036 if (m->msg_namelen) {
75037 if (mode == VERIFY_READ) {
75038 void __user *namep;
75039 - namep = (void __user __force *) m->msg_name;
75040 + namep = (void __force_user *) m->msg_name;
75041 err = move_addr_to_kernel(namep, m->msg_namelen,
75042 address);
75043 if (err < 0)
75044 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75045 }
75046
75047 size = m->msg_iovlen * sizeof(struct iovec);
75048 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
75049 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
75050 return -EFAULT;
75051
75052 m->msg_iov = iov;
75053 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
75054 index 5c30296..ebe7b61 100644
75055 --- a/net/core/rtnetlink.c
75056 +++ b/net/core/rtnetlink.c
75057 @@ -57,7 +57,7 @@ struct rtnl_link {
75058 rtnl_doit_func doit;
75059 rtnl_dumpit_func dumpit;
75060 rtnl_calcit_func calcit;
75061 -};
75062 +} __no_const;
75063
75064 static DEFINE_MUTEX(rtnl_mutex);
75065
75066 diff --git a/net/core/scm.c b/net/core/scm.c
75067 index ff52ad0..aff1c0f 100644
75068 --- a/net/core/scm.c
75069 +++ b/net/core/scm.c
75070 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
75071 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75072 {
75073 struct cmsghdr __user *cm
75074 - = (__force struct cmsghdr __user *)msg->msg_control;
75075 + = (struct cmsghdr __force_user *)msg->msg_control;
75076 struct cmsghdr cmhdr;
75077 int cmlen = CMSG_LEN(len);
75078 int err;
75079 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75080 err = -EFAULT;
75081 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
75082 goto out;
75083 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
75084 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
75085 goto out;
75086 cmlen = CMSG_SPACE(len);
75087 if (msg->msg_controllen < cmlen)
75088 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
75089 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75090 {
75091 struct cmsghdr __user *cm
75092 - = (__force struct cmsghdr __user*)msg->msg_control;
75093 + = (struct cmsghdr __force_user *)msg->msg_control;
75094
75095 int fdmax = 0;
75096 int fdnum = scm->fp->count;
75097 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75098 if (fdnum < fdmax)
75099 fdmax = fdnum;
75100
75101 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
75102 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
75103 i++, cmfptr++)
75104 {
75105 int new_fd;
75106 diff --git a/net/core/sock.c b/net/core/sock.c
75107 index 02f8dfe..86dfd4a 100644
75108 --- a/net/core/sock.c
75109 +++ b/net/core/sock.c
75110 @@ -341,7 +341,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75111 struct sk_buff_head *list = &sk->sk_receive_queue;
75112
75113 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
75114 - atomic_inc(&sk->sk_drops);
75115 + atomic_inc_unchecked(&sk->sk_drops);
75116 trace_sock_rcvqueue_full(sk, skb);
75117 return -ENOMEM;
75118 }
75119 @@ -351,7 +351,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75120 return err;
75121
75122 if (!sk_rmem_schedule(sk, skb->truesize)) {
75123 - atomic_inc(&sk->sk_drops);
75124 + atomic_inc_unchecked(&sk->sk_drops);
75125 return -ENOBUFS;
75126 }
75127
75128 @@ -371,7 +371,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75129 skb_dst_force(skb);
75130
75131 spin_lock_irqsave(&list->lock, flags);
75132 - skb->dropcount = atomic_read(&sk->sk_drops);
75133 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75134 __skb_queue_tail(list, skb);
75135 spin_unlock_irqrestore(&list->lock, flags);
75136
75137 @@ -391,7 +391,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75138 skb->dev = NULL;
75139
75140 if (sk_rcvqueues_full(sk, skb)) {
75141 - atomic_inc(&sk->sk_drops);
75142 + atomic_inc_unchecked(&sk->sk_drops);
75143 goto discard_and_relse;
75144 }
75145 if (nested)
75146 @@ -409,7 +409,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75147 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
75148 } else if (sk_add_backlog(sk, skb)) {
75149 bh_unlock_sock(sk);
75150 - atomic_inc(&sk->sk_drops);
75151 + atomic_inc_unchecked(&sk->sk_drops);
75152 goto discard_and_relse;
75153 }
75154
75155 @@ -974,7 +974,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75156 if (len > sizeof(peercred))
75157 len = sizeof(peercred);
75158 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
75159 - if (copy_to_user(optval, &peercred, len))
75160 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
75161 return -EFAULT;
75162 goto lenout;
75163 }
75164 @@ -987,7 +987,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75165 return -ENOTCONN;
75166 if (lv < len)
75167 return -EINVAL;
75168 - if (copy_to_user(optval, address, len))
75169 + if (len > sizeof(address) || copy_to_user(optval, address, len))
75170 return -EFAULT;
75171 goto lenout;
75172 }
75173 @@ -1024,7 +1024,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75174
75175 if (len > lv)
75176 len = lv;
75177 - if (copy_to_user(optval, &v, len))
75178 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
75179 return -EFAULT;
75180 lenout:
75181 if (put_user(len, optlen))
75182 @@ -2108,7 +2108,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
75183 */
75184 smp_wmb();
75185 atomic_set(&sk->sk_refcnt, 1);
75186 - atomic_set(&sk->sk_drops, 0);
75187 + atomic_set_unchecked(&sk->sk_drops, 0);
75188 }
75189 EXPORT_SYMBOL(sock_init_data);
75190
75191 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
75192 index b9868e1..849f809 100644
75193 --- a/net/core/sock_diag.c
75194 +++ b/net/core/sock_diag.c
75195 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
75196
75197 int sock_diag_check_cookie(void *sk, __u32 *cookie)
75198 {
75199 +#ifndef CONFIG_GRKERNSEC_HIDESYM
75200 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
75201 cookie[1] != INET_DIAG_NOCOOKIE) &&
75202 ((u32)(unsigned long)sk != cookie[0] ||
75203 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
75204 return -ESTALE;
75205 else
75206 +#endif
75207 return 0;
75208 }
75209 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
75210
75211 void sock_diag_save_cookie(void *sk, __u32 *cookie)
75212 {
75213 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75214 + cookie[0] = 0;
75215 + cookie[1] = 0;
75216 +#else
75217 cookie[0] = (u32)(unsigned long)sk;
75218 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75219 +#endif
75220 }
75221 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
75222
75223 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
75224 index 02e75d1..9a57a7c 100644
75225 --- a/net/decnet/sysctl_net_decnet.c
75226 +++ b/net/decnet/sysctl_net_decnet.c
75227 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
75228
75229 if (len > *lenp) len = *lenp;
75230
75231 - if (copy_to_user(buffer, addr, len))
75232 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
75233 return -EFAULT;
75234
75235 *lenp = len;
75236 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
75237
75238 if (len > *lenp) len = *lenp;
75239
75240 - if (copy_to_user(buffer, devname, len))
75241 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
75242 return -EFAULT;
75243
75244 *lenp = len;
75245 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
75246 index 39a2d29..f39c0fe 100644
75247 --- a/net/econet/Kconfig
75248 +++ b/net/econet/Kconfig
75249 @@ -4,7 +4,7 @@
75250
75251 config ECONET
75252 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75253 - depends on EXPERIMENTAL && INET
75254 + depends on EXPERIMENTAL && INET && BROKEN
75255 ---help---
75256 Econet is a fairly old and slow networking protocol mainly used by
75257 Acorn computers to access file and print servers. It uses native
75258 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
75259 index 92fc5f6..b790d91 100644
75260 --- a/net/ipv4/fib_frontend.c
75261 +++ b/net/ipv4/fib_frontend.c
75262 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
75263 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75264 fib_sync_up(dev);
75265 #endif
75266 - atomic_inc(&net->ipv4.dev_addr_genid);
75267 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75268 rt_cache_flush(dev_net(dev), -1);
75269 break;
75270 case NETDEV_DOWN:
75271 fib_del_ifaddr(ifa, NULL);
75272 - atomic_inc(&net->ipv4.dev_addr_genid);
75273 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75274 if (ifa->ifa_dev->ifa_list == NULL) {
75275 /* Last address was deleted from this interface.
75276 * Disable IP.
75277 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
75278 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75279 fib_sync_up(dev);
75280 #endif
75281 - atomic_inc(&net->ipv4.dev_addr_genid);
75282 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75283 rt_cache_flush(dev_net(dev), -1);
75284 break;
75285 case NETDEV_DOWN:
75286 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
75287 index 80106d8..232e898 100644
75288 --- a/net/ipv4/fib_semantics.c
75289 +++ b/net/ipv4/fib_semantics.c
75290 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
75291 nh->nh_saddr = inet_select_addr(nh->nh_dev,
75292 nh->nh_gw,
75293 nh->nh_parent->fib_scope);
75294 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
75295 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
75296
75297 return nh->nh_saddr;
75298 }
75299 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75300 index 984ec65..97ac518 100644
75301 --- a/net/ipv4/inet_hashtables.c
75302 +++ b/net/ipv4/inet_hashtables.c
75303 @@ -18,12 +18,15 @@
75304 #include <linux/sched.h>
75305 #include <linux/slab.h>
75306 #include <linux/wait.h>
75307 +#include <linux/security.h>
75308
75309 #include <net/inet_connection_sock.h>
75310 #include <net/inet_hashtables.h>
75311 #include <net/secure_seq.h>
75312 #include <net/ip.h>
75313
75314 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75315 +
75316 /*
75317 * Allocate and initialize a new local port bind bucket.
75318 * The bindhash mutex for snum's hash chain must be held here.
75319 @@ -530,6 +533,8 @@ ok:
75320 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
75321 spin_unlock(&head->lock);
75322
75323 + gr_update_task_in_ip_table(current, inet_sk(sk));
75324 +
75325 if (tw) {
75326 inet_twsk_deschedule(tw, death_row);
75327 while (twrefcnt) {
75328 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
75329 index d4d61b6..b81aec8 100644
75330 --- a/net/ipv4/inetpeer.c
75331 +++ b/net/ipv4/inetpeer.c
75332 @@ -487,8 +487,8 @@ relookup:
75333 if (p) {
75334 p->daddr = *daddr;
75335 atomic_set(&p->refcnt, 1);
75336 - atomic_set(&p->rid, 0);
75337 - atomic_set(&p->ip_id_count,
75338 + atomic_set_unchecked(&p->rid, 0);
75339 + atomic_set_unchecked(&p->ip_id_count,
75340 (daddr->family == AF_INET) ?
75341 secure_ip_id(daddr->addr.a4) :
75342 secure_ipv6_id(daddr->addr.a6));
75343 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
75344 index 1f23a57..7180dfe 100644
75345 --- a/net/ipv4/ip_fragment.c
75346 +++ b/net/ipv4/ip_fragment.c
75347 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
75348 return 0;
75349
75350 start = qp->rid;
75351 - end = atomic_inc_return(&peer->rid);
75352 + end = atomic_inc_return_unchecked(&peer->rid);
75353 qp->rid = end;
75354
75355 rc = qp->q.fragments && (end - start) > max;
75356 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
75357 index 8aa87c1..35c3248 100644
75358 --- a/net/ipv4/ip_sockglue.c
75359 +++ b/net/ipv4/ip_sockglue.c
75360 @@ -1112,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75361 len = min_t(unsigned int, len, opt->optlen);
75362 if (put_user(len, optlen))
75363 return -EFAULT;
75364 - if (copy_to_user(optval, opt->__data, len))
75365 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
75366 + copy_to_user(optval, opt->__data, len))
75367 return -EFAULT;
75368 return 0;
75369 }
75370 @@ -1240,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75371 if (sk->sk_type != SOCK_STREAM)
75372 return -ENOPROTOOPT;
75373
75374 - msg.msg_control = optval;
75375 + msg.msg_control = (void __force_kernel *)optval;
75376 msg.msg_controllen = len;
75377 msg.msg_flags = flags;
75378
75379 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
75380 index 6e412a6..6640538 100644
75381 --- a/net/ipv4/ipconfig.c
75382 +++ b/net/ipv4/ipconfig.c
75383 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
75384
75385 mm_segment_t oldfs = get_fs();
75386 set_fs(get_ds());
75387 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75388 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75389 set_fs(oldfs);
75390 return res;
75391 }
75392 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
75393
75394 mm_segment_t oldfs = get_fs();
75395 set_fs(get_ds());
75396 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75397 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75398 set_fs(oldfs);
75399 return res;
75400 }
75401 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
75402
75403 mm_segment_t oldfs = get_fs();
75404 set_fs(get_ds());
75405 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
75406 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
75407 set_fs(oldfs);
75408 return res;
75409 }
75410 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
75411 index b072386..abdebcf 100644
75412 --- a/net/ipv4/ping.c
75413 +++ b/net/ipv4/ping.c
75414 @@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
75415 sk_rmem_alloc_get(sp),
75416 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75417 atomic_read(&sp->sk_refcnt), sp,
75418 - atomic_read(&sp->sk_drops), len);
75419 + atomic_read_unchecked(&sp->sk_drops), len);
75420 }
75421
75422 static int ping_seq_show(struct seq_file *seq, void *v)
75423 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
75424 index 3ccda5a..3c1e61d 100644
75425 --- a/net/ipv4/raw.c
75426 +++ b/net/ipv4/raw.c
75427 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
75428 int raw_rcv(struct sock *sk, struct sk_buff *skb)
75429 {
75430 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
75431 - atomic_inc(&sk->sk_drops);
75432 + atomic_inc_unchecked(&sk->sk_drops);
75433 kfree_skb(skb);
75434 return NET_RX_DROP;
75435 }
75436 @@ -742,16 +742,20 @@ static int raw_init(struct sock *sk)
75437
75438 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
75439 {
75440 + struct icmp_filter filter;
75441 +
75442 if (optlen > sizeof(struct icmp_filter))
75443 optlen = sizeof(struct icmp_filter);
75444 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
75445 + if (copy_from_user(&filter, optval, optlen))
75446 return -EFAULT;
75447 + raw_sk(sk)->filter = filter;
75448 return 0;
75449 }
75450
75451 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
75452 {
75453 int len, ret = -EFAULT;
75454 + struct icmp_filter filter;
75455
75456 if (get_user(len, optlen))
75457 goto out;
75458 @@ -761,8 +765,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
75459 if (len > sizeof(struct icmp_filter))
75460 len = sizeof(struct icmp_filter);
75461 ret = -EFAULT;
75462 - if (put_user(len, optlen) ||
75463 - copy_to_user(optval, &raw_sk(sk)->filter, len))
75464 + filter = raw_sk(sk)->filter;
75465 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
75466 goto out;
75467 ret = 0;
75468 out: return ret;
75469 @@ -990,7 +994,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75470 sk_wmem_alloc_get(sp),
75471 sk_rmem_alloc_get(sp),
75472 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75473 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75474 + atomic_read(&sp->sk_refcnt),
75475 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75476 + NULL,
75477 +#else
75478 + sp,
75479 +#endif
75480 + atomic_read_unchecked(&sp->sk_drops));
75481 }
75482
75483 static int raw_seq_show(struct seq_file *seq, void *v)
75484 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
75485 index 0197747..7adb0dc 100644
75486 --- a/net/ipv4/route.c
75487 +++ b/net/ipv4/route.c
75488 @@ -311,7 +311,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
75489
75490 static inline int rt_genid(struct net *net)
75491 {
75492 - return atomic_read(&net->ipv4.rt_genid);
75493 + return atomic_read_unchecked(&net->ipv4.rt_genid);
75494 }
75495
75496 #ifdef CONFIG_PROC_FS
75497 @@ -935,7 +935,7 @@ static void rt_cache_invalidate(struct net *net)
75498 unsigned char shuffle;
75499
75500 get_random_bytes(&shuffle, sizeof(shuffle));
75501 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
75502 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
75503 inetpeer_invalidate_tree(AF_INET);
75504 }
75505
75506 @@ -3010,7 +3010,7 @@ static int rt_fill_info(struct net *net,
75507 error = rt->dst.error;
75508 if (peer) {
75509 inet_peer_refcheck(rt->peer);
75510 - id = atomic_read(&peer->ip_id_count) & 0xffff;
75511 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
75512 if (peer->tcp_ts_stamp) {
75513 ts = peer->tcp_ts;
75514 tsage = get_seconds() - peer->tcp_ts_stamp;
75515 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
75516 index fd54c5f..96d6407 100644
75517 --- a/net/ipv4/tcp_ipv4.c
75518 +++ b/net/ipv4/tcp_ipv4.c
75519 @@ -88,6 +88,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
75520 int sysctl_tcp_low_latency __read_mostly;
75521 EXPORT_SYMBOL(sysctl_tcp_low_latency);
75522
75523 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75524 +extern int grsec_enable_blackhole;
75525 +#endif
75526
75527 #ifdef CONFIG_TCP_MD5SIG
75528 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
75529 @@ -1638,6 +1641,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
75530 return 0;
75531
75532 reset:
75533 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75534 + if (!grsec_enable_blackhole)
75535 +#endif
75536 tcp_v4_send_reset(rsk, skb);
75537 discard:
75538 kfree_skb(skb);
75539 @@ -1700,12 +1706,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
75540 TCP_SKB_CB(skb)->sacked = 0;
75541
75542 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75543 - if (!sk)
75544 + if (!sk) {
75545 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75546 + ret = 1;
75547 +#endif
75548 goto no_tcp_socket;
75549 -
75550 + }
75551 process:
75552 - if (sk->sk_state == TCP_TIME_WAIT)
75553 + if (sk->sk_state == TCP_TIME_WAIT) {
75554 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75555 + ret = 2;
75556 +#endif
75557 goto do_time_wait;
75558 + }
75559
75560 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75561 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75562 @@ -1755,6 +1768,10 @@ no_tcp_socket:
75563 bad_packet:
75564 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75565 } else {
75566 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75567 + if (!grsec_enable_blackhole || (ret == 1 &&
75568 + (skb->dev->flags & IFF_LOOPBACK)))
75569 +#endif
75570 tcp_v4_send_reset(NULL, skb);
75571 }
75572
75573 @@ -2417,7 +2434,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
75574 0, /* non standard timer */
75575 0, /* open_requests have no inode */
75576 atomic_read(&sk->sk_refcnt),
75577 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75578 + NULL,
75579 +#else
75580 req,
75581 +#endif
75582 len);
75583 }
75584
75585 @@ -2467,7 +2488,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
75586 sock_i_uid(sk),
75587 icsk->icsk_probes_out,
75588 sock_i_ino(sk),
75589 - atomic_read(&sk->sk_refcnt), sk,
75590 + atomic_read(&sk->sk_refcnt),
75591 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75592 + NULL,
75593 +#else
75594 + sk,
75595 +#endif
75596 jiffies_to_clock_t(icsk->icsk_rto),
75597 jiffies_to_clock_t(icsk->icsk_ack.ato),
75598 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
75599 @@ -2495,7 +2521,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
75600 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
75601 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75602 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75603 - atomic_read(&tw->tw_refcnt), tw, len);
75604 + atomic_read(&tw->tw_refcnt),
75605 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75606 + NULL,
75607 +#else
75608 + tw,
75609 +#endif
75610 + len);
75611 }
75612
75613 #define TMPSZ 150
75614 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
75615 index 550e755..25721b3 100644
75616 --- a/net/ipv4/tcp_minisocks.c
75617 +++ b/net/ipv4/tcp_minisocks.c
75618 @@ -27,6 +27,10 @@
75619 #include <net/inet_common.h>
75620 #include <net/xfrm.h>
75621
75622 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75623 +extern int grsec_enable_blackhole;
75624 +#endif
75625 +
75626 int sysctl_tcp_syncookies __read_mostly = 1;
75627 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75628
75629 @@ -753,6 +757,10 @@ listen_overflow:
75630
75631 embryonic_reset:
75632 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75633 +
75634 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75635 + if (!grsec_enable_blackhole)
75636 +#endif
75637 if (!(flg & TCP_FLAG_RST))
75638 req->rsk_ops->send_reset(sk, skb);
75639
75640 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
75641 index 85ee7eb..53277ab 100644
75642 --- a/net/ipv4/tcp_probe.c
75643 +++ b/net/ipv4/tcp_probe.c
75644 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
75645 if (cnt + width >= len)
75646 break;
75647
75648 - if (copy_to_user(buf + cnt, tbuf, width))
75649 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
75650 return -EFAULT;
75651 cnt += width;
75652 }
75653 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
75654 index cd2e072..1fffee2 100644
75655 --- a/net/ipv4/tcp_timer.c
75656 +++ b/net/ipv4/tcp_timer.c
75657 @@ -22,6 +22,10 @@
75658 #include <linux/gfp.h>
75659 #include <net/tcp.h>
75660
75661 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75662 +extern int grsec_lastack_retries;
75663 +#endif
75664 +
75665 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
75666 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
75667 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
75668 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
75669 }
75670 }
75671
75672 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75673 + if ((sk->sk_state == TCP_LAST_ACK) &&
75674 + (grsec_lastack_retries > 0) &&
75675 + (grsec_lastack_retries < retry_until))
75676 + retry_until = grsec_lastack_retries;
75677 +#endif
75678 +
75679 if (retransmits_timed_out(sk, retry_until,
75680 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
75681 /* Has it gone just too far? */
75682 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
75683 index 5d075b5..d907d5f 100644
75684 --- a/net/ipv4/udp.c
75685 +++ b/net/ipv4/udp.c
75686 @@ -86,6 +86,7 @@
75687 #include <linux/types.h>
75688 #include <linux/fcntl.h>
75689 #include <linux/module.h>
75690 +#include <linux/security.h>
75691 #include <linux/socket.h>
75692 #include <linux/sockios.h>
75693 #include <linux/igmp.h>
75694 @@ -108,6 +109,10 @@
75695 #include <trace/events/udp.h>
75696 #include "udp_impl.h"
75697
75698 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75699 +extern int grsec_enable_blackhole;
75700 +#endif
75701 +
75702 struct udp_table udp_table __read_mostly;
75703 EXPORT_SYMBOL(udp_table);
75704
75705 @@ -566,6 +571,9 @@ found:
75706 return s;
75707 }
75708
75709 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75710 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75711 +
75712 /*
75713 * This routine is called by the ICMP module when it gets some
75714 * sort of error condition. If err < 0 then the socket should
75715 @@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75716 dport = usin->sin_port;
75717 if (dport == 0)
75718 return -EINVAL;
75719 +
75720 + err = gr_search_udp_sendmsg(sk, usin);
75721 + if (err)
75722 + return err;
75723 } else {
75724 if (sk->sk_state != TCP_ESTABLISHED)
75725 return -EDESTADDRREQ;
75726 +
75727 + err = gr_search_udp_sendmsg(sk, NULL);
75728 + if (err)
75729 + return err;
75730 +
75731 daddr = inet->inet_daddr;
75732 dport = inet->inet_dport;
75733 /* Open fast path for connected socket.
75734 @@ -1100,7 +1117,7 @@ static unsigned int first_packet_length(struct sock *sk)
75735 udp_lib_checksum_complete(skb)) {
75736 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75737 IS_UDPLITE(sk));
75738 - atomic_inc(&sk->sk_drops);
75739 + atomic_inc_unchecked(&sk->sk_drops);
75740 __skb_unlink(skb, rcvq);
75741 __skb_queue_tail(&list_kill, skb);
75742 }
75743 @@ -1186,6 +1203,10 @@ try_again:
75744 if (!skb)
75745 goto out;
75746
75747 + err = gr_search_udp_recvmsg(sk, skb);
75748 + if (err)
75749 + goto out_free;
75750 +
75751 ulen = skb->len - sizeof(struct udphdr);
75752 copied = len;
75753 if (copied > ulen)
75754 @@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75755
75756 drop:
75757 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75758 - atomic_inc(&sk->sk_drops);
75759 + atomic_inc_unchecked(&sk->sk_drops);
75760 kfree_skb(skb);
75761 return -1;
75762 }
75763 @@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75764 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75765
75766 if (!skb1) {
75767 - atomic_inc(&sk->sk_drops);
75768 + atomic_inc_unchecked(&sk->sk_drops);
75769 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75770 IS_UDPLITE(sk));
75771 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75772 @@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75773 goto csum_error;
75774
75775 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75776 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75777 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75778 +#endif
75779 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75780
75781 /*
75782 @@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75783 sk_wmem_alloc_get(sp),
75784 sk_rmem_alloc_get(sp),
75785 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75786 - atomic_read(&sp->sk_refcnt), sp,
75787 - atomic_read(&sp->sk_drops), len);
75788 + atomic_read(&sp->sk_refcnt),
75789 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75790 + NULL,
75791 +#else
75792 + sp,
75793 +#endif
75794 + atomic_read_unchecked(&sp->sk_drops), len);
75795 }
75796
75797 int udp4_seq_show(struct seq_file *seq, void *v)
75798 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75799 index 6b8ebc5..1d624f4 100644
75800 --- a/net/ipv6/addrconf.c
75801 +++ b/net/ipv6/addrconf.c
75802 @@ -2145,7 +2145,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75803 p.iph.ihl = 5;
75804 p.iph.protocol = IPPROTO_IPV6;
75805 p.iph.ttl = 64;
75806 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75807 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75808
75809 if (ops->ndo_do_ioctl) {
75810 mm_segment_t oldfs = get_fs();
75811 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75812 index 02dd203..e03fcc9 100644
75813 --- a/net/ipv6/inet6_connection_sock.c
75814 +++ b/net/ipv6/inet6_connection_sock.c
75815 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75816 #ifdef CONFIG_XFRM
75817 {
75818 struct rt6_info *rt = (struct rt6_info *)dst;
75819 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75820 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75821 }
75822 #endif
75823 }
75824 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75825 #ifdef CONFIG_XFRM
75826 if (dst) {
75827 struct rt6_info *rt = (struct rt6_info *)dst;
75828 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75829 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75830 __sk_dst_reset(sk);
75831 dst = NULL;
75832 }
75833 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75834 index 18a2719..779f36a 100644
75835 --- a/net/ipv6/ipv6_sockglue.c
75836 +++ b/net/ipv6/ipv6_sockglue.c
75837 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75838 if (sk->sk_type != SOCK_STREAM)
75839 return -ENOPROTOOPT;
75840
75841 - msg.msg_control = optval;
75842 + msg.msg_control = (void __force_kernel *)optval;
75843 msg.msg_controllen = len;
75844 msg.msg_flags = flags;
75845
75846 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75847 index d02f7e4..2d2a0f1 100644
75848 --- a/net/ipv6/raw.c
75849 +++ b/net/ipv6/raw.c
75850 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
75851 {
75852 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
75853 skb_checksum_complete(skb)) {
75854 - atomic_inc(&sk->sk_drops);
75855 + atomic_inc_unchecked(&sk->sk_drops);
75856 kfree_skb(skb);
75857 return NET_RX_DROP;
75858 }
75859 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75860 struct raw6_sock *rp = raw6_sk(sk);
75861
75862 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75863 - atomic_inc(&sk->sk_drops);
75864 + atomic_inc_unchecked(&sk->sk_drops);
75865 kfree_skb(skb);
75866 return NET_RX_DROP;
75867 }
75868 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75869
75870 if (inet->hdrincl) {
75871 if (skb_checksum_complete(skb)) {
75872 - atomic_inc(&sk->sk_drops);
75873 + atomic_inc_unchecked(&sk->sk_drops);
75874 kfree_skb(skb);
75875 return NET_RX_DROP;
75876 }
75877 @@ -602,7 +602,7 @@ out:
75878 return err;
75879 }
75880
75881 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75882 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75883 struct flowi6 *fl6, struct dst_entry **dstp,
75884 unsigned int flags)
75885 {
75886 @@ -912,12 +912,15 @@ do_confirm:
75887 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75888 char __user *optval, int optlen)
75889 {
75890 + struct icmp6_filter filter;
75891 +
75892 switch (optname) {
75893 case ICMPV6_FILTER:
75894 if (optlen > sizeof(struct icmp6_filter))
75895 optlen = sizeof(struct icmp6_filter);
75896 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75897 + if (copy_from_user(&filter, optval, optlen))
75898 return -EFAULT;
75899 + raw6_sk(sk)->filter = filter;
75900 return 0;
75901 default:
75902 return -ENOPROTOOPT;
75903 @@ -930,6 +933,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75904 char __user *optval, int __user *optlen)
75905 {
75906 int len;
75907 + struct icmp6_filter filter;
75908
75909 switch (optname) {
75910 case ICMPV6_FILTER:
75911 @@ -941,7 +945,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75912 len = sizeof(struct icmp6_filter);
75913 if (put_user(len, optlen))
75914 return -EFAULT;
75915 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
75916 + filter = raw6_sk(sk)->filter;
75917 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
75918 return -EFAULT;
75919 return 0;
75920 default:
75921 @@ -1248,7 +1253,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75922 0, 0L, 0,
75923 sock_i_uid(sp), 0,
75924 sock_i_ino(sp),
75925 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75926 + atomic_read(&sp->sk_refcnt),
75927 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75928 + NULL,
75929 +#else
75930 + sp,
75931 +#endif
75932 + atomic_read_unchecked(&sp->sk_drops));
75933 }
75934
75935 static int raw6_seq_show(struct seq_file *seq, void *v)
75936 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75937 index a89ca8d..12e66b0 100644
75938 --- a/net/ipv6/tcp_ipv6.c
75939 +++ b/net/ipv6/tcp_ipv6.c
75940 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
75941 }
75942 #endif
75943
75944 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75945 +extern int grsec_enable_blackhole;
75946 +#endif
75947 +
75948 static void tcp_v6_hash(struct sock *sk)
75949 {
75950 if (sk->sk_state != TCP_CLOSE) {
75951 @@ -1654,6 +1658,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
75952 return 0;
75953
75954 reset:
75955 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75956 + if (!grsec_enable_blackhole)
75957 +#endif
75958 tcp_v6_send_reset(sk, skb);
75959 discard:
75960 if (opt_skb)
75961 @@ -1733,12 +1740,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
75962 TCP_SKB_CB(skb)->sacked = 0;
75963
75964 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75965 - if (!sk)
75966 + if (!sk) {
75967 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75968 + ret = 1;
75969 +#endif
75970 goto no_tcp_socket;
75971 + }
75972
75973 process:
75974 - if (sk->sk_state == TCP_TIME_WAIT)
75975 + if (sk->sk_state == TCP_TIME_WAIT) {
75976 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75977 + ret = 2;
75978 +#endif
75979 goto do_time_wait;
75980 + }
75981
75982 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75983 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75984 @@ -1786,6 +1801,10 @@ no_tcp_socket:
75985 bad_packet:
75986 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75987 } else {
75988 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75989 + if (!grsec_enable_blackhole || (ret == 1 &&
75990 + (skb->dev->flags & IFF_LOOPBACK)))
75991 +#endif
75992 tcp_v6_send_reset(NULL, skb);
75993 }
75994
75995 @@ -2047,7 +2066,13 @@ static void get_openreq6(struct seq_file *seq,
75996 uid,
75997 0, /* non standard timer */
75998 0, /* open_requests have no inode */
75999 - 0, req);
76000 + 0,
76001 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76002 + NULL
76003 +#else
76004 + req
76005 +#endif
76006 + );
76007 }
76008
76009 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76010 @@ -2097,7 +2122,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76011 sock_i_uid(sp),
76012 icsk->icsk_probes_out,
76013 sock_i_ino(sp),
76014 - atomic_read(&sp->sk_refcnt), sp,
76015 + atomic_read(&sp->sk_refcnt),
76016 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76017 + NULL,
76018 +#else
76019 + sp,
76020 +#endif
76021 jiffies_to_clock_t(icsk->icsk_rto),
76022 jiffies_to_clock_t(icsk->icsk_ack.ato),
76023 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
76024 @@ -2132,7 +2162,13 @@ static void get_timewait6_sock(struct seq_file *seq,
76025 dest->s6_addr32[2], dest->s6_addr32[3], destp,
76026 tw->tw_substate, 0, 0,
76027 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76028 - atomic_read(&tw->tw_refcnt), tw);
76029 + atomic_read(&tw->tw_refcnt),
76030 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76031 + NULL
76032 +#else
76033 + tw
76034 +#endif
76035 + );
76036 }
76037
76038 static int tcp6_seq_show(struct seq_file *seq, void *v)
76039 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
76040 index 4f96b5c..75543d7 100644
76041 --- a/net/ipv6/udp.c
76042 +++ b/net/ipv6/udp.c
76043 @@ -50,6 +50,10 @@
76044 #include <linux/seq_file.h>
76045 #include "udp_impl.h"
76046
76047 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76048 +extern int grsec_enable_blackhole;
76049 +#endif
76050 +
76051 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
76052 {
76053 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
76054 @@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
76055
76056 return 0;
76057 drop:
76058 - atomic_inc(&sk->sk_drops);
76059 + atomic_inc_unchecked(&sk->sk_drops);
76060 drop_no_sk_drops_inc:
76061 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76062 kfree_skb(skb);
76063 @@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76064 continue;
76065 }
76066 drop:
76067 - atomic_inc(&sk->sk_drops);
76068 + atomic_inc_unchecked(&sk->sk_drops);
76069 UDP6_INC_STATS_BH(sock_net(sk),
76070 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
76071 UDP6_INC_STATS_BH(sock_net(sk),
76072 @@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76073 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76074 proto == IPPROTO_UDPLITE);
76075
76076 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76077 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76078 +#endif
76079 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
76080
76081 kfree_skb(skb);
76082 @@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76083 if (!sock_owned_by_user(sk))
76084 udpv6_queue_rcv_skb(sk, skb);
76085 else if (sk_add_backlog(sk, skb)) {
76086 - atomic_inc(&sk->sk_drops);
76087 + atomic_inc_unchecked(&sk->sk_drops);
76088 bh_unlock_sock(sk);
76089 sock_put(sk);
76090 goto discard;
76091 @@ -1410,8 +1417,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
76092 0, 0L, 0,
76093 sock_i_uid(sp), 0,
76094 sock_i_ino(sp),
76095 - atomic_read(&sp->sk_refcnt), sp,
76096 - atomic_read(&sp->sk_drops));
76097 + atomic_read(&sp->sk_refcnt),
76098 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76099 + NULL,
76100 +#else
76101 + sp,
76102 +#endif
76103 + atomic_read_unchecked(&sp->sk_drops));
76104 }
76105
76106 int udp6_seq_show(struct seq_file *seq, void *v)
76107 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
76108 index 253695d..9481ce8 100644
76109 --- a/net/irda/ircomm/ircomm_tty.c
76110 +++ b/net/irda/ircomm/ircomm_tty.c
76111 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76112 add_wait_queue(&self->open_wait, &wait);
76113
76114 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76115 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76116 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76117
76118 /* As far as I can see, we protect open_count - Jean II */
76119 spin_lock_irqsave(&self->spinlock, flags);
76120 if (!tty_hung_up_p(filp)) {
76121 extra_count = 1;
76122 - self->open_count--;
76123 + local_dec(&self->open_count);
76124 }
76125 spin_unlock_irqrestore(&self->spinlock, flags);
76126 - self->blocked_open++;
76127 + local_inc(&self->blocked_open);
76128
76129 while (1) {
76130 if (tty->termios->c_cflag & CBAUD) {
76131 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76132 }
76133
76134 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76135 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76136 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76137
76138 schedule();
76139 }
76140 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76141 if (extra_count) {
76142 /* ++ is not atomic, so this should be protected - Jean II */
76143 spin_lock_irqsave(&self->spinlock, flags);
76144 - self->open_count++;
76145 + local_inc(&self->open_count);
76146 spin_unlock_irqrestore(&self->spinlock, flags);
76147 }
76148 - self->blocked_open--;
76149 + local_dec(&self->blocked_open);
76150
76151 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76152 - __FILE__,__LINE__, tty->driver->name, self->open_count);
76153 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
76154
76155 if (!retval)
76156 self->flags |= ASYNC_NORMAL_ACTIVE;
76157 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
76158 }
76159 /* ++ is not atomic, so this should be protected - Jean II */
76160 spin_lock_irqsave(&self->spinlock, flags);
76161 - self->open_count++;
76162 + local_inc(&self->open_count);
76163
76164 tty->driver_data = self;
76165 self->tty = tty;
76166 spin_unlock_irqrestore(&self->spinlock, flags);
76167
76168 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76169 - self->line, self->open_count);
76170 + self->line, local_read(&self->open_count));
76171
76172 /* Not really used by us, but lets do it anyway */
76173 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
76174 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76175 return;
76176 }
76177
76178 - if ((tty->count == 1) && (self->open_count != 1)) {
76179 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
76180 /*
76181 * Uh, oh. tty->count is 1, which means that the tty
76182 * structure will be freed. state->count should always
76183 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76184 */
76185 IRDA_DEBUG(0, "%s(), bad serial port count; "
76186 "tty->count is 1, state->count is %d\n", __func__ ,
76187 - self->open_count);
76188 - self->open_count = 1;
76189 + local_read(&self->open_count));
76190 + local_set(&self->open_count, 1);
76191 }
76192
76193 - if (--self->open_count < 0) {
76194 + if (local_dec_return(&self->open_count) < 0) {
76195 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76196 - __func__, self->line, self->open_count);
76197 - self->open_count = 0;
76198 + __func__, self->line, local_read(&self->open_count));
76199 + local_set(&self->open_count, 0);
76200 }
76201 - if (self->open_count) {
76202 + if (local_read(&self->open_count)) {
76203 spin_unlock_irqrestore(&self->spinlock, flags);
76204
76205 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
76206 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76207 tty->closing = 0;
76208 self->tty = NULL;
76209
76210 - if (self->blocked_open) {
76211 + if (local_read(&self->blocked_open)) {
76212 if (self->close_delay)
76213 schedule_timeout_interruptible(self->close_delay);
76214 wake_up_interruptible(&self->open_wait);
76215 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
76216 spin_lock_irqsave(&self->spinlock, flags);
76217 self->flags &= ~ASYNC_NORMAL_ACTIVE;
76218 self->tty = NULL;
76219 - self->open_count = 0;
76220 + local_set(&self->open_count, 0);
76221 spin_unlock_irqrestore(&self->spinlock, flags);
76222
76223 wake_up_interruptible(&self->open_wait);
76224 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
76225 seq_putc(m, '\n');
76226
76227 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
76228 - seq_printf(m, "Open count: %d\n", self->open_count);
76229 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
76230 seq_printf(m, "Max data size: %d\n", self->max_data_size);
76231 seq_printf(m, "Max header size: %d\n", self->max_header_size);
76232
76233 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
76234 index d5c5b8f..33beff0 100644
76235 --- a/net/iucv/af_iucv.c
76236 +++ b/net/iucv/af_iucv.c
76237 @@ -764,10 +764,10 @@ static int iucv_sock_autobind(struct sock *sk)
76238
76239 write_lock_bh(&iucv_sk_list.lock);
76240
76241 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
76242 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76243 while (__iucv_get_sock_by_name(name)) {
76244 sprintf(name, "%08x",
76245 - atomic_inc_return(&iucv_sk_list.autobind_name));
76246 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76247 }
76248
76249 write_unlock_bh(&iucv_sk_list.lock);
76250 diff --git a/net/key/af_key.c b/net/key/af_key.c
76251 index 11dbb22..c20f667 100644
76252 --- a/net/key/af_key.c
76253 +++ b/net/key/af_key.c
76254 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
76255 static u32 get_acqseq(void)
76256 {
76257 u32 res;
76258 - static atomic_t acqseq;
76259 + static atomic_unchecked_t acqseq;
76260
76261 do {
76262 - res = atomic_inc_return(&acqseq);
76263 + res = atomic_inc_return_unchecked(&acqseq);
76264 } while (!res);
76265 return res;
76266 }
76267 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
76268 index 2f0642d..e5c6fba 100644
76269 --- a/net/mac80211/ieee80211_i.h
76270 +++ b/net/mac80211/ieee80211_i.h
76271 @@ -28,6 +28,7 @@
76272 #include <net/ieee80211_radiotap.h>
76273 #include <net/cfg80211.h>
76274 #include <net/mac80211.h>
76275 +#include <asm/local.h>
76276 #include "key.h"
76277 #include "sta_info.h"
76278
76279 @@ -781,7 +782,7 @@ struct ieee80211_local {
76280 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
76281 spinlock_t queue_stop_reason_lock;
76282
76283 - int open_count;
76284 + local_t open_count;
76285 int monitors, cooked_mntrs;
76286 /* number of interfaces with corresponding FIF_ flags */
76287 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
76288 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
76289 index 8e2137b..2974283 100644
76290 --- a/net/mac80211/iface.c
76291 +++ b/net/mac80211/iface.c
76292 @@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76293 break;
76294 }
76295
76296 - if (local->open_count == 0) {
76297 + if (local_read(&local->open_count) == 0) {
76298 res = drv_start(local);
76299 if (res)
76300 goto err_del_bss;
76301 @@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76302 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
76303
76304 if (!is_valid_ether_addr(dev->dev_addr)) {
76305 - if (!local->open_count)
76306 + if (!local_read(&local->open_count))
76307 drv_stop(local);
76308 return -EADDRNOTAVAIL;
76309 }
76310 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76311 mutex_unlock(&local->mtx);
76312
76313 if (coming_up)
76314 - local->open_count++;
76315 + local_inc(&local->open_count);
76316
76317 if (hw_reconf_flags)
76318 ieee80211_hw_config(local, hw_reconf_flags);
76319 @@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76320 err_del_interface:
76321 drv_remove_interface(local, sdata);
76322 err_stop:
76323 - if (!local->open_count)
76324 + if (!local_read(&local->open_count))
76325 drv_stop(local);
76326 err_del_bss:
76327 sdata->bss = NULL;
76328 @@ -489,7 +489,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76329 }
76330
76331 if (going_down)
76332 - local->open_count--;
76333 + local_dec(&local->open_count);
76334
76335 switch (sdata->vif.type) {
76336 case NL80211_IFTYPE_AP_VLAN:
76337 @@ -548,7 +548,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76338
76339 ieee80211_recalc_ps(local, -1);
76340
76341 - if (local->open_count == 0) {
76342 + if (local_read(&local->open_count) == 0) {
76343 if (local->ops->napi_poll)
76344 napi_disable(&local->napi);
76345 ieee80211_clear_tx_pending(local);
76346 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
76347 index b142bd4..a651749 100644
76348 --- a/net/mac80211/main.c
76349 +++ b/net/mac80211/main.c
76350 @@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
76351 local->hw.conf.power_level = power;
76352 }
76353
76354 - if (changed && local->open_count) {
76355 + if (changed && local_read(&local->open_count)) {
76356 ret = drv_config(local, changed);
76357 /*
76358 * Goal:
76359 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
76360 index 596efaf..8f1911f 100644
76361 --- a/net/mac80211/pm.c
76362 +++ b/net/mac80211/pm.c
76363 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76364 struct ieee80211_sub_if_data *sdata;
76365 struct sta_info *sta;
76366
76367 - if (!local->open_count)
76368 + if (!local_read(&local->open_count))
76369 goto suspend;
76370
76371 ieee80211_scan_cancel(local);
76372 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76373 cancel_work_sync(&local->dynamic_ps_enable_work);
76374 del_timer_sync(&local->dynamic_ps_timer);
76375
76376 - local->wowlan = wowlan && local->open_count;
76377 + local->wowlan = wowlan && local_read(&local->open_count);
76378 if (local->wowlan) {
76379 int err = drv_suspend(local, wowlan);
76380 if (err < 0) {
76381 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76382 }
76383
76384 /* stop hardware - this must stop RX */
76385 - if (local->open_count)
76386 + if (local_read(&local->open_count))
76387 ieee80211_stop_device(local);
76388
76389 suspend:
76390 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
76391 index f9b8e81..bb89b46 100644
76392 --- a/net/mac80211/rate.c
76393 +++ b/net/mac80211/rate.c
76394 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
76395
76396 ASSERT_RTNL();
76397
76398 - if (local->open_count)
76399 + if (local_read(&local->open_count))
76400 return -EBUSY;
76401
76402 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
76403 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
76404 index c97a065..ff61928 100644
76405 --- a/net/mac80211/rc80211_pid_debugfs.c
76406 +++ b/net/mac80211/rc80211_pid_debugfs.c
76407 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
76408
76409 spin_unlock_irqrestore(&events->lock, status);
76410
76411 - if (copy_to_user(buf, pb, p))
76412 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
76413 return -EFAULT;
76414
76415 return p;
76416 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
76417 index 9919892..8c49803 100644
76418 --- a/net/mac80211/util.c
76419 +++ b/net/mac80211/util.c
76420 @@ -1143,7 +1143,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
76421 }
76422 #endif
76423 /* everything else happens only if HW was up & running */
76424 - if (!local->open_count)
76425 + if (!local_read(&local->open_count))
76426 goto wake_up;
76427
76428 /*
76429 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
76430 index f8ac4ef..b02560b 100644
76431 --- a/net/netfilter/Kconfig
76432 +++ b/net/netfilter/Kconfig
76433 @@ -806,6 +806,16 @@ config NETFILTER_XT_MATCH_ESP
76434
76435 To compile it as a module, choose M here. If unsure, say N.
76436
76437 +config NETFILTER_XT_MATCH_GRADM
76438 + tristate '"gradm" match support'
76439 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
76440 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
76441 + ---help---
76442 + The gradm match allows matching on whether grsecurity RBAC is enabled.
76443 + It is useful when iptables rules are applied early on bootup to
76444 + prevent connections to the machine (except from a trusted host)
76445 + while the RBAC system is disabled.
76446 +
76447 config NETFILTER_XT_MATCH_HASHLIMIT
76448 tristate '"hashlimit" match support'
76449 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
76450 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
76451 index 40f4c3d..0d5dd6b 100644
76452 --- a/net/netfilter/Makefile
76453 +++ b/net/netfilter/Makefile
76454 @@ -83,6 +83,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
76455 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
76456 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
76457 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
76458 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
76459 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
76460 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76461 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
76462 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
76463 index 29fa5ba..8debc79 100644
76464 --- a/net/netfilter/ipvs/ip_vs_conn.c
76465 +++ b/net/netfilter/ipvs/ip_vs_conn.c
76466 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
76467 /* Increase the refcnt counter of the dest */
76468 atomic_inc(&dest->refcnt);
76469
76470 - conn_flags = atomic_read(&dest->conn_flags);
76471 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
76472 if (cp->protocol != IPPROTO_UDP)
76473 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
76474 /* Bind with the destination and its corresponding transmitter */
76475 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
76476 atomic_set(&cp->refcnt, 1);
76477
76478 atomic_set(&cp->n_control, 0);
76479 - atomic_set(&cp->in_pkts, 0);
76480 + atomic_set_unchecked(&cp->in_pkts, 0);
76481
76482 atomic_inc(&ipvs->conn_count);
76483 if (flags & IP_VS_CONN_F_NO_CPORT)
76484 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
76485
76486 /* Don't drop the entry if its number of incoming packets is not
76487 located in [0, 8] */
76488 - i = atomic_read(&cp->in_pkts);
76489 + i = atomic_read_unchecked(&cp->in_pkts);
76490 if (i > 8 || i < 0) return 0;
76491
76492 if (!todrop_rate[i]) return 0;
76493 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
76494 index 2555816..31492d9 100644
76495 --- a/net/netfilter/ipvs/ip_vs_core.c
76496 +++ b/net/netfilter/ipvs/ip_vs_core.c
76497 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
76498 ret = cp->packet_xmit(skb, cp, pd->pp);
76499 /* do not touch skb anymore */
76500
76501 - atomic_inc(&cp->in_pkts);
76502 + atomic_inc_unchecked(&cp->in_pkts);
76503 ip_vs_conn_put(cp);
76504 return ret;
76505 }
76506 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
76507 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
76508 pkts = sysctl_sync_threshold(ipvs);
76509 else
76510 - pkts = atomic_add_return(1, &cp->in_pkts);
76511 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76512
76513 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
76514 cp->protocol == IPPROTO_SCTP) {
76515 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
76516 index b3afe18..08ec940 100644
76517 --- a/net/netfilter/ipvs/ip_vs_ctl.c
76518 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
76519 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
76520 ip_vs_rs_hash(ipvs, dest);
76521 write_unlock_bh(&ipvs->rs_lock);
76522 }
76523 - atomic_set(&dest->conn_flags, conn_flags);
76524 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
76525
76526 /* bind the service */
76527 if (!dest->svc) {
76528 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76529 " %-7s %-6d %-10d %-10d\n",
76530 &dest->addr.in6,
76531 ntohs(dest->port),
76532 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76533 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76534 atomic_read(&dest->weight),
76535 atomic_read(&dest->activeconns),
76536 atomic_read(&dest->inactconns));
76537 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76538 "%-7s %-6d %-10d %-10d\n",
76539 ntohl(dest->addr.ip),
76540 ntohs(dest->port),
76541 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76542 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76543 atomic_read(&dest->weight),
76544 atomic_read(&dest->activeconns),
76545 atomic_read(&dest->inactconns));
76546 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
76547
76548 entry.addr = dest->addr.ip;
76549 entry.port = dest->port;
76550 - entry.conn_flags = atomic_read(&dest->conn_flags);
76551 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
76552 entry.weight = atomic_read(&dest->weight);
76553 entry.u_threshold = dest->u_threshold;
76554 entry.l_threshold = dest->l_threshold;
76555 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
76556 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
76557
76558 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
76559 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76560 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76561 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
76562 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
76563 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
76564 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
76565 index 8a0d6d6..90ec197 100644
76566 --- a/net/netfilter/ipvs/ip_vs_sync.c
76567 +++ b/net/netfilter/ipvs/ip_vs_sync.c
76568 @@ -649,7 +649,7 @@ control:
76569 * i.e only increment in_pkts for Templates.
76570 */
76571 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
76572 - int pkts = atomic_add_return(1, &cp->in_pkts);
76573 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76574
76575 if (pkts % sysctl_sync_period(ipvs) != 1)
76576 return;
76577 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
76578
76579 if (opt)
76580 memcpy(&cp->in_seq, opt, sizeof(*opt));
76581 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76582 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76583 cp->state = state;
76584 cp->old_state = cp->state;
76585 /*
76586 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
76587 index 7fd66de..e6fb361 100644
76588 --- a/net/netfilter/ipvs/ip_vs_xmit.c
76589 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
76590 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
76591 else
76592 rc = NF_ACCEPT;
76593 /* do not touch skb anymore */
76594 - atomic_inc(&cp->in_pkts);
76595 + atomic_inc_unchecked(&cp->in_pkts);
76596 goto out;
76597 }
76598
76599 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
76600 else
76601 rc = NF_ACCEPT;
76602 /* do not touch skb anymore */
76603 - atomic_inc(&cp->in_pkts);
76604 + atomic_inc_unchecked(&cp->in_pkts);
76605 goto out;
76606 }
76607
76608 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
76609 index 66b2c54..c7884e3 100644
76610 --- a/net/netfilter/nfnetlink_log.c
76611 +++ b/net/netfilter/nfnetlink_log.c
76612 @@ -70,7 +70,7 @@ struct nfulnl_instance {
76613 };
76614
76615 static DEFINE_SPINLOCK(instances_lock);
76616 -static atomic_t global_seq;
76617 +static atomic_unchecked_t global_seq;
76618
76619 #define INSTANCE_BUCKETS 16
76620 static struct hlist_head instance_table[INSTANCE_BUCKETS];
76621 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
76622 /* global sequence number */
76623 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
76624 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
76625 - htonl(atomic_inc_return(&global_seq)));
76626 + htonl(atomic_inc_return_unchecked(&global_seq)));
76627
76628 if (data_len) {
76629 struct nlattr *nla;
76630 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
76631 new file mode 100644
76632 index 0000000..6905327
76633 --- /dev/null
76634 +++ b/net/netfilter/xt_gradm.c
76635 @@ -0,0 +1,51 @@
76636 +/*
76637 + * gradm match for netfilter
76638 + * Copyright © Zbigniew Krzystolik, 2010
76639 + *
76640 + * This program is free software; you can redistribute it and/or modify
76641 + * it under the terms of the GNU General Public License; either version
76642 + * 2 or 3 as published by the Free Software Foundation.
76643 + */
76644 +#include <linux/module.h>
76645 +#include <linux/moduleparam.h>
76646 +#include <linux/skbuff.h>
76647 +#include <linux/netfilter/x_tables.h>
76648 +#include <linux/grsecurity.h>
76649 +#include <linux/netfilter/xt_gradm.h>
76650 +
76651 +static bool
76652 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
76653 +{
76654 + const struct xt_gradm_mtinfo *info = par->matchinfo;
76655 + bool retval = false;
76656 + if (gr_acl_is_enabled())
76657 + retval = true;
76658 + return retval ^ info->invflags;
76659 +}
76660 +
76661 +static struct xt_match gradm_mt_reg __read_mostly = {
76662 + .name = "gradm",
76663 + .revision = 0,
76664 + .family = NFPROTO_UNSPEC,
76665 + .match = gradm_mt,
76666 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
76667 + .me = THIS_MODULE,
76668 +};
76669 +
76670 +static int __init gradm_mt_init(void)
76671 +{
76672 + return xt_register_match(&gradm_mt_reg);
76673 +}
76674 +
76675 +static void __exit gradm_mt_exit(void)
76676 +{
76677 + xt_unregister_match(&gradm_mt_reg);
76678 +}
76679 +
76680 +module_init(gradm_mt_init);
76681 +module_exit(gradm_mt_exit);
76682 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
76683 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
76684 +MODULE_LICENSE("GPL");
76685 +MODULE_ALIAS("ipt_gradm");
76686 +MODULE_ALIAS("ip6t_gradm");
76687 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
76688 index 4fe4fb4..87a89e5 100644
76689 --- a/net/netfilter/xt_statistic.c
76690 +++ b/net/netfilter/xt_statistic.c
76691 @@ -19,7 +19,7 @@
76692 #include <linux/module.h>
76693
76694 struct xt_statistic_priv {
76695 - atomic_t count;
76696 + atomic_unchecked_t count;
76697 } ____cacheline_aligned_in_smp;
76698
76699 MODULE_LICENSE("GPL");
76700 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
76701 break;
76702 case XT_STATISTIC_MODE_NTH:
76703 do {
76704 - oval = atomic_read(&info->master->count);
76705 + oval = atomic_read_unchecked(&info->master->count);
76706 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
76707 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
76708 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
76709 if (nval == 0)
76710 ret = !ret;
76711 break;
76712 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
76713 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
76714 if (info->master == NULL)
76715 return -ENOMEM;
76716 - atomic_set(&info->master->count, info->u.nth.count);
76717 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
76718
76719 return 0;
76720 }
76721 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
76722 index 467af9c..8f415cc 100644
76723 --- a/net/netlink/af_netlink.c
76724 +++ b/net/netlink/af_netlink.c
76725 @@ -741,7 +741,7 @@ static void netlink_overrun(struct sock *sk)
76726 sk->sk_error_report(sk);
76727 }
76728 }
76729 - atomic_inc(&sk->sk_drops);
76730 + atomic_inc_unchecked(&sk->sk_drops);
76731 }
76732
76733 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
76734 @@ -1997,7 +1997,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
76735 sk_wmem_alloc_get(s),
76736 nlk->cb,
76737 atomic_read(&s->sk_refcnt),
76738 - atomic_read(&s->sk_drops),
76739 + atomic_read_unchecked(&s->sk_drops),
76740 sock_i_ino(s)
76741 );
76742
76743 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
76744 index 7dab229..212156f 100644
76745 --- a/net/netrom/af_netrom.c
76746 +++ b/net/netrom/af_netrom.c
76747 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76748 struct sock *sk = sock->sk;
76749 struct nr_sock *nr = nr_sk(sk);
76750
76751 + memset(sax, 0, sizeof(*sax));
76752 lock_sock(sk);
76753 if (peer != 0) {
76754 if (sk->sk_state != TCP_ESTABLISHED) {
76755 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76756 *uaddr_len = sizeof(struct full_sockaddr_ax25);
76757 } else {
76758 sax->fsa_ax25.sax25_family = AF_NETROM;
76759 - sax->fsa_ax25.sax25_ndigis = 0;
76760 sax->fsa_ax25.sax25_call = nr->source_addr;
76761 *uaddr_len = sizeof(struct sockaddr_ax25);
76762 }
76763 diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
76764 index 2c03050..5cf68c1 100644
76765 --- a/net/openvswitch/datapath.c
76766 +++ b/net/openvswitch/datapath.c
76767 @@ -322,7 +322,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
76768 return -ENOMEM;
76769
76770 nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
76771 - if (!skb)
76772 + if (!nskb)
76773 return -ENOMEM;
76774
76775 nskb->vlan_tci = 0;
76776 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
76777 index 2dbb32b..a1b4722 100644
76778 --- a/net/packet/af_packet.c
76779 +++ b/net/packet/af_packet.c
76780 @@ -1676,7 +1676,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76781
76782 spin_lock(&sk->sk_receive_queue.lock);
76783 po->stats.tp_packets++;
76784 - skb->dropcount = atomic_read(&sk->sk_drops);
76785 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76786 __skb_queue_tail(&sk->sk_receive_queue, skb);
76787 spin_unlock(&sk->sk_receive_queue.lock);
76788 sk->sk_data_ready(sk, skb->len);
76789 @@ -1685,7 +1685,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76790 drop_n_acct:
76791 spin_lock(&sk->sk_receive_queue.lock);
76792 po->stats.tp_drops++;
76793 - atomic_inc(&sk->sk_drops);
76794 + atomic_inc_unchecked(&sk->sk_drops);
76795 spin_unlock(&sk->sk_receive_queue.lock);
76796
76797 drop_n_restore:
76798 @@ -3271,7 +3271,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76799 case PACKET_HDRLEN:
76800 if (len > sizeof(int))
76801 len = sizeof(int);
76802 - if (copy_from_user(&val, optval, len))
76803 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
76804 return -EFAULT;
76805 switch (val) {
76806 case TPACKET_V1:
76807 @@ -3321,7 +3321,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76808
76809 if (put_user(len, optlen))
76810 return -EFAULT;
76811 - if (copy_to_user(optval, data, len))
76812 + if (len > sizeof(st) || copy_to_user(optval, data, len))
76813 return -EFAULT;
76814 return 0;
76815 }
76816 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
76817 index d65f699..05aa6ce 100644
76818 --- a/net/phonet/af_phonet.c
76819 +++ b/net/phonet/af_phonet.c
76820 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
76821 {
76822 struct phonet_protocol *pp;
76823
76824 - if (protocol >= PHONET_NPROTO)
76825 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76826 return NULL;
76827
76828 rcu_read_lock();
76829 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
76830 {
76831 int err = 0;
76832
76833 - if (protocol >= PHONET_NPROTO)
76834 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76835 return -EINVAL;
76836
76837 err = proto_register(pp->prot, 1);
76838 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
76839 index 9726fe6..fc4e3a4 100644
76840 --- a/net/phonet/pep.c
76841 +++ b/net/phonet/pep.c
76842 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76843
76844 case PNS_PEP_CTRL_REQ:
76845 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
76846 - atomic_inc(&sk->sk_drops);
76847 + atomic_inc_unchecked(&sk->sk_drops);
76848 break;
76849 }
76850 __skb_pull(skb, 4);
76851 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76852 }
76853
76854 if (pn->rx_credits == 0) {
76855 - atomic_inc(&sk->sk_drops);
76856 + atomic_inc_unchecked(&sk->sk_drops);
76857 err = -ENOBUFS;
76858 break;
76859 }
76860 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
76861 }
76862
76863 if (pn->rx_credits == 0) {
76864 - atomic_inc(&sk->sk_drops);
76865 + atomic_inc_unchecked(&sk->sk_drops);
76866 err = NET_RX_DROP;
76867 break;
76868 }
76869 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
76870 index 4c7eff3..59c727f 100644
76871 --- a/net/phonet/socket.c
76872 +++ b/net/phonet/socket.c
76873 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
76874 pn->resource, sk->sk_state,
76875 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
76876 sock_i_uid(sk), sock_i_ino(sk),
76877 - atomic_read(&sk->sk_refcnt), sk,
76878 - atomic_read(&sk->sk_drops), &len);
76879 + atomic_read(&sk->sk_refcnt),
76880 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76881 + NULL,
76882 +#else
76883 + sk,
76884 +#endif
76885 + atomic_read_unchecked(&sk->sk_drops), &len);
76886 }
76887 seq_printf(seq, "%*s\n", 127 - len, "");
76888 return 0;
76889 diff --git a/net/rds/cong.c b/net/rds/cong.c
76890 index e5b65ac..f3b6fb7 100644
76891 --- a/net/rds/cong.c
76892 +++ b/net/rds/cong.c
76893 @@ -78,7 +78,7 @@
76894 * finds that the saved generation number is smaller than the global generation
76895 * number, it wakes up the process.
76896 */
76897 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
76898 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
76899
76900 /*
76901 * Congestion monitoring
76902 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
76903 rdsdebug("waking map %p for %pI4\n",
76904 map, &map->m_addr);
76905 rds_stats_inc(s_cong_update_received);
76906 - atomic_inc(&rds_cong_generation);
76907 + atomic_inc_unchecked(&rds_cong_generation);
76908 if (waitqueue_active(&map->m_waitq))
76909 wake_up(&map->m_waitq);
76910 if (waitqueue_active(&rds_poll_waitq))
76911 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
76912
76913 int rds_cong_updated_since(unsigned long *recent)
76914 {
76915 - unsigned long gen = atomic_read(&rds_cong_generation);
76916 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
76917
76918 if (likely(*recent == gen))
76919 return 0;
76920 diff --git a/net/rds/ib.h b/net/rds/ib.h
76921 index edfaaaf..8c89879 100644
76922 --- a/net/rds/ib.h
76923 +++ b/net/rds/ib.h
76924 @@ -128,7 +128,7 @@ struct rds_ib_connection {
76925 /* sending acks */
76926 unsigned long i_ack_flags;
76927 #ifdef KERNEL_HAS_ATOMIC64
76928 - atomic64_t i_ack_next; /* next ACK to send */
76929 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76930 #else
76931 spinlock_t i_ack_lock; /* protect i_ack_next */
76932 u64 i_ack_next; /* next ACK to send */
76933 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
76934 index 51c8689..36c555f 100644
76935 --- a/net/rds/ib_cm.c
76936 +++ b/net/rds/ib_cm.c
76937 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
76938 /* Clear the ACK state */
76939 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76940 #ifdef KERNEL_HAS_ATOMIC64
76941 - atomic64_set(&ic->i_ack_next, 0);
76942 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76943 #else
76944 ic->i_ack_next = 0;
76945 #endif
76946 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
76947 index e29e0ca..fa3a6a3 100644
76948 --- a/net/rds/ib_recv.c
76949 +++ b/net/rds/ib_recv.c
76950 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76951 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
76952 int ack_required)
76953 {
76954 - atomic64_set(&ic->i_ack_next, seq);
76955 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76956 if (ack_required) {
76957 smp_mb__before_clear_bit();
76958 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76959 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76960 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76961 smp_mb__after_clear_bit();
76962
76963 - return atomic64_read(&ic->i_ack_next);
76964 + return atomic64_read_unchecked(&ic->i_ack_next);
76965 }
76966 #endif
76967
76968 diff --git a/net/rds/iw.h b/net/rds/iw.h
76969 index 04ce3b1..48119a6 100644
76970 --- a/net/rds/iw.h
76971 +++ b/net/rds/iw.h
76972 @@ -134,7 +134,7 @@ struct rds_iw_connection {
76973 /* sending acks */
76974 unsigned long i_ack_flags;
76975 #ifdef KERNEL_HAS_ATOMIC64
76976 - atomic64_t i_ack_next; /* next ACK to send */
76977 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76978 #else
76979 spinlock_t i_ack_lock; /* protect i_ack_next */
76980 u64 i_ack_next; /* next ACK to send */
76981 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
76982 index 9556d28..f046d0e 100644
76983 --- a/net/rds/iw_cm.c
76984 +++ b/net/rds/iw_cm.c
76985 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
76986 /* Clear the ACK state */
76987 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76988 #ifdef KERNEL_HAS_ATOMIC64
76989 - atomic64_set(&ic->i_ack_next, 0);
76990 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76991 #else
76992 ic->i_ack_next = 0;
76993 #endif
76994 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
76995 index 5e57347..3916042 100644
76996 --- a/net/rds/iw_recv.c
76997 +++ b/net/rds/iw_recv.c
76998 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76999 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
77000 int ack_required)
77001 {
77002 - atomic64_set(&ic->i_ack_next, seq);
77003 + atomic64_set_unchecked(&ic->i_ack_next, seq);
77004 if (ack_required) {
77005 smp_mb__before_clear_bit();
77006 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77007 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
77008 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77009 smp_mb__after_clear_bit();
77010
77011 - return atomic64_read(&ic->i_ack_next);
77012 + return atomic64_read_unchecked(&ic->i_ack_next);
77013 }
77014 #endif
77015
77016 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
77017 index edac9ef..16bcb98 100644
77018 --- a/net/rds/tcp.c
77019 +++ b/net/rds/tcp.c
77020 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
77021 int val = 1;
77022
77023 set_fs(KERNEL_DS);
77024 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
77025 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
77026 sizeof(val));
77027 set_fs(oldfs);
77028 }
77029 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
77030 index 1b4fd68..2234175 100644
77031 --- a/net/rds/tcp_send.c
77032 +++ b/net/rds/tcp_send.c
77033 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
77034
77035 oldfs = get_fs();
77036 set_fs(KERNEL_DS);
77037 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
77038 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
77039 sizeof(val));
77040 set_fs(oldfs);
77041 }
77042 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
77043 index 74c064c..fdec26f 100644
77044 --- a/net/rxrpc/af_rxrpc.c
77045 +++ b/net/rxrpc/af_rxrpc.c
77046 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
77047 __be32 rxrpc_epoch;
77048
77049 /* current debugging ID */
77050 -atomic_t rxrpc_debug_id;
77051 +atomic_unchecked_t rxrpc_debug_id;
77052
77053 /* count of skbs currently in use */
77054 atomic_t rxrpc_n_skbs;
77055 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
77056 index c3126e8..21facc7 100644
77057 --- a/net/rxrpc/ar-ack.c
77058 +++ b/net/rxrpc/ar-ack.c
77059 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77060
77061 _enter("{%d,%d,%d,%d},",
77062 call->acks_hard, call->acks_unacked,
77063 - atomic_read(&call->sequence),
77064 + atomic_read_unchecked(&call->sequence),
77065 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
77066
77067 stop = 0;
77068 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77069
77070 /* each Tx packet has a new serial number */
77071 sp->hdr.serial =
77072 - htonl(atomic_inc_return(&call->conn->serial));
77073 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
77074
77075 hdr = (struct rxrpc_header *) txb->head;
77076 hdr->serial = sp->hdr.serial;
77077 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
77078 */
77079 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
77080 {
77081 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
77082 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
77083 }
77084
77085 /*
77086 @@ -629,7 +629,7 @@ process_further:
77087
77088 latest = ntohl(sp->hdr.serial);
77089 hard = ntohl(ack.firstPacket);
77090 - tx = atomic_read(&call->sequence);
77091 + tx = atomic_read_unchecked(&call->sequence);
77092
77093 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77094 latest,
77095 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
77096 goto maybe_reschedule;
77097
77098 send_ACK_with_skew:
77099 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
77100 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
77101 ntohl(ack.serial));
77102 send_ACK:
77103 mtu = call->conn->trans->peer->if_mtu;
77104 @@ -1173,7 +1173,7 @@ send_ACK:
77105 ackinfo.rxMTU = htonl(5692);
77106 ackinfo.jumbo_max = htonl(4);
77107
77108 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77109 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77110 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77111 ntohl(hdr.serial),
77112 ntohs(ack.maxSkew),
77113 @@ -1191,7 +1191,7 @@ send_ACK:
77114 send_message:
77115 _debug("send message");
77116
77117 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77118 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77119 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
77120 send_message_2:
77121
77122 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
77123 index bf656c2..48f9d27 100644
77124 --- a/net/rxrpc/ar-call.c
77125 +++ b/net/rxrpc/ar-call.c
77126 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
77127 spin_lock_init(&call->lock);
77128 rwlock_init(&call->state_lock);
77129 atomic_set(&call->usage, 1);
77130 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
77131 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77132 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
77133
77134 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
77135 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
77136 index 4106ca9..a338d7a 100644
77137 --- a/net/rxrpc/ar-connection.c
77138 +++ b/net/rxrpc/ar-connection.c
77139 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
77140 rwlock_init(&conn->lock);
77141 spin_lock_init(&conn->state_lock);
77142 atomic_set(&conn->usage, 1);
77143 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
77144 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77145 conn->avail_calls = RXRPC_MAXCALLS;
77146 conn->size_align = 4;
77147 conn->header_size = sizeof(struct rxrpc_header);
77148 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
77149 index e7ed43a..6afa140 100644
77150 --- a/net/rxrpc/ar-connevent.c
77151 +++ b/net/rxrpc/ar-connevent.c
77152 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
77153
77154 len = iov[0].iov_len + iov[1].iov_len;
77155
77156 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77157 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77158 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
77159
77160 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77161 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
77162 index 1a2b0633..e8d1382 100644
77163 --- a/net/rxrpc/ar-input.c
77164 +++ b/net/rxrpc/ar-input.c
77165 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
77166 /* track the latest serial number on this connection for ACK packet
77167 * information */
77168 serial = ntohl(sp->hdr.serial);
77169 - hi_serial = atomic_read(&call->conn->hi_serial);
77170 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
77171 while (serial > hi_serial)
77172 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
77173 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
77174 serial);
77175
77176 /* request ACK generation for any ACK or DATA packet that requests
77177 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
77178 index 8e22bd3..f66d1c0 100644
77179 --- a/net/rxrpc/ar-internal.h
77180 +++ b/net/rxrpc/ar-internal.h
77181 @@ -272,8 +272,8 @@ struct rxrpc_connection {
77182 int error; /* error code for local abort */
77183 int debug_id; /* debug ID for printks */
77184 unsigned call_counter; /* call ID counter */
77185 - atomic_t serial; /* packet serial number counter */
77186 - atomic_t hi_serial; /* highest serial number received */
77187 + atomic_unchecked_t serial; /* packet serial number counter */
77188 + atomic_unchecked_t hi_serial; /* highest serial number received */
77189 u8 avail_calls; /* number of calls available */
77190 u8 size_align; /* data size alignment (for security) */
77191 u8 header_size; /* rxrpc + security header size */
77192 @@ -346,7 +346,7 @@ struct rxrpc_call {
77193 spinlock_t lock;
77194 rwlock_t state_lock; /* lock for state transition */
77195 atomic_t usage;
77196 - atomic_t sequence; /* Tx data packet sequence counter */
77197 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
77198 u32 abort_code; /* local/remote abort code */
77199 enum { /* current state of call */
77200 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
77201 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
77202 */
77203 extern atomic_t rxrpc_n_skbs;
77204 extern __be32 rxrpc_epoch;
77205 -extern atomic_t rxrpc_debug_id;
77206 +extern atomic_unchecked_t rxrpc_debug_id;
77207 extern struct workqueue_struct *rxrpc_workqueue;
77208
77209 /*
77210 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
77211 index 87f7135..74d3703 100644
77212 --- a/net/rxrpc/ar-local.c
77213 +++ b/net/rxrpc/ar-local.c
77214 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
77215 spin_lock_init(&local->lock);
77216 rwlock_init(&local->services_lock);
77217 atomic_set(&local->usage, 1);
77218 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
77219 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77220 memcpy(&local->srx, srx, sizeof(*srx));
77221 }
77222
77223 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
77224 index 16ae887..d24f12b 100644
77225 --- a/net/rxrpc/ar-output.c
77226 +++ b/net/rxrpc/ar-output.c
77227 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
77228 sp->hdr.cid = call->cid;
77229 sp->hdr.callNumber = call->call_id;
77230 sp->hdr.seq =
77231 - htonl(atomic_inc_return(&call->sequence));
77232 + htonl(atomic_inc_return_unchecked(&call->sequence));
77233 sp->hdr.serial =
77234 - htonl(atomic_inc_return(&conn->serial));
77235 + htonl(atomic_inc_return_unchecked(&conn->serial));
77236 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
77237 sp->hdr.userStatus = 0;
77238 sp->hdr.securityIndex = conn->security_ix;
77239 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
77240 index 2754f09..b20e38f 100644
77241 --- a/net/rxrpc/ar-peer.c
77242 +++ b/net/rxrpc/ar-peer.c
77243 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
77244 INIT_LIST_HEAD(&peer->error_targets);
77245 spin_lock_init(&peer->lock);
77246 atomic_set(&peer->usage, 1);
77247 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
77248 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77249 memcpy(&peer->srx, srx, sizeof(*srx));
77250
77251 rxrpc_assess_MTU_size(peer);
77252 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
77253 index 38047f7..9f48511 100644
77254 --- a/net/rxrpc/ar-proc.c
77255 +++ b/net/rxrpc/ar-proc.c
77256 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
77257 atomic_read(&conn->usage),
77258 rxrpc_conn_states[conn->state],
77259 key_serial(conn->key),
77260 - atomic_read(&conn->serial),
77261 - atomic_read(&conn->hi_serial));
77262 + atomic_read_unchecked(&conn->serial),
77263 + atomic_read_unchecked(&conn->hi_serial));
77264
77265 return 0;
77266 }
77267 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
77268 index 92df566..87ec1bf 100644
77269 --- a/net/rxrpc/ar-transport.c
77270 +++ b/net/rxrpc/ar-transport.c
77271 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
77272 spin_lock_init(&trans->client_lock);
77273 rwlock_init(&trans->conn_lock);
77274 atomic_set(&trans->usage, 1);
77275 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
77276 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77277
77278 if (peer->srx.transport.family == AF_INET) {
77279 switch (peer->srx.transport_type) {
77280 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
77281 index 7635107..4670276 100644
77282 --- a/net/rxrpc/rxkad.c
77283 +++ b/net/rxrpc/rxkad.c
77284 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
77285
77286 len = iov[0].iov_len + iov[1].iov_len;
77287
77288 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77289 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77290 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
77291
77292 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77293 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
77294
77295 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
77296
77297 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
77298 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77299 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
77300
77301 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
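The rxrpc hunks above all apply one pattern: counters that merely hand out debug IDs, packet serials and Tx sequence numbers are converted from atomic_t to atomic_unchecked_t, and their accessors to the *_unchecked variants. Under PaX's REFCOUNT hardening an atomic_t overflow is treated as a reference-counting bug, so counters that are allowed to wrap must be opted out explicitly. A minimal sketch of the idiom, assuming a PaX-patched tree that provides atomic_unchecked_t (the example_* names are illustrative, not part of the patch):

        /* sketch only -- wrap-tolerant counter opted out of REFCOUNT checking */
        #include <linux/atomic.h>

        static atomic_unchecked_t example_debug_id;     /* hypothetical ID source */

        static unsigned int example_next_debug_id(void)
        {
                /* wraparound is harmless here, so use the _unchecked helper
                 * instead of atomic_inc_return() to avoid a false positive */
                return atomic_inc_return_unchecked(&example_debug_id);
        }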
77302 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
77303 index 1e2eee8..ce3967e 100644
77304 --- a/net/sctp/proc.c
77305 +++ b/net/sctp/proc.c
77306 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
77307 seq_printf(seq,
77308 "%8pK %8pK %-3d %-3d %-2d %-4d "
77309 "%4d %8d %8d %7d %5lu %-5d %5d ",
77310 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
77311 + assoc, sk,
77312 + sctp_sk(sk)->type, sk->sk_state,
77313 assoc->state, hash,
77314 assoc->assoc_id,
77315 assoc->sndbuf_used,
77316 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
77317 index d043722..6903416 100644
77318 --- a/net/sctp/socket.c
77319 +++ b/net/sctp/socket.c
77320 @@ -4575,7 +4575,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
77321 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
77322 if (space_left < addrlen)
77323 return -ENOMEM;
77324 - if (copy_to_user(to, &temp, addrlen))
77325 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
77326 return -EFAULT;
77327 to += addrlen;
77328 cnt++;
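The sctp_getsockopt_peer_addrs() change is a defensive bound: addrlen comes from the address-family table and should never exceed sizeof(temp), but the copy_to_user() is now refused outright if it ever would, so a bad sockaddr_len can no longer leak stack memory beyond the sockaddr_storage buffer. The same bounded-copy idiom, as a sketch with hypothetical names:

        /* sketch only -- never copy more than the kernel buffer actually holds */
        #include <linux/socket.h>
        #include <linux/uaccess.h>

        static int example_copy_addr(void __user *to,
                                     const struct sockaddr_storage *temp, int addrlen)
        {
                if (addrlen < 0 || (size_t)addrlen > sizeof(*temp))
                        return -EINVAL;         /* would expose adjacent stack memory */
                if (copy_to_user(to, temp, addrlen))
                        return -EFAULT;
                return addrlen;
        }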
77329 diff --git a/net/socket.c b/net/socket.c
77330 index 0de4131..7e7ddab 100644
77331 --- a/net/socket.c
77332 +++ b/net/socket.c
77333 @@ -88,6 +88,7 @@
77334 #include <linux/nsproxy.h>
77335 #include <linux/magic.h>
77336 #include <linux/slab.h>
77337 +#include <linux/in.h>
77338
77339 #include <asm/uaccess.h>
77340 #include <asm/unistd.h>
77341 @@ -105,6 +106,8 @@
77342 #include <linux/sockios.h>
77343 #include <linux/atalk.h>
77344
77345 +#include <linux/grsock.h>
77346 +
77347 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
77348 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
77349 unsigned long nr_segs, loff_t pos);
77350 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
77351 &sockfs_dentry_operations, SOCKFS_MAGIC);
77352 }
77353
77354 -static struct vfsmount *sock_mnt __read_mostly;
77355 +struct vfsmount *sock_mnt __read_mostly;
77356
77357 static struct file_system_type sock_fs_type = {
77358 .name = "sockfs",
77359 @@ -1207,6 +1210,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
77360 return -EAFNOSUPPORT;
77361 if (type < 0 || type >= SOCK_MAX)
77362 return -EINVAL;
77363 + if (protocol < 0)
77364 + return -EINVAL;
77365
77366 /* Compatibility.
77367
77368 @@ -1339,6 +1344,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
77369 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
77370 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
77371
77372 + if(!gr_search_socket(family, type, protocol)) {
77373 + retval = -EACCES;
77374 + goto out;
77375 + }
77376 +
77377 + if (gr_handle_sock_all(family, type, protocol)) {
77378 + retval = -EACCES;
77379 + goto out;
77380 + }
77381 +
77382 retval = sock_create(family, type, protocol, &sock);
77383 if (retval < 0)
77384 goto out;
77385 @@ -1451,6 +1466,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77386 if (sock) {
77387 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
77388 if (err >= 0) {
77389 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
77390 + err = -EACCES;
77391 + goto error;
77392 + }
77393 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
77394 + if (err)
77395 + goto error;
77396 +
77397 err = security_socket_bind(sock,
77398 (struct sockaddr *)&address,
77399 addrlen);
77400 @@ -1459,6 +1482,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77401 (struct sockaddr *)
77402 &address, addrlen);
77403 }
77404 +error:
77405 fput_light(sock->file, fput_needed);
77406 }
77407 return err;
77408 @@ -1482,10 +1506,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
77409 if ((unsigned)backlog > somaxconn)
77410 backlog = somaxconn;
77411
77412 + if (gr_handle_sock_server_other(sock->sk)) {
77413 + err = -EPERM;
77414 + goto error;
77415 + }
77416 +
77417 + err = gr_search_listen(sock);
77418 + if (err)
77419 + goto error;
77420 +
77421 err = security_socket_listen(sock, backlog);
77422 if (!err)
77423 err = sock->ops->listen(sock, backlog);
77424
77425 +error:
77426 fput_light(sock->file, fput_needed);
77427 }
77428 return err;
77429 @@ -1529,6 +1563,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77430 newsock->type = sock->type;
77431 newsock->ops = sock->ops;
77432
77433 + if (gr_handle_sock_server_other(sock->sk)) {
77434 + err = -EPERM;
77435 + sock_release(newsock);
77436 + goto out_put;
77437 + }
77438 +
77439 + err = gr_search_accept(sock);
77440 + if (err) {
77441 + sock_release(newsock);
77442 + goto out_put;
77443 + }
77444 +
77445 /*
77446 * We don't need try_module_get here, as the listening socket (sock)
77447 * has the protocol module (sock->ops->owner) held.
77448 @@ -1567,6 +1613,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77449 fd_install(newfd, newfile);
77450 err = newfd;
77451
77452 + gr_attach_curr_ip(newsock->sk);
77453 +
77454 out_put:
77455 fput_light(sock->file, fput_needed);
77456 out:
77457 @@ -1599,6 +1647,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77458 int, addrlen)
77459 {
77460 struct socket *sock;
77461 + struct sockaddr *sck;
77462 struct sockaddr_storage address;
77463 int err, fput_needed;
77464
77465 @@ -1609,6 +1658,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77466 if (err < 0)
77467 goto out_put;
77468
77469 + sck = (struct sockaddr *)&address;
77470 +
77471 + if (gr_handle_sock_client(sck)) {
77472 + err = -EACCES;
77473 + goto out_put;
77474 + }
77475 +
77476 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
77477 + if (err)
77478 + goto out_put;
77479 +
77480 err =
77481 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
77482 if (err)
77483 @@ -1970,7 +2030,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
77484 * checking falls down on this.
77485 */
77486 if (copy_from_user(ctl_buf,
77487 - (void __user __force *)msg_sys->msg_control,
77488 + (void __force_user *)msg_sys->msg_control,
77489 ctl_len))
77490 goto out_freectl;
77491 msg_sys->msg_control = ctl_buf;
77492 @@ -2140,7 +2200,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
77493 * kernel msghdr to use the kernel address space)
77494 */
77495
77496 - uaddr = (__force void __user *)msg_sys->msg_name;
77497 + uaddr = (void __force_user *)msg_sys->msg_name;
77498 uaddr_len = COMPAT_NAMELEN(msg);
77499 if (MSG_CMSG_COMPAT & flags) {
77500 err = verify_compat_iovec(msg_sys, iov,
77501 @@ -2768,7 +2828,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77502 }
77503
77504 ifr = compat_alloc_user_space(buf_size);
77505 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
77506 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
77507
77508 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
77509 return -EFAULT;
77510 @@ -2792,12 +2852,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77511 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
77512
77513 if (copy_in_user(rxnfc, compat_rxnfc,
77514 - (void *)(&rxnfc->fs.m_ext + 1) -
77515 - (void *)rxnfc) ||
77516 + (void __user *)(&rxnfc->fs.m_ext + 1) -
77517 + (void __user *)rxnfc) ||
77518 copy_in_user(&rxnfc->fs.ring_cookie,
77519 &compat_rxnfc->fs.ring_cookie,
77520 - (void *)(&rxnfc->fs.location + 1) -
77521 - (void *)&rxnfc->fs.ring_cookie) ||
77522 + (void __user *)(&rxnfc->fs.location + 1) -
77523 + (void __user *)&rxnfc->fs.ring_cookie) ||
77524 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
77525 sizeof(rxnfc->rule_cnt)))
77526 return -EFAULT;
77527 @@ -2809,12 +2869,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77528
77529 if (convert_out) {
77530 if (copy_in_user(compat_rxnfc, rxnfc,
77531 - (const void *)(&rxnfc->fs.m_ext + 1) -
77532 - (const void *)rxnfc) ||
77533 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
77534 + (const void __user *)rxnfc) ||
77535 copy_in_user(&compat_rxnfc->fs.ring_cookie,
77536 &rxnfc->fs.ring_cookie,
77537 - (const void *)(&rxnfc->fs.location + 1) -
77538 - (const void *)&rxnfc->fs.ring_cookie) ||
77539 + (const void __user *)(&rxnfc->fs.location + 1) -
77540 + (const void __user *)&rxnfc->fs.ring_cookie) ||
77541 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
77542 sizeof(rxnfc->rule_cnt)))
77543 return -EFAULT;
77544 @@ -2884,7 +2944,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
77545 old_fs = get_fs();
77546 set_fs(KERNEL_DS);
77547 err = dev_ioctl(net, cmd,
77548 - (struct ifreq __user __force *) &kifr);
77549 + (struct ifreq __force_user *) &kifr);
77550 set_fs(old_fs);
77551
77552 return err;
77553 @@ -2993,7 +3053,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
77554
77555 old_fs = get_fs();
77556 set_fs(KERNEL_DS);
77557 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
77558 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
77559 set_fs(old_fs);
77560
77561 if (cmd == SIOCGIFMAP && !err) {
77562 @@ -3098,7 +3158,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
77563 ret |= __get_user(rtdev, &(ur4->rt_dev));
77564 if (rtdev) {
77565 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
77566 - r4.rt_dev = (char __user __force *)devname;
77567 + r4.rt_dev = (char __force_user *)devname;
77568 devname[15] = 0;
77569 } else
77570 r4.rt_dev = NULL;
77571 @@ -3324,8 +3384,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
77572 int __user *uoptlen;
77573 int err;
77574
77575 - uoptval = (char __user __force *) optval;
77576 - uoptlen = (int __user __force *) optlen;
77577 + uoptval = (char __force_user *) optval;
77578 + uoptlen = (int __force_user *) optlen;
77579
77580 set_fs(KERNEL_DS);
77581 if (level == SOL_SOCKET)
77582 @@ -3345,7 +3405,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
77583 char __user *uoptval;
77584 int err;
77585
77586 - uoptval = (char __user __force *) optval;
77587 + uoptval = (char __force_user *) optval;
77588
77589 set_fs(KERNEL_DS);
77590 if (level == SOL_SOCKET)
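Taken together, the net/socket.c hunks wire the grsecurity socket policy into the syscall entry points: gr_search_socket()/gr_handle_sock_all() gate socket(2), gr_handle_sock_server()/gr_search_bind() gate bind(2), gr_search_listen() and gr_search_accept() gate listen(2) and accept4(2), and gr_handle_sock_client()/gr_search_connect() gate connect(2), each denying with -EACCES or -EPERM before the LSM hook runs; __sock_create() additionally rejects negative protocol numbers up front. The placement pattern, reduced to a sketch around a hypothetical policy helper (the gr_* internals live elsewhere in the patch and are not reproduced here):

        /* sketch only -- deny early, before the LSM hook and any allocation */
        #include <linux/socket.h>
        #include <linux/types.h>

        static bool example_policy_allows(int family, int type, int protocol)
        {
                return family != AF_PACKET;     /* hypothetical deny rule, illustration only */
        }

        static long example_socket_entry(int family, int type, int protocol)
        {
                if (protocol < 0)
                        return -EINVAL;         /* malformed input rejected up front */
                if (!example_policy_allows(family, type, protocol))
                        return -EACCES;         /* policy denial, nothing allocated yet */
                /* ... the normal sock_create()/LSM path would continue here ... */
                return 0;
        }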
77591 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
77592 index 8efd96c..b492ab2 100644
77593 --- a/net/sunrpc/sched.c
77594 +++ b/net/sunrpc/sched.c
77595 @@ -239,9 +239,9 @@ static int rpc_wait_bit_killable(void *word)
77596 #ifdef RPC_DEBUG
77597 static void rpc_task_set_debuginfo(struct rpc_task *task)
77598 {
77599 - static atomic_t rpc_pid;
77600 + static atomic_unchecked_t rpc_pid;
77601
77602 - task->tk_pid = atomic_inc_return(&rpc_pid);
77603 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
77604 }
77605 #else
77606 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
77607 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
77608 index 4645709..d41d668 100644
77609 --- a/net/sunrpc/svcsock.c
77610 +++ b/net/sunrpc/svcsock.c
77611 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
77612 int buflen, unsigned int base)
77613 {
77614 size_t save_iovlen;
77615 - void __user *save_iovbase;
77616 + void *save_iovbase;
77617 unsigned int i;
77618 int ret;
77619
77620 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
77621 index 09af4fa..77110a9 100644
77622 --- a/net/sunrpc/xprtrdma/svc_rdma.c
77623 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
77624 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
77625 static unsigned int min_max_inline = 4096;
77626 static unsigned int max_max_inline = 65536;
77627
77628 -atomic_t rdma_stat_recv;
77629 -atomic_t rdma_stat_read;
77630 -atomic_t rdma_stat_write;
77631 -atomic_t rdma_stat_sq_starve;
77632 -atomic_t rdma_stat_rq_starve;
77633 -atomic_t rdma_stat_rq_poll;
77634 -atomic_t rdma_stat_rq_prod;
77635 -atomic_t rdma_stat_sq_poll;
77636 -atomic_t rdma_stat_sq_prod;
77637 +atomic_unchecked_t rdma_stat_recv;
77638 +atomic_unchecked_t rdma_stat_read;
77639 +atomic_unchecked_t rdma_stat_write;
77640 +atomic_unchecked_t rdma_stat_sq_starve;
77641 +atomic_unchecked_t rdma_stat_rq_starve;
77642 +atomic_unchecked_t rdma_stat_rq_poll;
77643 +atomic_unchecked_t rdma_stat_rq_prod;
77644 +atomic_unchecked_t rdma_stat_sq_poll;
77645 +atomic_unchecked_t rdma_stat_sq_prod;
77646
77647 /* Temporary NFS request map and context caches */
77648 struct kmem_cache *svc_rdma_map_cachep;
77649 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
77650 len -= *ppos;
77651 if (len > *lenp)
77652 len = *lenp;
77653 - if (len && copy_to_user(buffer, str_buf, len))
77654 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
77655 return -EFAULT;
77656 *lenp = len;
77657 *ppos += len;
77658 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
77659 {
77660 .procname = "rdma_stat_read",
77661 .data = &rdma_stat_read,
77662 - .maxlen = sizeof(atomic_t),
77663 + .maxlen = sizeof(atomic_unchecked_t),
77664 .mode = 0644,
77665 .proc_handler = read_reset_stat,
77666 },
77667 {
77668 .procname = "rdma_stat_recv",
77669 .data = &rdma_stat_recv,
77670 - .maxlen = sizeof(atomic_t),
77671 + .maxlen = sizeof(atomic_unchecked_t),
77672 .mode = 0644,
77673 .proc_handler = read_reset_stat,
77674 },
77675 {
77676 .procname = "rdma_stat_write",
77677 .data = &rdma_stat_write,
77678 - .maxlen = sizeof(atomic_t),
77679 + .maxlen = sizeof(atomic_unchecked_t),
77680 .mode = 0644,
77681 .proc_handler = read_reset_stat,
77682 },
77683 {
77684 .procname = "rdma_stat_sq_starve",
77685 .data = &rdma_stat_sq_starve,
77686 - .maxlen = sizeof(atomic_t),
77687 + .maxlen = sizeof(atomic_unchecked_t),
77688 .mode = 0644,
77689 .proc_handler = read_reset_stat,
77690 },
77691 {
77692 .procname = "rdma_stat_rq_starve",
77693 .data = &rdma_stat_rq_starve,
77694 - .maxlen = sizeof(atomic_t),
77695 + .maxlen = sizeof(atomic_unchecked_t),
77696 .mode = 0644,
77697 .proc_handler = read_reset_stat,
77698 },
77699 {
77700 .procname = "rdma_stat_rq_poll",
77701 .data = &rdma_stat_rq_poll,
77702 - .maxlen = sizeof(atomic_t),
77703 + .maxlen = sizeof(atomic_unchecked_t),
77704 .mode = 0644,
77705 .proc_handler = read_reset_stat,
77706 },
77707 {
77708 .procname = "rdma_stat_rq_prod",
77709 .data = &rdma_stat_rq_prod,
77710 - .maxlen = sizeof(atomic_t),
77711 + .maxlen = sizeof(atomic_unchecked_t),
77712 .mode = 0644,
77713 .proc_handler = read_reset_stat,
77714 },
77715 {
77716 .procname = "rdma_stat_sq_poll",
77717 .data = &rdma_stat_sq_poll,
77718 - .maxlen = sizeof(atomic_t),
77719 + .maxlen = sizeof(atomic_unchecked_t),
77720 .mode = 0644,
77721 .proc_handler = read_reset_stat,
77722 },
77723 {
77724 .procname = "rdma_stat_sq_prod",
77725 .data = &rdma_stat_sq_prod,
77726 - .maxlen = sizeof(atomic_t),
77727 + .maxlen = sizeof(atomic_unchecked_t),
77728 .mode = 0644,
77729 .proc_handler = read_reset_stat,
77730 },
77731 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77732 index df67211..c354b13 100644
77733 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77734 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77735 @@ -499,7 +499,7 @@ next_sge:
77736 svc_rdma_put_context(ctxt, 0);
77737 goto out;
77738 }
77739 - atomic_inc(&rdma_stat_read);
77740 + atomic_inc_unchecked(&rdma_stat_read);
77741
77742 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
77743 chl_map->ch[ch_no].count -= read_wr.num_sge;
77744 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77745 dto_q);
77746 list_del_init(&ctxt->dto_q);
77747 } else {
77748 - atomic_inc(&rdma_stat_rq_starve);
77749 + atomic_inc_unchecked(&rdma_stat_rq_starve);
77750 clear_bit(XPT_DATA, &xprt->xpt_flags);
77751 ctxt = NULL;
77752 }
77753 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77754 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
77755 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
77756 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
77757 - atomic_inc(&rdma_stat_recv);
77758 + atomic_inc_unchecked(&rdma_stat_recv);
77759
77760 /* Build up the XDR from the receive buffers. */
77761 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
77762 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77763 index 249a835..fb2794b 100644
77764 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77765 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77766 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
77767 write_wr.wr.rdma.remote_addr = to;
77768
77769 /* Post It */
77770 - atomic_inc(&rdma_stat_write);
77771 + atomic_inc_unchecked(&rdma_stat_write);
77772 if (svc_rdma_send(xprt, &write_wr))
77773 goto err;
77774 return 0;
77775 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77776 index 894cb42..cf5bafb 100644
77777 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
77778 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77779 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77780 return;
77781
77782 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
77783 - atomic_inc(&rdma_stat_rq_poll);
77784 + atomic_inc_unchecked(&rdma_stat_rq_poll);
77785
77786 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
77787 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
77788 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77789 }
77790
77791 if (ctxt)
77792 - atomic_inc(&rdma_stat_rq_prod);
77793 + atomic_inc_unchecked(&rdma_stat_rq_prod);
77794
77795 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
77796 /*
77797 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77798 return;
77799
77800 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
77801 - atomic_inc(&rdma_stat_sq_poll);
77802 + atomic_inc_unchecked(&rdma_stat_sq_poll);
77803 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
77804 if (wc.status != IB_WC_SUCCESS)
77805 /* Close the transport */
77806 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77807 }
77808
77809 if (ctxt)
77810 - atomic_inc(&rdma_stat_sq_prod);
77811 + atomic_inc_unchecked(&rdma_stat_sq_prod);
77812 }
77813
77814 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
77815 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
77816 spin_lock_bh(&xprt->sc_lock);
77817 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
77818 spin_unlock_bh(&xprt->sc_lock);
77819 - atomic_inc(&rdma_stat_sq_starve);
77820 + atomic_inc_unchecked(&rdma_stat_sq_starve);
77821
77822 /* See if we can opportunistically reap SQ WR to make room */
77823 sq_cq_reap(xprt);
77824 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
77825 index e758139..d29ea47 100644
77826 --- a/net/sysctl_net.c
77827 +++ b/net/sysctl_net.c
77828 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
77829 struct ctl_table *table)
77830 {
77831 /* Allow network administrator to have same access as root. */
77832 - if (capable(CAP_NET_ADMIN)) {
77833 + if (capable_nolog(CAP_NET_ADMIN)) {
77834 int mode = (table->mode >> 6) & 7;
77835 return (mode << 6) | (mode << 3) | mode;
77836 }
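net_ctl_permissions() grants a CAP_NET_ADMIN holder the owner's permission bits on every networking sysctl, and the check runs for ordinary table accesses as well; switching capable() to grsecurity's capable_nolog() keeps those routine checks from generating grsecurity capability log entries while leaving the decision itself unchanged. The mode arithmetic is easy to misread, so here is a worked example (plain userspace C, sketch only):

        /* sketch only -- what (mode << 6) | (mode << 3) | mode computes */
        #include <stdio.h>

        int main(void)
        {
                int table_mode = 0644;                  /* a typical sysctl entry */
                int mode = (table_mode >> 6) & 7;       /* owner bits: 06 (rw) */

                /* replicate the owner bits into group and other -> 0666 */
                printf("%o\n", (mode << 6) | (mode << 3) | mode);
                return 0;
        }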
77837 diff --git a/net/tipc/link.c b/net/tipc/link.c
77838 index ac1832a..533ed97 100644
77839 --- a/net/tipc/link.c
77840 +++ b/net/tipc/link.c
77841 @@ -1205,7 +1205,7 @@ static int link_send_sections_long(struct tipc_port *sender,
77842 struct tipc_msg fragm_hdr;
77843 struct sk_buff *buf, *buf_chain, *prev;
77844 u32 fragm_crs, fragm_rest, hsz, sect_rest;
77845 - const unchar *sect_crs;
77846 + const unchar __user *sect_crs;
77847 int curr_sect;
77848 u32 fragm_no;
77849
77850 @@ -1249,7 +1249,7 @@ again:
77851
77852 if (!sect_rest) {
77853 sect_rest = msg_sect[++curr_sect].iov_len;
77854 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
77855 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
77856 }
77857
77858 if (sect_rest < fragm_rest)
77859 @@ -1268,7 +1268,7 @@ error:
77860 }
77861 } else
77862 skb_copy_to_linear_data_offset(buf, fragm_crs,
77863 - sect_crs, sz);
77864 + (const void __force_kernel *)sect_crs, sz);
77865 sect_crs += sz;
77866 sect_rest -= sz;
77867 fragm_crs += sz;
77868 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
77869 index 3e4d3e2..27b55dc 100644
77870 --- a/net/tipc/msg.c
77871 +++ b/net/tipc/msg.c
77872 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
77873 msg_sect[cnt].iov_len);
77874 else
77875 skb_copy_to_linear_data_offset(*buf, pos,
77876 - msg_sect[cnt].iov_base,
77877 + (const void __force_kernel *)msg_sect[cnt].iov_base,
77878 msg_sect[cnt].iov_len);
77879 pos += msg_sect[cnt].iov_len;
77880 }
77881 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
77882 index 8c49566..14510cb 100644
77883 --- a/net/tipc/subscr.c
77884 +++ b/net/tipc/subscr.c
77885 @@ -101,7 +101,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
77886 {
77887 struct iovec msg_sect;
77888
77889 - msg_sect.iov_base = (void *)&sub->evt;
77890 + msg_sect.iov_base = (void __force_user *)&sub->evt;
77891 msg_sect.iov_len = sizeof(struct tipc_event);
77892
77893 sub->evt.event = htohl(event, sub->swap);
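These tipc hunks, like the earlier net/socket.c ones, replace the stock "__user __force" casts with the PaX spellings __force_user and __force_kernel. The annotations exist purely for sparse: __user places a pointer in the userspace address space, and a __force-style cast is the accepted way to cross address spaces deliberately, e.g. when an iovec built for kernel-internal delivery carries a kernel pointer in its __user iov_base field. A minimal sketch using the stock annotations from <linux/compiler.h> (the combined __force_user macro is defined elsewhere in this patch):

        /* sketch only -- sparse address-space annotations and a deliberate cast */
        #include <linux/compiler.h>
        #include <linux/uio.h>

        static void example_fill_iovec(struct iovec *iov, void *kbuf, size_t len)
        {
                /* iov_base is declared void __user *; without __force sparse
                 * would flag this assignment as an address-space violation */
                iov->iov_base = (void __force __user *)kbuf;
                iov->iov_len  = len;
        }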
77894 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
77895 index 85d3bb7..79f4487 100644
77896 --- a/net/unix/af_unix.c
77897 +++ b/net/unix/af_unix.c
77898 @@ -770,6 +770,12 @@ static struct sock *unix_find_other(struct net *net,
77899 err = -ECONNREFUSED;
77900 if (!S_ISSOCK(inode->i_mode))
77901 goto put_fail;
77902 +
77903 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
77904 + err = -EACCES;
77905 + goto put_fail;
77906 + }
77907 +
77908 u = unix_find_socket_byinode(inode);
77909 if (!u)
77910 goto put_fail;
77911 @@ -790,6 +796,13 @@ static struct sock *unix_find_other(struct net *net,
77912 if (u) {
77913 struct dentry *dentry;
77914 dentry = unix_sk(u)->dentry;
77915 +
77916 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
77917 + err = -EPERM;
77918 + sock_put(u);
77919 + goto fail;
77920 + }
77921 +
77922 if (dentry)
77923 touch_atime(unix_sk(u)->mnt, dentry);
77924 } else
77925 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
77926 err = security_path_mknod(&path, dentry, mode, 0);
77927 if (err)
77928 goto out_mknod_drop_write;
77929 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
77930 + err = -EACCES;
77931 + goto out_mknod_drop_write;
77932 + }
77933 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
77934 out_mknod_drop_write:
77935 mnt_drop_write(path.mnt);
77936 if (err)
77937 goto out_mknod_dput;
77938 +
77939 + gr_handle_create(dentry, path.mnt);
77940 +
77941 mutex_unlock(&path.dentry->d_inode->i_mutex);
77942 dput(path.dentry);
77943 path.dentry = dentry;
77944 diff --git a/net/wireless/core.h b/net/wireless/core.h
77945 index 43ad9c8..ab5127c 100644
77946 --- a/net/wireless/core.h
77947 +++ b/net/wireless/core.h
77948 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
77949 struct mutex mtx;
77950
77951 /* rfkill support */
77952 - struct rfkill_ops rfkill_ops;
77953 + rfkill_ops_no_const rfkill_ops;
77954 struct rfkill *rfkill;
77955 struct work_struct rfkill_sync;
77956
77957 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
77958 index 0af7f54..c916d2f 100644
77959 --- a/net/wireless/wext-core.c
77960 +++ b/net/wireless/wext-core.c
77961 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77962 */
77963
77964 /* Support for very large requests */
77965 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
77966 - (user_length > descr->max_tokens)) {
77967 + if (user_length > descr->max_tokens) {
77968 /* Allow userspace to GET more than max so
77969 * we can support any size GET requests.
77970 * There is still a limit : -ENOMEM.
77971 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77972 }
77973 }
77974
77975 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
77976 - /*
77977 - * If this is a GET, but not NOMAX, it means that the extra
77978 - * data is not bounded by userspace, but by max_tokens. Thus
77979 - * set the length to max_tokens. This matches the extra data
77980 - * allocation.
77981 - * The driver should fill it with the number of tokens it
77982 - * provided, and it may check iwp->length rather than having
77983 - * knowledge of max_tokens. If the driver doesn't change the
77984 - * iwp->length, this ioctl just copies back max_token tokens
77985 - * filled with zeroes. Hopefully the driver isn't claiming
77986 - * them to be valid data.
77987 - */
77988 - iwp->length = descr->max_tokens;
77989 - }
77990 -
77991 err = handler(dev, info, (union iwreq_data *) iwp, extra);
77992
77993 iwp->length += essid_compat;
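One reading of the wext-core.c change: dropping the IW_DESCR_FLAG_NOMAX condition routes every oversized GET request through the same "very large request" branch, and removing the later fixup means iwp->length is no longer silently inflated to descr->max_tokens when a driver leaves it untouched, so the amount copied back to userspace stays under the driver's (and the caller's) control instead of being padded out with zeroed tokens. A generic sketch of the clamping idea, with hypothetical names:

        /* sketch only -- clamp a reply copy to the smaller of the two bounds */
        #include <linux/kernel.h>
        #include <linux/uaccess.h>

        static ssize_t example_copy_reply(void __user *ubuf, size_t user_len,
                                          const void *kbuf, size_t kernel_len)
        {
                size_t n = min(user_len, kernel_len);

                if (copy_to_user(ubuf, kbuf, n))
                        return -EFAULT;
                return n;
        }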
77994 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
77995 index 7661576..80f7627 100644
77996 --- a/net/xfrm/xfrm_policy.c
77997 +++ b/net/xfrm/xfrm_policy.c
77998 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
77999 {
78000 policy->walk.dead = 1;
78001
78002 - atomic_inc(&policy->genid);
78003 + atomic_inc_unchecked(&policy->genid);
78004
78005 if (del_timer(&policy->timer))
78006 xfrm_pol_put(policy);
78007 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
78008 hlist_add_head(&policy->bydst, chain);
78009 xfrm_pol_hold(policy);
78010 net->xfrm.policy_count[dir]++;
78011 - atomic_inc(&flow_cache_genid);
78012 + atomic_inc_unchecked(&flow_cache_genid);
78013 if (delpol)
78014 __xfrm_policy_unlink(delpol, dir);
78015 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
78016 @@ -1530,7 +1530,7 @@ free_dst:
78017 goto out;
78018 }
78019
78020 -static int inline
78021 +static inline int
78022 xfrm_dst_alloc_copy(void **target, const void *src, int size)
78023 {
78024 if (!*target) {
78025 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
78026 return 0;
78027 }
78028
78029 -static int inline
78030 +static inline int
78031 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78032 {
78033 #ifdef CONFIG_XFRM_SUB_POLICY
78034 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78035 #endif
78036 }
78037
78038 -static int inline
78039 +static inline int
78040 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
78041 {
78042 #ifdef CONFIG_XFRM_SUB_POLICY
78043 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
78044
78045 xdst->num_pols = num_pols;
78046 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
78047 - xdst->policy_genid = atomic_read(&pols[0]->genid);
78048 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
78049
78050 return xdst;
78051 }
78052 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
78053 if (xdst->xfrm_genid != dst->xfrm->genid)
78054 return 0;
78055 if (xdst->num_pols > 0 &&
78056 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
78057 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
78058 return 0;
78059
78060 mtu = dst_mtu(dst->child);
78061 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
78062 sizeof(pol->xfrm_vec[i].saddr));
78063 pol->xfrm_vec[i].encap_family = mp->new_family;
78064 /* flush bundles */
78065 - atomic_inc(&pol->genid);
78066 + atomic_inc_unchecked(&pol->genid);
78067 }
78068 }
78069
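Besides moving the genid counters to atomic_unchecked_t (the same wrap-tolerant pattern used in the rxrpc hunks), the xfrm_policy.c hunks tidy three declarations from "static int inline" to "static inline int". Both forms compile, but kernel style puts inline before the return type and some compilers warn when it does not come first. A two-line sketch of the cleanup:

        /* sketch only -- specifier order: put inline before the return type */
        static int inline example_old_style(void) { return 0; }        /* accepted, but flagged by style checkers */
        static inline int example_new_style(void) { return 0; }        /* conventional kernel form */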
78070 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
78071 index d2b366c..51ff91ebc 100644
78072 --- a/scripts/Makefile.build
78073 +++ b/scripts/Makefile.build
78074 @@ -109,7 +109,7 @@ endif
78075 endif
78076
78077 # Do not include host rules unless needed
78078 -ifneq ($(hostprogs-y)$(hostprogs-m),)
78079 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
78080 include scripts/Makefile.host
78081 endif
78082
78083 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
78084 index 686cb0d..9d653bf 100644
78085 --- a/scripts/Makefile.clean
78086 +++ b/scripts/Makefile.clean
78087 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
78088 __clean-files := $(extra-y) $(always) \
78089 $(targets) $(clean-files) \
78090 $(host-progs) \
78091 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
78092 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
78093 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
78094
78095 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
78096
78097 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
78098 index 1ac414f..a1c1451 100644
78099 --- a/scripts/Makefile.host
78100 +++ b/scripts/Makefile.host
78101 @@ -31,6 +31,7 @@
78102 # Note: Shared libraries consisting of C++ files are not supported
78103
78104 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
78105 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
78106
78107 # C code
78108 # Executables compiled from a single .c file
78109 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
78110 # Shared libaries (only .c supported)
78111 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
78112 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
78113 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
78114 # Remove .so files from "xxx-objs"
78115 host-cobjs := $(filter-out %.so,$(host-cobjs))
78116
78117 diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
78118 index 00c368c..bb3f3e9 100644
78119 --- a/scripts/Makefile.lib
78120 +++ b/scripts/Makefile.lib
78121 @@ -144,14 +144,14 @@ __a_flags = $(call flags,_a_flags)
78122 __cpp_flags = $(call flags,_cpp_flags)
78123 endif
78124
78125 -c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
78126 +c_flags = -Wp,-MD,$(depfile) $(LINUXINCLUDE) $(NOSTDINC_FLAGS) \
78127 $(__c_flags) $(modkern_cflags) \
78128 -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags)
78129
78130 -a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
78131 +a_flags = -Wp,-MD,$(depfile) $(LINUXINCLUDE) $(NOSTDINC_FLAGS) \
78132 $(__a_flags) $(modkern_aflags)
78133
78134 -cpp_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
78135 +cpp_flags = -Wp,-MD,$(depfile) $(LINUXINCLUDE) $(NOSTDINC_FLAGS) \
78136 $(__cpp_flags)
78137
78138 ld_flags = $(LDFLAGS) $(ldflags-y)
78139 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
78140 index cb1f50c..cef2a7c 100644
78141 --- a/scripts/basic/fixdep.c
78142 +++ b/scripts/basic/fixdep.c
78143 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
78144 /*
78145 * Lookup a value in the configuration string.
78146 */
78147 -static int is_defined_config(const char *name, int len, unsigned int hash)
78148 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
78149 {
78150 struct item *aux;
78151
78152 @@ -211,10 +211,10 @@ static void clear_config(void)
78153 /*
78154 * Record the use of a CONFIG_* word.
78155 */
78156 -static void use_config(const char *m, int slen)
78157 +static void use_config(const char *m, unsigned int slen)
78158 {
78159 unsigned int hash = strhash(m, slen);
78160 - int c, i;
78161 + unsigned int c, i;
78162
78163 if (is_defined_config(m, slen, hash))
78164 return;
78165 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
78166
78167 static void parse_config_file(const char *map, size_t len)
78168 {
78169 - const int *end = (const int *) (map + len);
78170 + const unsigned int *end = (const unsigned int *) (map + len);
78171 /* start at +1, so that p can never be < map */
78172 - const int *m = (const int *) map + 1;
78173 + const unsigned int *m = (const unsigned int *) map + 1;
78174 const char *p, *q;
78175
78176 for (; m < end; m++) {
78177 @@ -406,7 +406,7 @@ static void print_deps(void)
78178 static void traps(void)
78179 {
78180 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
78181 - int *p = (int *)test;
78182 + unsigned int *p = (unsigned int *)test;
78183
78184 if (*p != INT_CONF) {
78185 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
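The fixdep.c hunks move the length, index and word-scanning variables from int to unsigned int so that every comparison against strhash() results and buffer sizes happens with one signedness; mixing the two is a classic host-tool pitfall because a negative int silently converts to a huge unsigned value. A small standalone illustration of that pitfall (sketch only):

        /* sketch only -- a negative int compared against an unsigned limit */
        #include <stdio.h>

        int main(void)
        {
                int slen = -1;                  /* a length that went negative */
                unsigned int limit = 16;

                if (slen < limit)               /* slen is converted to 4294967295U */
                        puts("in range");
                else
                        puts("out of range: the signed value wrapped");
                return 0;
        }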
78186 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
78187 new file mode 100644
78188 index 0000000..8729101
78189 --- /dev/null
78190 +++ b/scripts/gcc-plugin.sh
78191 @@ -0,0 +1,2 @@
78192 +#!/bin/sh
78193 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
78194 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
78195 index e047e17..ea646ec 100644
78196 --- a/scripts/mod/file2alias.c
78197 +++ b/scripts/mod/file2alias.c
78198 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
78199 unsigned long size, unsigned long id_size,
78200 void *symval)
78201 {
78202 - int i;
78203 + unsigned int i;
78204
78205 if (size % id_size || size < id_size) {
78206 if (cross_build != 0)
78207 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
78208 /* USB is special because the bcdDevice can be matched against a numeric range */
78209 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
78210 static void do_usb_entry(struct usb_device_id *id,
78211 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
78212 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
78213 unsigned char range_lo, unsigned char range_hi,
78214 unsigned char max, struct module *mod)
78215 {
78216 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
78217 {
78218 unsigned int devlo, devhi;
78219 unsigned char chi, clo, max;
78220 - int ndigits;
78221 + unsigned int ndigits;
78222
78223 id->match_flags = TO_NATIVE(id->match_flags);
78224 id->idVendor = TO_NATIVE(id->idVendor);
78225 @@ -501,7 +501,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
78226 for (i = 0; i < count; i++) {
78227 const char *id = (char *)devs[i].id;
78228 char acpi_id[sizeof(devs[0].id)];
78229 - int j;
78230 + unsigned int j;
78231
78232 buf_printf(&mod->dev_table_buf,
78233 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78234 @@ -531,7 +531,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78235
78236 for (j = 0; j < PNP_MAX_DEVICES; j++) {
78237 const char *id = (char *)card->devs[j].id;
78238 - int i2, j2;
78239 + unsigned int i2, j2;
78240 int dup = 0;
78241
78242 if (!id[0])
78243 @@ -557,7 +557,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78244 /* add an individual alias for every device entry */
78245 if (!dup) {
78246 char acpi_id[sizeof(card->devs[0].id)];
78247 - int k;
78248 + unsigned int k;
78249
78250 buf_printf(&mod->dev_table_buf,
78251 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78252 @@ -882,7 +882,7 @@ static void dmi_ascii_filter(char *d, const char *s)
78253 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
78254 char *alias)
78255 {
78256 - int i, j;
78257 + unsigned int i, j;
78258
78259 sprintf(alias, "dmi*");
78260
78261 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
78262 index c4e7d15..4241aef 100644
78263 --- a/scripts/mod/modpost.c
78264 +++ b/scripts/mod/modpost.c
78265 @@ -922,6 +922,7 @@ enum mismatch {
78266 ANY_INIT_TO_ANY_EXIT,
78267 ANY_EXIT_TO_ANY_INIT,
78268 EXPORT_TO_INIT_EXIT,
78269 + DATA_TO_TEXT
78270 };
78271
78272 struct sectioncheck {
78273 @@ -1030,6 +1031,12 @@ const struct sectioncheck sectioncheck[] = {
78274 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
78275 .mismatch = EXPORT_TO_INIT_EXIT,
78276 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
78277 +},
78278 +/* Do not reference code from writable data */
78279 +{
78280 + .fromsec = { DATA_SECTIONS, NULL },
78281 + .tosec = { TEXT_SECTIONS, NULL },
78282 + .mismatch = DATA_TO_TEXT
78283 }
78284 };
78285
78286 @@ -1152,10 +1159,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
78287 continue;
78288 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
78289 continue;
78290 - if (sym->st_value == addr)
78291 - return sym;
78292 /* Find a symbol nearby - addr are maybe negative */
78293 d = sym->st_value - addr;
78294 + if (d == 0)
78295 + return sym;
78296 if (d < 0)
78297 d = addr - sym->st_value;
78298 if (d < distance) {
78299 @@ -1434,6 +1441,14 @@ static void report_sec_mismatch(const char *modname,
78300 tosym, prl_to, prl_to, tosym);
78301 free(prl_to);
78302 break;
78303 + case DATA_TO_TEXT:
78304 +/*
78305 + fprintf(stderr,
78306 + "The variable %s references\n"
78307 + "the %s %s%s%s\n",
78308 + fromsym, to, sec2annotation(tosec), tosym, to_p);
78309 +*/
78310 + break;
78311 }
78312 fprintf(stderr, "\n");
78313 }
78314 @@ -1668,7 +1683,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
78315 static void check_sec_ref(struct module *mod, const char *modname,
78316 struct elf_info *elf)
78317 {
78318 - int i;
78319 + unsigned int i;
78320 Elf_Shdr *sechdrs = elf->sechdrs;
78321
78322 /* Walk through all sections */
78323 @@ -1766,7 +1781,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
78324 va_end(ap);
78325 }
78326
78327 -void buf_write(struct buffer *buf, const char *s, int len)
78328 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
78329 {
78330 if (buf->size - buf->pos < len) {
78331 buf->size += len + SZ;
78332 @@ -1984,7 +1999,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
78333 if (fstat(fileno(file), &st) < 0)
78334 goto close_write;
78335
78336 - if (st.st_size != b->pos)
78337 + if (st.st_size != (off_t)b->pos)
78338 goto close_write;
78339
78340 tmp = NOFAIL(malloc(b->pos));
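Beyond the unsigned cleanups, the modpost.c changes fold the exact-address case of find_elf_symbol() into its distance computation and add a DATA_TO_TEXT entry to the section-mismatch table: a relocation from a writable data section into a text section is now recognised as its own mismatch class (the report itself is left commented out for the moment). In a constified kernel, structures full of function pointers are expected to be read-only, so a code address stored in plain writable data is worth flagging. Roughly the kind of reference the check distinguishes, as a sketch:

        /* sketch only -- the difference DATA_TO_TEXT is meant to surface */
        static void example_handler(void) { }

        /* code address kept in writable data: the new mismatch class */
        static void (*example_hook)(void) = example_handler;

        /* the same address in read-only data: not a data-to-text concern */
        static void (* const example_hook_ro)(void) = example_handler;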
78341 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
78342 index 51207e4..f7d603d 100644
78343 --- a/scripts/mod/modpost.h
78344 +++ b/scripts/mod/modpost.h
78345 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
78346
78347 struct buffer {
78348 char *p;
78349 - int pos;
78350 - int size;
78351 + unsigned int pos;
78352 + unsigned int size;
78353 };
78354
78355 void __attribute__((format(printf, 2, 3)))
78356 buf_printf(struct buffer *buf, const char *fmt, ...);
78357
78358 void
78359 -buf_write(struct buffer *buf, const char *s, int len);
78360 +buf_write(struct buffer *buf, const char *s, unsigned int len);
78361
78362 struct module {
78363 struct module *next;
78364 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
78365 index 9dfcd6d..099068e 100644
78366 --- a/scripts/mod/sumversion.c
78367 +++ b/scripts/mod/sumversion.c
78368 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
78369 goto out;
78370 }
78371
78372 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
78373 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
78374 warn("writing sum in %s failed: %s\n",
78375 filename, strerror(errno));
78376 goto out;
78377 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
78378 index 5c11312..72742b5 100644
78379 --- a/scripts/pnmtologo.c
78380 +++ b/scripts/pnmtologo.c
78381 @@ -237,14 +237,14 @@ static void write_header(void)
78382 fprintf(out, " * Linux logo %s\n", logoname);
78383 fputs(" */\n\n", out);
78384 fputs("#include <linux/linux_logo.h>\n\n", out);
78385 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
78386 + fprintf(out, "static unsigned char %s_data[] = {\n",
78387 logoname);
78388 }
78389
78390 static void write_footer(void)
78391 {
78392 fputs("\n};\n\n", out);
78393 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
78394 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
78395 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
78396 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
78397 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
78398 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
78399 fputs("\n};\n\n", out);
78400
78401 /* write logo clut */
78402 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
78403 + fprintf(out, "static unsigned char %s_clut[] = {\n",
78404 logoname);
78405 write_hex_cnt = 0;
78406 for (i = 0; i < logo_clutsize; i++) {
78407 diff --git a/scripts/tags.sh b/scripts/tags.sh
78408 index 833813a..0bc8588 100755
78409 --- a/scripts/tags.sh
78410 +++ b/scripts/tags.sh
78411 @@ -116,7 +116,7 @@ docscope()
78412
78413 dogtags()
78414 {
78415 - all_sources | gtags -f -
78416 + all_sources | gtags -i -f -
78417 }
78418
78419 exuberant()
78420 diff --git a/security/Kconfig b/security/Kconfig
78421 index 51bd5a0..c37f5e6 100644
78422 --- a/security/Kconfig
78423 +++ b/security/Kconfig
78424 @@ -4,6 +4,640 @@
78425
78426 menu "Security options"
78427
78428 +source grsecurity/Kconfig
78429 +
78430 +menu "PaX"
78431 +
78432 + config ARCH_TRACK_EXEC_LIMIT
78433 + bool
78434 +
78435 + config PAX_KERNEXEC_PLUGIN
78436 + bool
78437 +
78438 + config PAX_PER_CPU_PGD
78439 + bool
78440 +
78441 + config TASK_SIZE_MAX_SHIFT
78442 + int
78443 + depends on X86_64
78444 + default 47 if !PAX_PER_CPU_PGD
78445 + default 42 if PAX_PER_CPU_PGD
78446 +
78447 + config PAX_ENABLE_PAE
78448 + bool
78449 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
78450 +
78451 +config PAX
78452 + bool "Enable various PaX features"
78453 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
78454 + help
78455 + This allows you to enable various PaX features. PaX adds
78456 + intrusion prevention mechanisms to the kernel that reduce
78457 + the risks posed by exploitable memory corruption bugs.
78458 +
78459 +menu "PaX Control"
78460 + depends on PAX
78461 +
78462 +config PAX_SOFTMODE
78463 + bool 'Support soft mode'
78464 + help
78465 + Enabling this option will allow you to run PaX in soft mode, that
78466 + is, PaX features will not be enforced by default, only on executables
78467 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
78468 + support as they are the only way to mark executables for soft mode use.
78469 +
78470 + Soft mode can be activated by using the "pax_softmode=1" kernel command
78471 + line option on boot. Furthermore you can control various PaX features
78472 + at runtime via the entries in /proc/sys/kernel/pax.
78473 +
78474 +config PAX_EI_PAX
78475 + bool 'Use legacy ELF header marking'
78476 + help
78477 + Enabling this option will allow you to control PaX features on
78478 + a per executable basis via the 'chpax' utility available at
78479 + http://pax.grsecurity.net/. The control flags will be read from
78480 + an otherwise reserved part of the ELF header. This marking has
78481 + numerous drawbacks (no support for soft-mode, toolchain does not
78482 + know about the non-standard use of the ELF header); therefore it
78483 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
78484 + support.
78485 +
78486 + If you have applications not marked by the PT_PAX_FLAGS ELF program
78487 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
78488 + option otherwise they will not get any protection.
78489 +
78490 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
78491 + support as well, they will override the legacy EI_PAX marks.
78492 +
78493 +config PAX_PT_PAX_FLAGS
78494 + bool 'Use ELF program header marking'
78495 + help
78496 + Enabling this option will allow you to control PaX features on
78497 + a per executable basis via the 'paxctl' utility available at
78498 + http://pax.grsecurity.net/. The control flags will be read from
78499 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
78500 + has the benefits of supporting both soft mode and being fully
78501 + integrated into the toolchain (the binutils patch is available
78502 + from http://pax.grsecurity.net).
78503 +
78504 + If you have applications not marked by the PT_PAX_FLAGS ELF program
78505 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
78506 + support otherwise they will not get any protection.
78507 +
78508 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
78509 + must make sure that the marks are the same if a binary has both marks.
78510 +
78511 + Note that if you enable the legacy EI_PAX marking support as well,
78512 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
78513 +
78514 +config PAX_XATTR_PAX_FLAGS
78515 + bool 'Use filesystem extended attributes marking'
78516 + select CIFS_XATTR if CIFS
78517 + select EXT2_FS_XATTR if EXT2_FS
78518 + select EXT3_FS_XATTR if EXT3_FS
78519 + select EXT4_FS_XATTR if EXT4_FS
78520 + select JFFS2_FS_XATTR if JFFS2_FS
78521 + select REISERFS_FS_XATTR if REISERFS_FS
78522 + select SQUASHFS_XATTR if SQUASHFS
78523 + select TMPFS_XATTR if TMPFS
78524 + select UBIFS_FS_XATTR if UBIFS_FS
78525 + help
78526 + Enabling this option will allow you to control PaX features on
78527 + a per executable basis via the 'setfattr' utility. The control
78528 + flags will be read from the user.pax.flags extended attribute of
78529 + the file. This marking has the benefit of supporting binary-only
78530 + applications that check their own integrity (e.g., skype) and would
78531 + not tolerate chpax/paxctl changes. The main drawback is that
78532 + extended attributes are not supported by some filesystems (e.g.,
78533 + isofs, udf, vfat) so copying files through such filesystems will
78534 + lose the extended attributes and these PaX markings.
78535 +
78536 + If you have applications not marked by the PT_PAX_FLAGS ELF program
78537 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
78538 + support otherwise they will not get any protection.
78539 +
78540 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
78541 + must make sure that the marks are the same if a binary has both marks.
78542 +
78543 + Note that if you enable the legacy EI_PAX marking support as well,
78544 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
78545 +
78546 +choice
78547 + prompt 'MAC system integration'
78548 + default PAX_HAVE_ACL_FLAGS
78549 + help
78550 + Mandatory Access Control systems have the option of controlling
78551 + PaX flags on a per executable basis, choose the method supported
78552 + by your particular system.
78553 +
78554 + - "none": if your MAC system does not interact with PaX,
78555 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
78556 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
78557 +
78558 + NOTE: this option is for developers/integrators only.
78559 +
78560 + config PAX_NO_ACL_FLAGS
78561 + bool 'none'
78562 +
78563 + config PAX_HAVE_ACL_FLAGS
78564 + bool 'direct'
78565 +
78566 + config PAX_HOOK_ACL_FLAGS
78567 + bool 'hook'
78568 +endchoice
78569 +
78570 +endmenu
78571 +
78572 +menu "Non-executable pages"
78573 + depends on PAX
78574 +
78575 +config PAX_NOEXEC
78576 + bool "Enforce non-executable pages"
78577 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
78578 + help
78579 + By design some architectures do not allow for protecting memory
78580 + pages against execution or even if they do, Linux does not make
78581 + use of this feature. In practice this means that if a page is
78582 + readable (such as the stack or heap) it is also executable.
78583 +
78584 + There is a well known exploit technique that makes use of this
78585 + fact and a common programming mistake where an attacker can
78586 + introduce code of his choice somewhere in the attacked program's
78587 + memory (typically the stack or the heap) and then execute it.
78588 +
78589 + If the attacked program was running with different (typically
78590 + higher) privileges than that of the attacker, then he can elevate
78591 + his own privilege level (e.g. get a root shell, write to files for
78592 + which he does not have write access to, etc).
78593 +
78594 + Enabling this option will let you choose from various features
78595 + that prevent the injection and execution of 'foreign' code in
78596 + a program.
78597 +
78598 + This will also break programs that rely on the old behaviour and
78599 + expect that dynamically allocated memory via the malloc() family
78600 + of functions is executable (which it is not). Notable examples
78601 + are the XFree86 4.x server, the java runtime and wine.
78602 +
78603 +config PAX_PAGEEXEC
78604 + bool "Paging based non-executable pages"
78605 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
78606 + select S390_SWITCH_AMODE if S390
78607 + select S390_EXEC_PROTECT if S390
78608 + select ARCH_TRACK_EXEC_LIMIT if X86_32
78609 + help
78610 + This implementation is based on the paging feature of the CPU.
78611 + On i386 without hardware non-executable bit support there is a
78612 + variable but usually low performance impact, however on Intel's
78613 + P4 core based CPUs it is very high so you should not enable this
78614 + for kernels meant to be used on such CPUs.
78615 +
78616 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
78617 + with hardware non-executable bit support there is no performance
78618 + impact, on ppc the impact is negligible.
78619 +
78620 + Note that several architectures require various emulations due to
78621 + badly designed userland ABIs; this will cause a performance impact
78622 + but will disappear as soon as userland is fixed. For example, ppc
78623 + userland MUST have been built with secure-plt by a recent toolchain.
78624 +
78625 +config PAX_SEGMEXEC
78626 + bool "Segmentation based non-executable pages"
78627 + depends on PAX_NOEXEC && X86_32
78628 + help
78629 + This implementation is based on the segmentation feature of the
78630 + CPU and has a very small performance impact, however applications
78631 + will be limited to a 1.5 GB address space instead of the normal
78632 + 3 GB.
78633 +
78634 +config PAX_EMUTRAMP
78635 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
78636 + default y if PARISC
78637 + help
78638 + There are some programs and libraries that for one reason or
78639 + another attempt to execute special small code snippets from
78640 + non-executable memory pages. Most notable examples are the
78641 + signal handler return code generated by the kernel itself and
78642 + the GCC trampolines.
78643 +
78644 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
78645 + such programs will no longer work under your kernel.
78646 +
78647 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
78648 + utilities to enable trampoline emulation for the affected programs
78649 + yet still have the protection provided by the non-executable pages.
78650 +
78651 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
78652 + your system will not even boot.
78653 +
78654 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
78655 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
78656 + for the affected files.
78657 +
78658 + NOTE: enabling this feature *may* open up a loophole in the
78659 + protection provided by non-executable pages that an attacker
78660 + could abuse. Therefore the best solution is to not have any
78661 + files on your system that would require this option. This can
78662 + be achieved by not using libc5 (which relies on the kernel
78663 + signal handler return code) and not using or rewriting programs
78664 + that make use of the nested function implementation of GCC.
78665 + Skilled users can just fix GCC itself so that it implements
78666 + nested function calls in a way that does not interfere with PaX.
78667 +
78668 +config PAX_EMUSIGRT
78669 + bool "Automatically emulate sigreturn trampolines"
78670 + depends on PAX_EMUTRAMP && PARISC
78671 + default y
78672 + help
78673 + Enabling this option will have the kernel automatically detect
78674 + and emulate signal return trampolines executing on the stack
78675 + that would otherwise lead to task termination.
78676 +
78677 + This solution is intended as a temporary one for users with
78678 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
78679 + Modula-3 runtime, etc) or executables linked to such, basically
78680 + everything that does not specify its own SA_RESTORER function in
78681 + normal executable memory like glibc 2.1+ does.
78682 +
78683 + On parisc you MUST enable this option, otherwise your system will
78684 + not even boot.
78685 +
78686 + NOTE: this feature cannot be disabled on a per executable basis
78687 + and since it *does* open up a loophole in the protection provided
78688 + by non-executable pages, the best solution is to not have any
78689 + files on your system that would require this option.
78690 +
78691 +config PAX_MPROTECT
78692 + bool "Restrict mprotect()"
78693 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
78694 + help
78695 + Enabling this option will prevent programs from
78696 + - changing the executable status of memory pages that were
78697 + not originally created as executable,
78698 + - making read-only executable pages writable again,
78699 + - creating executable pages from anonymous memory,
78700 + - making read-only-after-relocations (RELRO) data pages writable again.
78701 +
78702 + You should say Y here to complete the protection provided by
78703 + the enforcement of non-executable pages.
78704 +
78705 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78706 + this feature on a per file basis.
78707 +
78708 +config PAX_MPROTECT_COMPAT
78709 + bool "Use legacy/compat protection demoting (read help)"
78710 + depends on PAX_MPROTECT
78711 + default n
78712 + help
78713 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
78714 + by sending the proper error code to the application. For some broken
78715 + userland, this can cause problems with Python or other applications. The
78716 + current implementation however allows for applications like clamav to
78717 + detect if JIT compilation/execution is allowed and to fall back gracefully
78718 + to an interpreter-based mode if it is not. While we encourage everyone
78719 + to use the current implementation as-is and push upstream to fix broken
78720 + userland (note that the RWX logging option can assist with this), in some
78721 + environments this may not be possible. Having to disable MPROTECT
78722 + completely on certain binaries reduces the security benefit of PaX,
78723 + so this option is provided for those environments to revert to the old
78724 + behavior.
78725 +
78726 +config PAX_ELFRELOCS
78727 + bool "Allow ELF text relocations (read help)"
78728 + depends on PAX_MPROTECT
78729 + default n
78730 + help
78731 + Non-executable pages and mprotect() restrictions are effective
78732 + in preventing the introduction of new executable code into an
78733 + attacked task's address space. There remain only two avenues
78734 + for this kind of attack: if the attacker can execute already
78735 + existing code in the attacked task then he can either have it
78736 + create and mmap() a file containing his code or have it mmap()
78737 + an already existing ELF library that does not have position
78738 + independent code in it and use mprotect() on it to make it
78739 + writable and copy his code there. While protecting against
78740 + the former approach is beyond PaX, the latter can be prevented
78741 + by having only PIC ELF libraries on one's system (which do not
78742 + need to relocate their code). If you are sure this is your case,
78743 + as is the case with all modern Linux distributions, then leave
78744 + this option disabled. You should say 'n' here.
78745 +
78746 +config PAX_ETEXECRELOCS
78747 + bool "Allow ELF ET_EXEC text relocations"
78748 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
78749 + select PAX_ELFRELOCS
78750 + default y
78751 + help
78752 + On some architectures there are incorrectly created applications
78753 + that require text relocations and would not work without enabling
78754 + this option. If you are an alpha, ia64 or parisc user, you should
78755 + enable this option and disable it once you have made sure that
78756 + none of your applications need it.
78757 +
78758 +config PAX_EMUPLT
78759 + bool "Automatically emulate ELF PLT"
78760 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
78761 + default y
78762 + help
78763 + Enabling this option will have the kernel automatically detect
78764 + and emulate the Procedure Linkage Table entries in ELF files.
78765 + On some architectures such entries are in writable memory, and
78766 + become non-executable under PaX, leading to task termination. Therefore
78767 + it is mandatory that you enable this option on alpha, parisc,
78768 + sparc and sparc64, otherwise your system will not even boot.
78769 +
78770 + NOTE: this feature *does* open up a loophole in the protection
78771 + provided by the non-executable pages, therefore the proper
78772 + solution is to modify the toolchain to produce a PLT that does
78773 + not need to be writable.
78774 +
78775 +config PAX_DLRESOLVE
78776 + bool 'Emulate old glibc resolver stub'
78777 + depends on PAX_EMUPLT && SPARC
78778 + default n
78779 + help
78780 + This option is needed if userland has an old glibc (before 2.4)
78781 + that puts a 'save' instruction into the runtime generated resolver
78782 + stub that needs special emulation.
78783 +
78784 +config PAX_KERNEXEC
78785 + bool "Enforce non-executable kernel pages"
78786 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
78787 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
78788 + select PAX_KERNEXEC_PLUGIN if X86_64
78789 + help
78790 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
78791 + that is, enabling this option will make it harder to inject
78792 + and execute 'foreign' code in kernel memory itself.
78793 +
78794 + Note that on x86_64 kernels there is a known regression when
78795 + this feature and KVM/VMX are both enabled in the host kernel.
78796 +
78797 +choice
78798 + prompt "Return Address Instrumentation Method"
78799 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
78800 + depends on PAX_KERNEXEC_PLUGIN
78801 + help
78802 + Select the method used to instrument function pointer dereferences.
78803 + Note that binary modules cannot be instrumented by this approach.
78804 +
78805 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
78806 + bool "bts"
78807 + help
78808 + This method is compatible with binary only modules but has
78809 + a higher runtime overhead.
78810 +
78811 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
78812 + bool "or"
78813 + depends on !PARAVIRT
78814 + help
78815 + This method is incompatible with binary only modules but has
78816 + a lower runtime overhead.
78817 +endchoice
78818 +
78819 +config PAX_KERNEXEC_PLUGIN_METHOD
78820 + string
78821 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
78822 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
78823 + default ""
78824 +
78825 +config PAX_KERNEXEC_MODULE_TEXT
78826 + int "Minimum amount of memory reserved for module code"
78827 + default "4"
78828 + depends on PAX_KERNEXEC && X86_32 && MODULES
78829 + help
78830 + Due to implementation details the kernel must reserve a fixed
78831 + amount of memory for module code at compile time that cannot be
78832 + changed at runtime. Here you can specify the minimum amount
78833 + in MB that will be reserved. Due to the same implementation
78834 + details this size will always be rounded up to the next 2/4 MB
78835 + boundary (depends on PAE) so the actually available memory for
78836 + module code will usually be more than this minimum.
78837 +
78838 + The default 4 MB should be enough for most users but if you have
78839 + an excessive number of modules (e.g., most distribution configs
78840 + compile many drivers as modules) or use huge modules such as
78841 + nvidia's kernel driver, you will need to adjust this amount.
78842 + A good rule of thumb is to look at your currently loaded kernel
78843 + modules and add up their sizes.
78844 +
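The "add up their sizes" advice can be scripted; the hypothetical helper below (not part of the patch) sums the size column of /proc/modules and reports the total, which can then be compared against the amount reserved here.

    /* modsize.c - gcc -o modsize modsize.c
     * Assumes each /proc/modules line fits into the buffer, which is the
     * common case; this is a sketch, not a robust parser. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/modules", "r");
            char line[2048], name[128];
            unsigned long size, total = 0;

            if (!f) {
                    perror("/proc/modules");
                    return 1;
            }
            /* each line: <name> <size> <refcount> <deps> <state> <address> */
            while (fgets(line, sizeof(line), f))
                    if (sscanf(line, "%127s %lu", name, &size) == 2)
                            total += size;
            fclose(f);

            printf("loaded module code: %lu bytes (~%lu MB)\n",
                   total, (total + (1UL << 20) - 1) >> 20);
            return 0;
    }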
78845 +endmenu
78846 +
78847 +menu "Address Space Layout Randomization"
78848 + depends on PAX
78849 +
78850 +config PAX_ASLR
78851 + bool "Address Space Layout Randomization"
78852 + help
78853 + Many if not most exploit techniques rely on the knowledge of
78854 + certain addresses in the attacked program. The following options
78855 + will allow the kernel to apply a certain amount of randomization
78856 + to specific parts of the program thereby forcing an attacker to
78857 + guess them in most cases. Any failed guess will most likely crash
78858 + the attacked program which allows the kernel to detect such attempts
78859 + and react to them. PaX itself provides no reaction mechanisms,
78860 + instead it is strongly encouraged that you make use of Nergal's
78861 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
78862 + (http://www.grsecurity.net/) built-in crash detection features or
78863 + develop one yourself.
78864 +
78865 + By saying Y here you can choose to randomize the following areas:
78866 + - top of the task's kernel stack
78867 + - top of the task's userland stack
78868 + - base address for mmap() requests that do not specify one
78869 + (this includes all libraries)
78870 + - base address of the main executable
78871 +
78872 + It is strongly recommended to say Y here as address space layout
78873 + randomization has negligible impact on performance yet provides
78874 + very effective protection.
78875 +
78876 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78877 + this feature on a per file basis.
78878 +
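The effect of the randomization described above is easy to observe from userland; the sketch below (illustrative only, not part of the patch) prints a stack address, the address of main() and the address of a fresh mmap() region, so running it a few times shows which bases change between runs.

    /* aslr_peek.c - gcc -fPIE -pie -o aslr_peek aslr_peek.c */
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            int on_stack;
            void *anon = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            printf("stack  : %p\n", (void *)&on_stack);   /* userland stack base */
            printf("main() : %p\n", (void *)main);        /* ET_DYN executable base */
            printf("mmap() : %p\n", anon);                /* mmap() base */

            if (anon != MAP_FAILED)
                    munmap(anon, 4096);
            return 0;
    }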
78879 +config PAX_RANDKSTACK
78880 + bool "Randomize kernel stack base"
78881 + depends on X86_TSC && X86
78882 + help
78883 + By saying Y here the kernel will randomize every task's kernel
78884 + stack on every system call. This will not only force an attacker
78885 + to guess it but also prevent him from making use of possible
78886 + leaked information about it.
78887 +
78888 + Since the kernel stack is a rather scarce resource, randomization
78889 + may cause unexpected stack overflows, therefore you should very
78890 + carefully test your system. Note that once enabled in the kernel
78891 + configuration, this feature cannot be disabled on a per file basis.
78892 +
78893 +config PAX_RANDUSTACK
78894 + bool "Randomize user stack base"
78895 + depends on PAX_ASLR
78896 + help
78897 + By saying Y here the kernel will randomize every task's userland
78898 + stack. The randomization is done in two steps where the second
78899 + one may apply a large shift to the top of the stack and
78900 + cause problems for programs that want to use lots of memory (more
78901 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
78902 + For this reason the second step can be controlled by 'chpax' or
78903 + 'paxctl' on a per file basis.
78904 +
78905 +config PAX_RANDMMAP
78906 + bool "Randomize mmap() base"
78907 + depends on PAX_ASLR
78908 + help
78909 + By saying Y here the kernel will use a randomized base address for
78910 + mmap() requests that do not specify one themselves. As a result
78911 + all dynamically loaded libraries will appear at random addresses
78912 + and therefore be harder to exploit by a technique where an attacker
78913 + attempts to execute library code for his purposes (e.g. spawn a
78914 + shell from an exploited program that is running at an elevated
78915 + privilege level).
78916 +
78917 + Furthermore, if a program is relinked as a dynamic ELF file, its
78918 + base address will be randomized as well, completing the full
78919 + randomization of the address space layout. Attacking such programs
78920 + becomes a guessing game. You can find an example of doing this at
78921 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
78922 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
78923 +
78924 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
78925 + feature on a per file basis.
78926 +
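Complementing the sketch above, the snippet below (again illustrative, not part of the patch) reports the load address of the shared object providing printf(); with RANDMMAP active that base differs from run to run, which is what turns return-into-library style attacks into a guessing game.

    /* libbase.c - gcc -o libbase libbase.c -ldl */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <dlfcn.h>

    int main(void)
    {
            Dl_info info;

            /* resolve the object that provides printf and report its base;
             * under RANDMMAP this base changes from one run to the next */
            if (dladdr((void *)printf, &info) && info.dli_fbase)
                    printf("%s loaded at %p\n", info.dli_fname, info.dli_fbase);
            else
                    printf("dladdr failed\n");
            return 0;
    }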
78927 +endmenu
78928 +
78929 +menu "Miscellaneous hardening features"
78930 +
78931 +config PAX_MEMORY_SANITIZE
78932 + bool "Sanitize all freed memory"
78933 + depends on !HIBERNATION
78934 + help
78935 + By saying Y here the kernel will erase memory pages as soon as they
78936 + are freed. This in turn reduces the lifetime of data stored in the
78937 + pages, making it less likely that sensitive information such as
78938 + passwords, cryptographic secrets, etc stay in memory for too long.
78939 +
78940 + This is especially useful for programs whose runtime is short; long
78941 + lived processes and the kernel itself also benefit from this as long as
78942 + they operate on whole memory pages and ensure timely freeing of pages
78943 + that may hold sensitive information.
78944 +
78945 + The tradeoff is performance impact: on a single CPU system kernel
78946 + compilation sees a 3% slowdown; other systems and workloads may vary
78947 + and you are advised to test this feature on your expected workload
78948 + before deploying it.
78949 +
78950 + Note that this feature does not protect data stored in live pages,
78951 + e.g., process memory swapped to disk may stay there for a long time.
78952 +
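The same idea can be mimicked in userland to get a feel for it; the hypothetical wrapper below (an analogy, not the kernel code added by this patch) erases an object before handing it back to the allocator so its contents do not linger in freed memory.

    /* sanitize_free.c - gcc -o sanitize_free sanitize_free.c */
    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical helper: wipe the object before freeing it, analogous
     * to the kernel erasing pages as they are freed */
    static void sanitizing_free(void *p, size_t len)
    {
            volatile unsigned char *q = p;

            if (!p)
                    return;
            while (len--)
                    *q++ = 0;       /* volatile store, so the wipe is not optimized away */
            free(p);
    }

    int main(void)
    {
            size_t len = 64;
            char *secret = malloc(len);

            if (!secret)
                    return 1;
            snprintf(secret, len, "a passphrase that should not linger");
            printf("using: %s\n", secret);
            sanitizing_free(secret, len);
            return 0;
    }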
78953 +config PAX_MEMORY_STACKLEAK
78954 + bool "Sanitize kernel stack"
78955 + depends on X86
78956 + help
78957 + By saying Y here the kernel will erase the kernel stack before it
78958 + returns from a system call. This in turn reduces the information
78959 + that a kernel stack leak bug can reveal.
78960 +
78961 + Note that such a bug can still leak information that was put on
78962 + the stack by the current system call (the one eventually triggering
78963 + the bug) but traces of earlier system calls on the kernel stack
78964 + cannot leak anymore.
78965 +
78966 + The tradeoff is performance impact: on a single CPU system kernel
78967 + compilation sees a 1% slowdown; other systems and workloads may vary
78968 + and you are advised to test this feature on your expected workload
78969 + before deploying it.
78970 +
78971 + Note: full support for this feature requires gcc with plugin support
78972 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
78973 + versions means that functions with large enough stack frames may
78974 + leave uninitialized memory behind that may be exposed to a later
78975 + syscall leaking the stack.
78976 +
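As a userland analogy for what the kernel does here (a sketch, not the plugin-generated code), the snippet below reuses one shared buffer for two consecutive "calls"; erasing it in between is the STACKLEAK step that removes the traces an earlier call left behind.

    /* stackleak_demo.c - gcc -o stackleak_demo stackleak_demo.c */
    #include <stdio.h>
    #include <string.h>

    static char fake_stack[256];    /* stands in for the shared kernel stack */

    static void syscall_one(void)
    {
            /* leaves sensitive data behind in the shared buffer */
            strcpy(fake_stack, "key=hunter2");
    }

    static void erase_fake_stack(void)
    {
            /* the STACKLEAK step: wipe the whole area before "returning" */
            memset(fake_stack, 0, sizeof(fake_stack));
    }

    static void syscall_two(int wiped)
    {
            printf("%s: leftover = \"%.32s\"\n",
                   wiped ? "with erase" : "without erase", fake_stack);
    }

    int main(void)
    {
            syscall_one();
            syscall_two(0);         /* sees key=hunter2 */

            syscall_one();
            erase_fake_stack();
            syscall_two(1);         /* sees an empty string */
            return 0;
    }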
78977 +config PAX_MEMORY_UDEREF
78978 + bool "Prevent invalid userland pointer dereference"
78979 + depends on X86 && !UML_X86 && !XEN
78980 + select PAX_PER_CPU_PGD if X86_64
78981 + help
78982 + By saying Y here the kernel will be prevented from dereferencing
78983 + userland pointers in contexts where the kernel expects only kernel
78984 + pointers. This is both a useful runtime debugging feature and a
78985 + security measure that prevents exploiting a class of kernel bugs.
78986 +
78987 + The tradeoff is that some virtualization solutions may experience
78988 + a huge slowdown and therefore you should not enable this feature
78989 + for kernels meant to run in such environments. Whether a given VM
78990 + solution is affected or not is best determined by simply trying it
78991 + out; the performance impact will be obvious right on boot as this
78992 + mechanism engages from very early on. A good rule of thumb is that
78993 + VMs running on CPUs without hardware virtualization support (i.e.,
78994 + the majority of IA-32 CPUs) will likely experience the slowdown.
78995 +
78996 +config PAX_REFCOUNT
78997 + bool "Prevent various kernel object reference counter overflows"
78998 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
78999 + help
79000 + By saying Y here the kernel will detect and prevent overflowing
79001 + various (but not all) kinds of object reference counters. Such
79002 + overflows normally occur only due to bugs and are often, if
79003 + not always, exploitable.
79004 +
79005 + The tradeoff is that data structures protected by an overflowed
79006 + refcount will never be freed and therefore will leak memory. Note
79007 + that this leak also happens even without this protection but in
79008 + that case the overflow can eventually trigger the freeing of the
79009 + data structure while it is still being used elsewhere, resulting
79010 + in the exploitable situation that this feature prevents.
79011 +
79012 + Since this has a negligible performance impact, you should enable
79013 + this feature.
79014 +
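A minimal sketch of the saturate-instead-of-wrap idea (hypothetical helper, not the atomic kernel implementation added by this patch): once the counter reaches its maximum the increment is refused, trading a memory leak for the use-after-free that a wrapped counter would eventually cause.

    /* refcount_demo.c - gcc -o refcount_demo refcount_demo.c */
    #include <stdio.h>
    #include <limits.h>

    static int checked_inc(int *count)
    {
            if (*count == INT_MAX)
                    return -1;      /* saturate: leak the object rather than wrap */
            (*count)++;
            return 0;
    }

    int main(void)
    {
            int refs = INT_MAX - 1;
            int i;

            for (i = 1; i <= 2; i++) {
                    int rc = checked_inc(&refs);

                    printf("inc #%d: %s (refs=%d)\n",
                           i, rc ? "refused" : "ok", refs);
            }
            return 0;
    }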
79015 +config PAX_USERCOPY
79016 + bool "Harden heap object copies between kernel and userland"
79017 + depends on X86 || PPC || SPARC || ARM
79018 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
79019 + help
79020 + By saying Y here the kernel will enforce the size of heap objects
79021 + when they are copied in either direction between the kernel and
79022 + userland, even if only a part of the heap object is copied.
79023 +
79024 + Specifically, this checking prevents information leaking from the
79025 + kernel heap during kernel to userland copies (if the kernel heap
79026 + object is otherwise fully initialized) and prevents kernel heap
79027 + overflows during userland to kernel copies.
79028 +
79029 + Note that the current implementation provides the strictest bounds
79030 + checks for the SLUB allocator.
79031 +
79032 + Enabling this option also enables per-slab cache protection against
79033 + data in a given cache being copied into or out of userland via the
79034 + copy accessors. Though the whitelist of regions will be reduced over
79035 + time, it notably protects important data structures like task structs.
79036 +
79037 + If frame pointers are enabled on x86, this option will also restrict
79038 + copies into and out of the kernel stack to local variables within a
79039 + single frame.
79040 +
79041 + Since this has a negligible performance impact, you should enable
79042 + this feature.
79043 +
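The check boils down to comparing the requested copy length against the size of the backing object; the userland sketch below (hypothetical helper, the kernel derives the object size from slab and stack metadata instead) shows the refusal path for a length that would spill past the intended member.

    /* usercopy_demo.c - gcc -o usercopy_demo usercopy_demo.c */
    #include <stdio.h>
    #include <string.h>

    struct heap_obj {
            char name[16];
            char secret[48];        /* must never leak past a copy of 'name' */
    };

    /* hypothetical helper: copy at most obj_size bytes out of the object */
    static int checked_copy(void *dst, const void *obj, size_t obj_size, size_t len)
    {
            if (len > obj_size) {
                    fprintf(stderr, "usercopy: refusing %zu byte copy from %zu byte object\n",
                            len, obj_size);
                    return -1;
            }
            memcpy(dst, obj, len);
            return 0;
    }

    int main(void)
    {
            struct heap_obj o = { "widget", "top-secret" };
            char out[64];

            /* fine: stays within the 'name' member */
            checked_copy(out, o.name, sizeof(o.name), sizeof(o.name));
            /* caught: a buggy length that would also expose 'secret' */
            checked_copy(out, o.name, sizeof(o.name), 40);
            return 0;
    }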
79044 +config PAX_SIZE_OVERFLOW
79045 + bool "Prevent various integer overflows in function size parameters"
79046 + depends on X86
79047 + help
79048 + By saying Y here the kernel recomputes expressions of function
79049 + arguments marked by a size_overflow attribute with double integer
79050 + precision (DImode/TImode for 32/64 bit integer types).
79051 +
79052 + The recomputed argument is checked against INT_MAX; on overflow an
79053 + event is logged and the triggering process is killed.
79054 +
79055 + Homepage:
79056 + http://www.grsecurity.net/~ephox/overflow_plugin/
79057 +
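What the plugin arranges automatically can be written out by hand for one expression; the sketch below (illustrative only) recomputes a 32-bit size calculation with 64-bit precision and rejects results above INT_MAX, which is the check that would otherwise log an event and kill the task.

    /* size_overflow_demo.c - gcc -o size_overflow_demo size_overflow_demo.c */
    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    static int checked_alloc_size(int nmemb, int elem, int *out)
    {
            /* recompute with 64-bit precision instead of trusting the
             * 32-bit multiplication, which may already have wrapped */
            int64_t wide = (int64_t)nmemb * (int64_t)elem;

            if (wide < 0 || wide > INT_MAX)
                    return -1;      /* the plugin would log and kill the task here */
            *out = (int)wide;
            return 0;
    }

    int main(void)
    {
            int size;

            printf("1000 * 64     -> %s\n",
                   checked_alloc_size(1000, 64, &size) ? "rejected" : "ok");
            printf("70000 * 70000 -> %s\n",
                   checked_alloc_size(70000, 70000, &size) ? "rejected" : "ok");
            return 0;
    }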
79058 +endmenu
79059 +
79060 +endmenu
79061 +
79062 config KEYS
79063 bool "Enable access key retention support"
79064 help
79065 @@ -169,7 +803,7 @@ config INTEL_TXT
79066 config LSM_MMAP_MIN_ADDR
79067 int "Low address space for LSM to protect from user allocation"
79068 depends on SECURITY && SECURITY_SELINUX
79069 - default 32768 if ARM
79070 + default 32768 if ALPHA || ARM || PARISC || SPARC32
79071 default 65536
79072 help
79073 This is the portion of low virtual memory which should be protected
79074 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
79075 index 97ce8fa..23dad96 100644
79076 --- a/security/apparmor/lsm.c
79077 +++ b/security/apparmor/lsm.c
79078 @@ -620,7 +620,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
79079 return error;
79080 }
79081
79082 -static struct security_operations apparmor_ops = {
79083 +static struct security_operations apparmor_ops __read_only = {
79084 .name = "apparmor",
79085
79086 .ptrace_access_check = apparmor_ptrace_access_check,
79087 diff --git a/security/commoncap.c b/security/commoncap.c
79088 index b8d2bb9..980069e 100644
79089 --- a/security/commoncap.c
79090 +++ b/security/commoncap.c
79091 @@ -29,6 +29,7 @@
79092 #include <linux/securebits.h>
79093 #include <linux/user_namespace.h>
79094 #include <linux/personality.h>
79095 +#include <net/sock.h>
79096
79097 /*
79098 * If a non-root user executes a setuid-root binary in
79099 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
79100 {
79101 const struct cred *cred = current_cred();
79102
79103 + if (gr_acl_enable_at_secure())
79104 + return 1;
79105 +
79106 if (cred->uid != 0) {
79107 if (bprm->cap_effective)
79108 return 1;
79109 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
79110 index 3ccf7ac..d73ad64 100644
79111 --- a/security/integrity/ima/ima.h
79112 +++ b/security/integrity/ima/ima.h
79113 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79114 extern spinlock_t ima_queue_lock;
79115
79116 struct ima_h_table {
79117 - atomic_long_t len; /* number of stored measurements in the list */
79118 - atomic_long_t violations;
79119 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
79120 + atomic_long_unchecked_t violations;
79121 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
79122 };
79123 extern struct ima_h_table ima_htable;
79124 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
79125 index 88a2788..581ab92 100644
79126 --- a/security/integrity/ima/ima_api.c
79127 +++ b/security/integrity/ima/ima_api.c
79128 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79129 int result;
79130
79131 /* can overflow, only indicator */
79132 - atomic_long_inc(&ima_htable.violations);
79133 + atomic_long_inc_unchecked(&ima_htable.violations);
79134
79135 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
79136 if (!entry) {
79137 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
79138 index e1aa2b4..52027bf 100644
79139 --- a/security/integrity/ima/ima_fs.c
79140 +++ b/security/integrity/ima/ima_fs.c
79141 @@ -28,12 +28,12 @@
79142 static int valid_policy = 1;
79143 #define TMPBUFLEN 12
79144 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
79145 - loff_t *ppos, atomic_long_t *val)
79146 + loff_t *ppos, atomic_long_unchecked_t *val)
79147 {
79148 char tmpbuf[TMPBUFLEN];
79149 ssize_t len;
79150
79151 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
79152 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
79153 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
79154 }
79155
79156 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
79157 index 55a6271..ad829c3 100644
79158 --- a/security/integrity/ima/ima_queue.c
79159 +++ b/security/integrity/ima/ima_queue.c
79160 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
79161 INIT_LIST_HEAD(&qe->later);
79162 list_add_tail_rcu(&qe->later, &ima_measurements);
79163
79164 - atomic_long_inc(&ima_htable.len);
79165 + atomic_long_inc_unchecked(&ima_htable.len);
79166 key = ima_hash_key(entry->digest);
79167 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
79168 return 0;
79169 diff --git a/security/keys/compat.c b/security/keys/compat.c
79170 index 4c48e13..7abdac9 100644
79171 --- a/security/keys/compat.c
79172 +++ b/security/keys/compat.c
79173 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
79174 if (ret == 0)
79175 goto no_payload_free;
79176
79177 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79178 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79179
79180 if (iov != iovstack)
79181 kfree(iov);
79182 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
79183 index 0b3f5d7..892c8a6 100644
79184 --- a/security/keys/keyctl.c
79185 +++ b/security/keys/keyctl.c
79186 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
79187 /*
79188 * Copy the iovec data from userspace
79189 */
79190 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79191 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
79192 unsigned ioc)
79193 {
79194 for (; ioc > 0; ioc--) {
79195 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79196 * If successful, 0 will be returned.
79197 */
79198 long keyctl_instantiate_key_common(key_serial_t id,
79199 - const struct iovec *payload_iov,
79200 + const struct iovec __user *payload_iov,
79201 unsigned ioc,
79202 size_t plen,
79203 key_serial_t ringid)
79204 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
79205 [0].iov_len = plen
79206 };
79207
79208 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
79209 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
79210 }
79211
79212 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
79213 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
79214 if (ret == 0)
79215 goto no_payload_free;
79216
79217 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79218 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79219
79220 if (iov != iovstack)
79221 kfree(iov);
79222 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
79223 index d605f75..2bc6be9 100644
79224 --- a/security/keys/keyring.c
79225 +++ b/security/keys/keyring.c
79226 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
79227 ret = -EFAULT;
79228
79229 for (loop = 0; loop < klist->nkeys; loop++) {
79230 + key_serial_t serial;
79231 key = klist->keys[loop];
79232 + serial = key->serial;
79233
79234 tmp = sizeof(key_serial_t);
79235 if (tmp > buflen)
79236 tmp = buflen;
79237
79238 - if (copy_to_user(buffer,
79239 - &key->serial,
79240 - tmp) != 0)
79241 + if (copy_to_user(buffer, &serial, tmp))
79242 goto error;
79243
79244 buflen -= tmp;
79245 diff --git a/security/min_addr.c b/security/min_addr.c
79246 index f728728..6457a0c 100644
79247 --- a/security/min_addr.c
79248 +++ b/security/min_addr.c
79249 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
79250 */
79251 static void update_mmap_min_addr(void)
79252 {
79253 +#ifndef SPARC
79254 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
79255 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
79256 mmap_min_addr = dac_mmap_min_addr;
79257 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
79258 #else
79259 mmap_min_addr = dac_mmap_min_addr;
79260 #endif
79261 +#endif
79262 }
79263
79264 /*
79265 diff --git a/security/security.c b/security/security.c
79266 index d754249..8bf426e 100644
79267 --- a/security/security.c
79268 +++ b/security/security.c
79269 @@ -26,8 +26,8 @@
79270 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
79271 CONFIG_DEFAULT_SECURITY;
79272
79273 -static struct security_operations *security_ops;
79274 -static struct security_operations default_security_ops = {
79275 +static struct security_operations *security_ops __read_only;
79276 +static struct security_operations default_security_ops __read_only = {
79277 .name = "default",
79278 };
79279
79280 @@ -68,7 +68,9 @@ int __init security_init(void)
79281
79282 void reset_security_ops(void)
79283 {
79284 + pax_open_kernel();
79285 security_ops = &default_security_ops;
79286 + pax_close_kernel();
79287 }
79288
79289 /* Save user chosen LSM */
79290 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
79291 index 6a3683e..f52f4c0 100644
79292 --- a/security/selinux/hooks.c
79293 +++ b/security/selinux/hooks.c
79294 @@ -94,8 +94,6 @@
79295
79296 #define NUM_SEL_MNT_OPTS 5
79297
79298 -extern struct security_operations *security_ops;
79299 -
79300 /* SECMARK reference count */
79301 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
79302
79303 @@ -5429,7 +5427,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
79304
79305 #endif
79306
79307 -static struct security_operations selinux_ops = {
79308 +static struct security_operations selinux_ops __read_only = {
79309 .name = "selinux",
79310
79311 .ptrace_access_check = selinux_ptrace_access_check,
79312 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
79313 index b43813c..74be837 100644
79314 --- a/security/selinux/include/xfrm.h
79315 +++ b/security/selinux/include/xfrm.h
79316 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
79317
79318 static inline void selinux_xfrm_notify_policyload(void)
79319 {
79320 - atomic_inc(&flow_cache_genid);
79321 + atomic_inc_unchecked(&flow_cache_genid);
79322 }
79323 #else
79324 static inline int selinux_xfrm_enabled(void)
79325 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
79326 index e8af5b0b..78527ef 100644
79327 --- a/security/smack/smack_lsm.c
79328 +++ b/security/smack/smack_lsm.c
79329 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
79330 return 0;
79331 }
79332
79333 -struct security_operations smack_ops = {
79334 +struct security_operations smack_ops __read_only = {
79335 .name = "smack",
79336
79337 .ptrace_access_check = smack_ptrace_access_check,
79338 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
79339 index 620d37c..e2ad89b 100644
79340 --- a/security/tomoyo/tomoyo.c
79341 +++ b/security/tomoyo/tomoyo.c
79342 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
79343 * tomoyo_security_ops is a "struct security_operations" which is used for
79344 * registering TOMOYO.
79345 */
79346 -static struct security_operations tomoyo_security_ops = {
79347 +static struct security_operations tomoyo_security_ops __read_only = {
79348 .name = "tomoyo",
79349 .cred_alloc_blank = tomoyo_cred_alloc_blank,
79350 .cred_prepare = tomoyo_cred_prepare,
79351 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
79352 index 762af68..7103453 100644
79353 --- a/sound/aoa/codecs/onyx.c
79354 +++ b/sound/aoa/codecs/onyx.c
79355 @@ -54,7 +54,7 @@ struct onyx {
79356 spdif_locked:1,
79357 analog_locked:1,
79358 original_mute:2;
79359 - int open_count;
79360 + local_t open_count;
79361 struct codec_info *codec_info;
79362
79363 /* mutex serializes concurrent access to the device
79364 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
79365 struct onyx *onyx = cii->codec_data;
79366
79367 mutex_lock(&onyx->mutex);
79368 - onyx->open_count++;
79369 + local_inc(&onyx->open_count);
79370 mutex_unlock(&onyx->mutex);
79371
79372 return 0;
79373 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
79374 struct onyx *onyx = cii->codec_data;
79375
79376 mutex_lock(&onyx->mutex);
79377 - onyx->open_count--;
79378 - if (!onyx->open_count)
79379 + if (local_dec_and_test(&onyx->open_count))
79380 onyx->spdif_locked = onyx->analog_locked = 0;
79381 mutex_unlock(&onyx->mutex);
79382
79383 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
79384 index ffd2025..df062c9 100644
79385 --- a/sound/aoa/codecs/onyx.h
79386 +++ b/sound/aoa/codecs/onyx.h
79387 @@ -11,6 +11,7 @@
79388 #include <linux/i2c.h>
79389 #include <asm/pmac_low_i2c.h>
79390 #include <asm/prom.h>
79391 +#include <asm/local.h>
79392
79393 /* PCM3052 register definitions */
79394
79395 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
79396 index 08fde00..0bf641a 100644
79397 --- a/sound/core/oss/pcm_oss.c
79398 +++ b/sound/core/oss/pcm_oss.c
79399 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
79400 if (in_kernel) {
79401 mm_segment_t fs;
79402 fs = snd_enter_user();
79403 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79404 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79405 snd_leave_user(fs);
79406 } else {
79407 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79408 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79409 }
79410 if (ret != -EPIPE && ret != -ESTRPIPE)
79411 break;
79412 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
79413 if (in_kernel) {
79414 mm_segment_t fs;
79415 fs = snd_enter_user();
79416 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79417 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79418 snd_leave_user(fs);
79419 } else {
79420 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79421 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79422 }
79423 if (ret == -EPIPE) {
79424 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
79425 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
79426 struct snd_pcm_plugin_channel *channels;
79427 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
79428 if (!in_kernel) {
79429 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
79430 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
79431 return -EFAULT;
79432 buf = runtime->oss.buffer;
79433 }
79434 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
79435 }
79436 } else {
79437 tmp = snd_pcm_oss_write2(substream,
79438 - (const char __force *)buf,
79439 + (const char __force_kernel *)buf,
79440 runtime->oss.period_bytes, 0);
79441 if (tmp <= 0)
79442 goto err;
79443 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
79444 struct snd_pcm_runtime *runtime = substream->runtime;
79445 snd_pcm_sframes_t frames, frames1;
79446 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
79447 - char __user *final_dst = (char __force __user *)buf;
79448 + char __user *final_dst = (char __force_user *)buf;
79449 if (runtime->oss.plugin_first) {
79450 struct snd_pcm_plugin_channel *channels;
79451 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
79452 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
79453 xfer += tmp;
79454 runtime->oss.buffer_used -= tmp;
79455 } else {
79456 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
79457 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
79458 runtime->oss.period_bytes, 0);
79459 if (tmp <= 0)
79460 goto err;
79461 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
79462 size1);
79463 size1 /= runtime->channels; /* frames */
79464 fs = snd_enter_user();
79465 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
79466 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
79467 snd_leave_user(fs);
79468 }
79469 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
79470 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
79471 index 91cdf94..4085161 100644
79472 --- a/sound/core/pcm_compat.c
79473 +++ b/sound/core/pcm_compat.c
79474 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
79475 int err;
79476
79477 fs = snd_enter_user();
79478 - err = snd_pcm_delay(substream, &delay);
79479 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
79480 snd_leave_user(fs);
79481 if (err < 0)
79482 return err;
79483 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
79484 index 25ed9fe..24c46e9 100644
79485 --- a/sound/core/pcm_native.c
79486 +++ b/sound/core/pcm_native.c
79487 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
79488 switch (substream->stream) {
79489 case SNDRV_PCM_STREAM_PLAYBACK:
79490 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
79491 - (void __user *)arg);
79492 + (void __force_user *)arg);
79493 break;
79494 case SNDRV_PCM_STREAM_CAPTURE:
79495 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
79496 - (void __user *)arg);
79497 + (void __force_user *)arg);
79498 break;
79499 default:
79500 result = -EINVAL;
79501 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
79502 index 5cf8d65..912a79c 100644
79503 --- a/sound/core/seq/seq_device.c
79504 +++ b/sound/core/seq/seq_device.c
79505 @@ -64,7 +64,7 @@ struct ops_list {
79506 int argsize; /* argument size */
79507
79508 /* operators */
79509 - struct snd_seq_dev_ops ops;
79510 + struct snd_seq_dev_ops *ops;
79511
79512 /* registred devices */
79513 struct list_head dev_list; /* list of devices */
79514 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
79515
79516 mutex_lock(&ops->reg_mutex);
79517 /* copy driver operators */
79518 - ops->ops = *entry;
79519 + ops->ops = entry;
79520 ops->driver |= DRIVER_LOADED;
79521 ops->argsize = argsize;
79522
79523 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
79524 dev->name, ops->id, ops->argsize, dev->argsize);
79525 return -EINVAL;
79526 }
79527 - if (ops->ops.init_device(dev) >= 0) {
79528 + if (ops->ops->init_device(dev) >= 0) {
79529 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
79530 ops->num_init_devices++;
79531 } else {
79532 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
79533 dev->name, ops->id, ops->argsize, dev->argsize);
79534 return -EINVAL;
79535 }
79536 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
79537 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
79538 dev->status = SNDRV_SEQ_DEVICE_FREE;
79539 dev->driver_data = NULL;
79540 ops->num_init_devices--;
79541 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
79542 index 621e60e..f4543f5 100644
79543 --- a/sound/drivers/mts64.c
79544 +++ b/sound/drivers/mts64.c
79545 @@ -29,6 +29,7 @@
79546 #include <sound/initval.h>
79547 #include <sound/rawmidi.h>
79548 #include <sound/control.h>
79549 +#include <asm/local.h>
79550
79551 #define CARD_NAME "Miditerminal 4140"
79552 #define DRIVER_NAME "MTS64"
79553 @@ -67,7 +68,7 @@ struct mts64 {
79554 struct pardevice *pardev;
79555 int pardev_claimed;
79556
79557 - int open_count;
79558 + local_t open_count;
79559 int current_midi_output_port;
79560 int current_midi_input_port;
79561 u8 mode[MTS64_NUM_INPUT_PORTS];
79562 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79563 {
79564 struct mts64 *mts = substream->rmidi->private_data;
79565
79566 - if (mts->open_count == 0) {
79567 + if (local_read(&mts->open_count) == 0) {
79568 /* We don't need a spinlock here, because this is just called
79569 if the device has not been opened before.
79570 So there aren't any IRQs from the device */
79571 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79572
79573 msleep(50);
79574 }
79575 - ++(mts->open_count);
79576 + local_inc(&mts->open_count);
79577
79578 return 0;
79579 }
79580 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79581 struct mts64 *mts = substream->rmidi->private_data;
79582 unsigned long flags;
79583
79584 - --(mts->open_count);
79585 - if (mts->open_count == 0) {
79586 + if (local_dec_return(&mts->open_count) == 0) {
79587 /* We need the spinlock_irqsave here because we can still
79588 have IRQs at this point */
79589 spin_lock_irqsave(&mts->lock, flags);
79590 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79591
79592 msleep(500);
79593
79594 - } else if (mts->open_count < 0)
79595 - mts->open_count = 0;
79596 + } else if (local_read(&mts->open_count) < 0)
79597 + local_set(&mts->open_count, 0);
79598
79599 return 0;
79600 }
79601 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
79602 index b953fb4..1999c01 100644
79603 --- a/sound/drivers/opl4/opl4_lib.c
79604 +++ b/sound/drivers/opl4/opl4_lib.c
79605 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
79606 MODULE_DESCRIPTION("OPL4 driver");
79607 MODULE_LICENSE("GPL");
79608
79609 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
79610 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
79611 {
79612 int timeout = 10;
79613 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
79614 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
79615 index 3e32bd3..46fc152 100644
79616 --- a/sound/drivers/portman2x4.c
79617 +++ b/sound/drivers/portman2x4.c
79618 @@ -48,6 +48,7 @@
79619 #include <sound/initval.h>
79620 #include <sound/rawmidi.h>
79621 #include <sound/control.h>
79622 +#include <asm/local.h>
79623
79624 #define CARD_NAME "Portman 2x4"
79625 #define DRIVER_NAME "portman"
79626 @@ -85,7 +86,7 @@ struct portman {
79627 struct pardevice *pardev;
79628 int pardev_claimed;
79629
79630 - int open_count;
79631 + local_t open_count;
79632 int mode[PORTMAN_NUM_INPUT_PORTS];
79633 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
79634 };
79635 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
79636 index 87657dd..a8268d4 100644
79637 --- a/sound/firewire/amdtp.c
79638 +++ b/sound/firewire/amdtp.c
79639 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
79640 ptr = s->pcm_buffer_pointer + data_blocks;
79641 if (ptr >= pcm->runtime->buffer_size)
79642 ptr -= pcm->runtime->buffer_size;
79643 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
79644 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
79645
79646 s->pcm_period_pointer += data_blocks;
79647 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
79648 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
79649 */
79650 void amdtp_out_stream_update(struct amdtp_out_stream *s)
79651 {
79652 - ACCESS_ONCE(s->source_node_id_field) =
79653 + ACCESS_ONCE_RW(s->source_node_id_field) =
79654 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
79655 }
79656 EXPORT_SYMBOL(amdtp_out_stream_update);
79657 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
79658 index 537a9cb..8e8c8e9 100644
79659 --- a/sound/firewire/amdtp.h
79660 +++ b/sound/firewire/amdtp.h
79661 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
79662 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
79663 struct snd_pcm_substream *pcm)
79664 {
79665 - ACCESS_ONCE(s->pcm) = pcm;
79666 + ACCESS_ONCE_RW(s->pcm) = pcm;
79667 }
79668
79669 /**
79670 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
79671 index cd094ec..eca1277 100644
79672 --- a/sound/firewire/isight.c
79673 +++ b/sound/firewire/isight.c
79674 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
79675 ptr += count;
79676 if (ptr >= runtime->buffer_size)
79677 ptr -= runtime->buffer_size;
79678 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
79679 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
79680
79681 isight->period_counter += count;
79682 if (isight->period_counter >= runtime->period_size) {
79683 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
79684 if (err < 0)
79685 return err;
79686
79687 - ACCESS_ONCE(isight->pcm_active) = true;
79688 + ACCESS_ONCE_RW(isight->pcm_active) = true;
79689
79690 return 0;
79691 }
79692 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
79693 {
79694 struct isight *isight = substream->private_data;
79695
79696 - ACCESS_ONCE(isight->pcm_active) = false;
79697 + ACCESS_ONCE_RW(isight->pcm_active) = false;
79698
79699 mutex_lock(&isight->mutex);
79700 isight_stop_streaming(isight);
79701 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
79702
79703 switch (cmd) {
79704 case SNDRV_PCM_TRIGGER_START:
79705 - ACCESS_ONCE(isight->pcm_running) = true;
79706 + ACCESS_ONCE_RW(isight->pcm_running) = true;
79707 break;
79708 case SNDRV_PCM_TRIGGER_STOP:
79709 - ACCESS_ONCE(isight->pcm_running) = false;
79710 + ACCESS_ONCE_RW(isight->pcm_running) = false;
79711 break;
79712 default:
79713 return -EINVAL;
79714 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
79715 index 7bd5e33..1fcab12 100644
79716 --- a/sound/isa/cmi8330.c
79717 +++ b/sound/isa/cmi8330.c
79718 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
79719
79720 struct snd_pcm *pcm;
79721 struct snd_cmi8330_stream {
79722 - struct snd_pcm_ops ops;
79723 + snd_pcm_ops_no_const ops;
79724 snd_pcm_open_callback_t open;
79725 void *private_data; /* sb or wss */
79726 } streams[2];
79727 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
79728 index 733b014..56ce96f 100644
79729 --- a/sound/oss/sb_audio.c
79730 +++ b/sound/oss/sb_audio.c
79731 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
79732 buf16 = (signed short *)(localbuf + localoffs);
79733 while (c)
79734 {
79735 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79736 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79737 if (copy_from_user(lbuf8,
79738 userbuf+useroffs + p,
79739 locallen))
79740 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
79741 index 09d4648..cf234c7 100644
79742 --- a/sound/oss/swarm_cs4297a.c
79743 +++ b/sound/oss/swarm_cs4297a.c
79744 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
79745 {
79746 struct cs4297a_state *s;
79747 u32 pwr, id;
79748 - mm_segment_t fs;
79749 int rval;
79750 #ifndef CONFIG_BCM_CS4297A_CSWARM
79751 u64 cfg;
79752 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
79753 if (!rval) {
79754 char *sb1250_duart_present;
79755
79756 +#if 0
79757 + mm_segment_t fs;
79758 fs = get_fs();
79759 set_fs(KERNEL_DS);
79760 -#if 0
79761 val = SOUND_MASK_LINE;
79762 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
79763 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
79764 val = initvol[i].vol;
79765 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
79766 }
79767 + set_fs(fs);
79768 // cs4297a_write_ac97(s, 0x18, 0x0808);
79769 #else
79770 // cs4297a_write_ac97(s, 0x5e, 0x180);
79771 cs4297a_write_ac97(s, 0x02, 0x0808);
79772 cs4297a_write_ac97(s, 0x18, 0x0808);
79773 #endif
79774 - set_fs(fs);
79775
79776 list_add(&s->list, &cs4297a_devs);
79777
79778 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
79779 index f0f1943..8e1f96c 100644
79780 --- a/sound/pci/hda/hda_codec.h
79781 +++ b/sound/pci/hda/hda_codec.h
79782 @@ -611,7 +611,7 @@ struct hda_bus_ops {
79783 /* notify power-up/down from codec to controller */
79784 void (*pm_notify)(struct hda_bus *bus);
79785 #endif
79786 -};
79787 +} __no_const;
79788
79789 /* template to pass to the bus constructor */
79790 struct hda_bus_template {
79791 @@ -713,6 +713,7 @@ struct hda_codec_ops {
79792 #endif
79793 void (*reboot_notify)(struct hda_codec *codec);
79794 };
79795 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
79796
79797 /* record for amp information cache */
79798 struct hda_cache_head {
79799 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
79800 struct snd_pcm_substream *substream);
79801 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
79802 struct snd_pcm_substream *substream);
79803 -};
79804 +} __no_const;
79805
79806 /* PCM information for each substream */
79807 struct hda_pcm_stream {
79808 @@ -801,7 +802,7 @@ struct hda_codec {
79809 const char *modelname; /* model name for preset */
79810
79811 /* set by patch */
79812 - struct hda_codec_ops patch_ops;
79813 + hda_codec_ops_no_const patch_ops;
79814
79815 /* PCM to create, set by patch_ops.build_pcms callback */
79816 unsigned int num_pcms;
79817 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
79818 index 0da778a..bc38b84 100644
79819 --- a/sound/pci/ice1712/ice1712.h
79820 +++ b/sound/pci/ice1712/ice1712.h
79821 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
79822 unsigned int mask_flags; /* total mask bits */
79823 struct snd_akm4xxx_ops {
79824 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
79825 - } ops;
79826 + } __no_const ops;
79827 };
79828
79829 struct snd_ice1712_spdif {
79830 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
79831 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79832 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79833 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79834 - } ops;
79835 + } __no_const ops;
79836 };
79837
79838
79839 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
79840 index 12a9a2b..2b6138f 100644
79841 --- a/sound/pci/ymfpci/ymfpci_main.c
79842 +++ b/sound/pci/ymfpci/ymfpci_main.c
79843 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
79844 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
79845 break;
79846 }
79847 - if (atomic_read(&chip->interrupt_sleep_count)) {
79848 - atomic_set(&chip->interrupt_sleep_count, 0);
79849 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
79850 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79851 wake_up(&chip->interrupt_sleep);
79852 }
79853 __end:
79854 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
79855 continue;
79856 init_waitqueue_entry(&wait, current);
79857 add_wait_queue(&chip->interrupt_sleep, &wait);
79858 - atomic_inc(&chip->interrupt_sleep_count);
79859 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
79860 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
79861 remove_wait_queue(&chip->interrupt_sleep, &wait);
79862 }
79863 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
79864 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
79865 spin_unlock(&chip->reg_lock);
79866
79867 - if (atomic_read(&chip->interrupt_sleep_count)) {
79868 - atomic_set(&chip->interrupt_sleep_count, 0);
79869 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
79870 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79871 wake_up(&chip->interrupt_sleep);
79872 }
79873 }
79874 @@ -2389,7 +2389,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
79875 spin_lock_init(&chip->reg_lock);
79876 spin_lock_init(&chip->voice_lock);
79877 init_waitqueue_head(&chip->interrupt_sleep);
79878 - atomic_set(&chip->interrupt_sleep_count, 0);
79879 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79880 chip->card = card;
79881 chip->pci = pci;
79882 chip->irq = -1;
79883 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
79884 index cdc860a..db34a93 100644
79885 --- a/sound/soc/soc-pcm.c
79886 +++ b/sound/soc/soc-pcm.c
79887 @@ -605,7 +605,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
79888 struct snd_soc_platform *platform = rtd->platform;
79889 struct snd_soc_dai *codec_dai = rtd->codec_dai;
79890 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
79891 - struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
79892 + snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
79893 struct snd_pcm *pcm;
79894 char new_name[64];
79895 int ret = 0, playback = 0, capture = 0;
79896 diff --git a/sound/usb/card.h b/sound/usb/card.h
79897 index da5fa1a..113cd02 100644
79898 --- a/sound/usb/card.h
79899 +++ b/sound/usb/card.h
79900 @@ -45,6 +45,7 @@ struct snd_urb_ops {
79901 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79902 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79903 };
79904 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
79905
79906 struct snd_usb_substream {
79907 struct snd_usb_stream *stream;
79908 @@ -94,7 +95,7 @@ struct snd_usb_substream {
79909 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
79910 spinlock_t lock;
79911
79912 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
79913 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
79914 int last_frame_number; /* stored frame number */
79915 int last_delay; /* stored delay */
79916 };
79917 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
79918 new file mode 100644
79919 index 0000000..ca64170
79920 --- /dev/null
79921 +++ b/tools/gcc/Makefile
79922 @@ -0,0 +1,26 @@
79923 +#CC := gcc
79924 +#PLUGIN_SOURCE_FILES := pax_plugin.c
79925 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
79926 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
79927 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
79928 +
79929 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
79930 +CFLAGS_size_overflow_plugin.o := -Wno-missing-field-initializers
79931 +
79932 +hostlibs-y := constify_plugin.so
79933 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
79934 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
79935 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
79936 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
79937 +hostlibs-y += colorize_plugin.so
79938 +hostlibs-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
79939 +
79940 +always := $(hostlibs-y)
79941 +
79942 +constify_plugin-objs := constify_plugin.o
79943 +stackleak_plugin-objs := stackleak_plugin.o
79944 +kallocstat_plugin-objs := kallocstat_plugin.o
79945 +kernexec_plugin-objs := kernexec_plugin.o
79946 +checker_plugin-objs := checker_plugin.o
79947 +colorize_plugin-objs := colorize_plugin.o
79948 +size_overflow_plugin-objs := size_overflow_plugin.o
79949 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
79950 new file mode 100644
79951 index 0000000..d41b5af
79952 --- /dev/null
79953 +++ b/tools/gcc/checker_plugin.c
79954 @@ -0,0 +1,171 @@
79955 +/*
79956 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79957 + * Licensed under the GPL v2
79958 + *
79959 + * Note: the choice of the license means that the compilation process is
79960 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79961 + * but for the kernel it doesn't matter since it doesn't link against
79962 + * any of the gcc libraries
79963 + *
79964 + * gcc plugin to implement various sparse (source code checker) features
79965 + *
79966 + * TODO:
79967 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
79968 + *
79969 + * BUGS:
79970 + * - none known
79971 + */
79972 +#include "gcc-plugin.h"
79973 +#include "config.h"
79974 +#include "system.h"
79975 +#include "coretypes.h"
79976 +#include "tree.h"
79977 +#include "tree-pass.h"
79978 +#include "flags.h"
79979 +#include "intl.h"
79980 +#include "toplev.h"
79981 +#include "plugin.h"
79982 +//#include "expr.h" where are you...
79983 +#include "diagnostic.h"
79984 +#include "plugin-version.h"
79985 +#include "tm.h"
79986 +#include "function.h"
79987 +#include "basic-block.h"
79988 +#include "gimple.h"
79989 +#include "rtl.h"
79990 +#include "emit-rtl.h"
79991 +#include "tree-flow.h"
79992 +#include "target.h"
79993 +
79994 +extern void c_register_addr_space (const char *str, addr_space_t as);
79995 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
79996 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
79997 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
79998 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
79999 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
80000 +
80001 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80002 +extern rtx emit_move_insn(rtx x, rtx y);
80003 +
80004 +int plugin_is_GPL_compatible;
80005 +
80006 +static struct plugin_info checker_plugin_info = {
80007 + .version = "201111150100",
80008 +};
80009 +
80010 +#define ADDR_SPACE_KERNEL 0
80011 +#define ADDR_SPACE_FORCE_KERNEL 1
80012 +#define ADDR_SPACE_USER 2
80013 +#define ADDR_SPACE_FORCE_USER 3
80014 +#define ADDR_SPACE_IOMEM 0
80015 +#define ADDR_SPACE_FORCE_IOMEM 0
80016 +#define ADDR_SPACE_PERCPU 0
80017 +#define ADDR_SPACE_FORCE_PERCPU 0
80018 +#define ADDR_SPACE_RCU 0
80019 +#define ADDR_SPACE_FORCE_RCU 0
80020 +
80021 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
80022 +{
80023 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
80024 +}
80025 +
80026 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
80027 +{
80028 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
80029 +}
80030 +
80031 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
80032 +{
80033 + return default_addr_space_valid_pointer_mode(mode, as);
80034 +}
80035 +
80036 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
80037 +{
80038 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
80039 +}
80040 +
80041 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
80042 +{
80043 + return default_addr_space_legitimize_address(x, oldx, mode, as);
80044 +}
80045 +
80046 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
80047 +{
80048 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
80049 + return true;
80050 +
80051 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
80052 + return true;
80053 +
80054 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
80055 + return true;
80056 +
80057 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
80058 + return true;
80059 +
80060 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
80061 + return true;
80062 +
80063 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
80064 + return true;
80065 +
80066 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
80067 + return true;
80068 +
80069 + return subset == superset;
80070 +}
80071 +
80072 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
80073 +{
80074 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
80075 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
80076 +
80077 + return op;
80078 +}
80079 +
80080 +static void register_checker_address_spaces(void *event_data, void *data)
80081 +{
80082 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
80083 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
80084 + c_register_addr_space("__user", ADDR_SPACE_USER);
80085 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
80086 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
80087 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
80088 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
80089 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
80090 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
80091 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
80092 +
80093 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
80094 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
80095 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
80096 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
80097 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
80098 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
80099 + targetm.addr_space.convert = checker_addr_space_convert;
80100 +}
80101 +
80102 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80103 +{
80104 + const char * const plugin_name = plugin_info->base_name;
80105 + const int argc = plugin_info->argc;
80106 + const struct plugin_argument * const argv = plugin_info->argv;
80107 + int i;
80108 +
80109 + if (!plugin_default_version_check(version, &gcc_version)) {
80110 + error(G_("incompatible gcc/plugin versions"));
80111 + return 1;
80112 + }
80113 +
80114 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
80115 +
80116 + for (i = 0; i < argc; ++i)
80117 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80118 +
80119 + if (TARGET_64BIT == 0)
80120 + return 0;
80121 +
80122 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
80123 +
80124 + return 0;
80125 +}
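+/*
+ * Illustrative example (not part of the plugin): with the plugin loaded on a
+ * 64-bit target, gcc's named address space machinery rejects implicit mixing
+ * of the registered spaces, roughly:
+ *
+ *	void __user *uptr;
+ *	void __kernel *kptr = uptr;				// rejected: __user is not a subset of __kernel
+ *	void __kernel *kptr2 = (void __force_kernel *)uptr;	// accepted through the force space
+ */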
80126 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
80127 new file mode 100644
80128 index 0000000..ee950d0
80129 --- /dev/null
80130 +++ b/tools/gcc/colorize_plugin.c
80131 @@ -0,0 +1,147 @@
80132 +/*
80133 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
80134 + * Licensed under the GPL v2
80135 + *
80136 + * Note: the choice of the license means that the compilation process is
80137 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80138 + * but for the kernel it doesn't matter since it doesn't link against
80139 + * any of the gcc libraries
80140 + *
80141 + * gcc plugin to colorize diagnostic output
80142 + *
80143 + */
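+/*
+ * Build/usage, by analogy with the other plugins in this directory (the .so
+ * name here is only an example):
+ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o colorize_plugin.so colorize_plugin.c
+ * $ gcc -fplugin=./colorize_plugin.so test.c -O2
+ */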
80144 +
80145 +#include "gcc-plugin.h"
80146 +#include "config.h"
80147 +#include "system.h"
80148 +#include "coretypes.h"
80149 +#include "tree.h"
80150 +#include "tree-pass.h"
80151 +#include "flags.h"
80152 +#include "intl.h"
80153 +#include "toplev.h"
80154 +#include "plugin.h"
80155 +#include "diagnostic.h"
80156 +#include "plugin-version.h"
80157 +#include "tm.h"
80158 +
80159 +int plugin_is_GPL_compatible;
80160 +
80161 +static struct plugin_info colorize_plugin_info = {
80162 + .version = "201203092200",
80163 +};
80164 +
80165 +#define GREEN "\033[32m\033[2m"
80166 +#define LIGHTGREEN "\033[32m\033[1m"
80167 +#define YELLOW "\033[33m\033[2m"
80168 +#define LIGHTYELLOW "\033[33m\033[1m"
80169 +#define RED "\033[31m\033[2m"
80170 +#define LIGHTRED "\033[31m\033[1m"
80171 +#define BLUE "\033[34m\033[2m"
80172 +#define LIGHTBLUE "\033[34m\033[1m"
80173 +#define BRIGHT "\033[m\033[1m"
80174 +#define NORMAL "\033[m"
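+/* ANSI SGR escape sequences: "\033[3Xm" selects the foreground colour,
+ * "\033[1m"/"\033[2m" select bold/faint intensity and "\033[m" resets all
+ * attributes to the terminal default. */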
80175 +
80176 +static diagnostic_starter_fn old_starter;
80177 +static diagnostic_finalizer_fn old_finalizer;
80178 +
80179 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
80180 +{
80181 + const char *color;
80182 + char *newprefix;
80183 +
80184 + switch (diagnostic->kind) {
80185 + case DK_NOTE:
80186 + color = LIGHTBLUE;
80187 + break;
80188 +
80189 + case DK_PEDWARN:
80190 + case DK_WARNING:
80191 + color = LIGHTYELLOW;
80192 + break;
80193 +
80194 + case DK_ERROR:
80195 + case DK_FATAL:
80196 + case DK_ICE:
80197 + case DK_PERMERROR:
80198 + case DK_SORRY:
80199 + color = LIGHTRED;
80200 + break;
80201 +
80202 + default:
80203 + color = NORMAL;
80204 + }
80205 +
80206 + old_starter(context, diagnostic);
80207 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
80208 + return;
80209 + pp_destroy_prefix(context->printer);
80210 + pp_set_prefix(context->printer, newprefix);
80211 +}
80212 +
80213 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
80214 +{
80215 + old_finalizer(context, diagnostic);
80216 +}
80217 +
80218 +static void colorize_arm(void)
80219 +{
80220 + old_starter = diagnostic_starter(global_dc);
80221 + old_finalizer = diagnostic_finalizer(global_dc);
80222 +
80223 + diagnostic_starter(global_dc) = start_colorize;
80224 + diagnostic_finalizer(global_dc) = finalize_colorize;
80225 +}
80226 +
80227 +static unsigned int execute_colorize_rearm(void)
80228 +{
80229 + if (diagnostic_starter(global_dc) == start_colorize)
80230 + return 0;
80231 +
80232 + colorize_arm();
80233 + return 0;
80234 +}
80235 +
80236 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
80237 + .pass = {
80238 + .type = SIMPLE_IPA_PASS,
80239 + .name = "colorize_rearm",
80240 + .gate = NULL,
80241 + .execute = execute_colorize_rearm,
80242 + .sub = NULL,
80243 + .next = NULL,
80244 + .static_pass_number = 0,
80245 + .tv_id = TV_NONE,
80246 + .properties_required = 0,
80247 + .properties_provided = 0,
80248 + .properties_destroyed = 0,
80249 + .todo_flags_start = 0,
80250 + .todo_flags_finish = 0
80251 + }
80252 +};
80253 +
80254 +static void colorize_start_unit(void *gcc_data, void *user_data)
80255 +{
80256 + colorize_arm();
80257 +}
80258 +
80259 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80260 +{
80261 + const char * const plugin_name = plugin_info->base_name;
80262 + struct register_pass_info colorize_rearm_pass_info = {
80263 + .pass = &pass_ipa_colorize_rearm.pass,
80264 + .reference_pass_name = "*free_lang_data",
80265 + .ref_pass_instance_number = 0,
80266 + .pos_op = PASS_POS_INSERT_AFTER
80267 + };
80268 +
80269 + if (!plugin_default_version_check(version, &gcc_version)) {
80270 + error(G_("incompatible gcc/plugin versions"));
80271 + return 1;
80272 + }
80273 +
80274 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
80275 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
80276 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
80277 + return 0;
80278 +}
80279 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
80280 new file mode 100644
80281 index 0000000..88a7438
80282 --- /dev/null
80283 +++ b/tools/gcc/constify_plugin.c
80284 @@ -0,0 +1,303 @@
80285 +/*
80286 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
80287 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
80288 + * Licensed under the GPL v2, or (at your option) v3
80289 + *
80290 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
80291 + *
80292 + * Homepage:
80293 + * http://www.grsecurity.net/~ephox/const_plugin/
80294 + *
80295 + * Usage:
80296 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
80297 + * $ gcc -fplugin=constify_plugin.so test.c -O2
80298 + */
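+/*
+ * Illustrative effect (example structs, not taken from the kernel):
+ *
+ *	struct ops       { int (*open)(void); void (*release)(void); };		// only function pointers -> constified automatically
+ *	struct ops_rw    { int (*open)(void); } __attribute__((no_const));		// opts out of the automatic constification
+ *	struct mixed_ops { int (*fn)(void); int refcount; } __attribute__((do_const));	// forces constification despite the data member
+ */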
80299 +
80300 +#include "gcc-plugin.h"
80301 +#include "config.h"
80302 +#include "system.h"
80303 +#include "coretypes.h"
80304 +#include "tree.h"
80305 +#include "tree-pass.h"
80306 +#include "flags.h"
80307 +#include "intl.h"
80308 +#include "toplev.h"
80309 +#include "plugin.h"
80310 +#include "diagnostic.h"
80311 +#include "plugin-version.h"
80312 +#include "tm.h"
80313 +#include "function.h"
80314 +#include "basic-block.h"
80315 +#include "gimple.h"
80316 +#include "rtl.h"
80317 +#include "emit-rtl.h"
80318 +#include "tree-flow.h"
80319 +
80320 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
80321 +
80322 +int plugin_is_GPL_compatible;
80323 +
80324 +static struct plugin_info const_plugin_info = {
80325 + .version = "201111150100",
80326 + .help = "no-constify\tturn off constification\n",
80327 +};
80328 +
80329 +static void constify_type(tree type);
80330 +static bool walk_struct(tree node);
80331 +
80332 +static tree deconstify_type(tree old_type)
80333 +{
80334 + tree new_type, field;
80335 +
80336 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
80337 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
80338 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
80339 + DECL_FIELD_CONTEXT(field) = new_type;
80340 + TYPE_READONLY(new_type) = 0;
80341 + C_TYPE_FIELDS_READONLY(new_type) = 0;
80342 + return new_type;
80343 +}
80344 +
80345 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80346 +{
80347 + tree type;
80348 +
80349 + *no_add_attrs = true;
80350 + if (TREE_CODE(*node) == FUNCTION_DECL) {
80351 + error("%qE attribute does not apply to functions", name);
80352 + return NULL_TREE;
80353 + }
80354 +
80355 + if (TREE_CODE(*node) == VAR_DECL) {
80356 + error("%qE attribute does not apply to variables", name);
80357 + return NULL_TREE;
80358 + }
80359 +
80360 + if (TYPE_P(*node)) {
80361 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
80362 + *no_add_attrs = false;
80363 + else
80364 + error("%qE attribute applies to struct and union types only", name);
80365 + return NULL_TREE;
80366 + }
80367 +
80368 + type = TREE_TYPE(*node);
80369 +
80370 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
80371 + error("%qE attribute applies to struct and union types only", name);
80372 + return NULL_TREE;
80373 + }
80374 +
80375 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
80376 + error("%qE attribute is already applied to the type", name);
80377 + return NULL_TREE;
80378 + }
80379 +
80380 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
80381 + error("%qE attribute used on type that is not constified", name);
80382 + return NULL_TREE;
80383 + }
80384 +
80385 + if (TREE_CODE(*node) == TYPE_DECL) {
80386 + TREE_TYPE(*node) = deconstify_type(type);
80387 + TREE_READONLY(*node) = 0;
80388 + return NULL_TREE;
80389 + }
80390 +
80391 + return NULL_TREE;
80392 +}
80393 +
80394 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80395 +{
80396 + *no_add_attrs = true;
80397 + if (!TYPE_P(*node)) {
80398 + error("%qE attribute applies to types only", name);
80399 + return NULL_TREE;
80400 + }
80401 +
80402 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
80403 + error("%qE attribute applies to struct and union types only", name);
80404 + return NULL_TREE;
80405 + }
80406 +
80407 + *no_add_attrs = false;
80408 + constify_type(*node);
80409 + return NULL_TREE;
80410 +}
80411 +
80412 +static struct attribute_spec no_const_attr = {
80413 + .name = "no_const",
80414 + .min_length = 0,
80415 + .max_length = 0,
80416 + .decl_required = false,
80417 + .type_required = false,
80418 + .function_type_required = false,
80419 + .handler = handle_no_const_attribute,
80420 +#if BUILDING_GCC_VERSION >= 4007
80421 + .affects_type_identity = true
80422 +#endif
80423 +};
80424 +
80425 +static struct attribute_spec do_const_attr = {
80426 + .name = "do_const",
80427 + .min_length = 0,
80428 + .max_length = 0,
80429 + .decl_required = false,
80430 + .type_required = false,
80431 + .function_type_required = false,
80432 + .handler = handle_do_const_attribute,
80433 +#if BUILDING_GCC_VERSION >= 4007
80434 + .affects_type_identity = true
80435 +#endif
80436 +};
80437 +
80438 +static void register_attributes(void *event_data, void *data)
80439 +{
80440 + register_attribute(&no_const_attr);
80441 + register_attribute(&do_const_attr);
80442 +}
80443 +
80444 +static void constify_type(tree type)
80445 +{
80446 + TYPE_READONLY(type) = 1;
80447 + C_TYPE_FIELDS_READONLY(type) = 1;
80448 +}
80449 +
80450 +static bool is_fptr(tree field)
80451 +{
80452 + tree ptr = TREE_TYPE(field);
80453 +
80454 + if (TREE_CODE(ptr) != POINTER_TYPE)
80455 + return false;
80456 +
80457 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
80458 +}
80459 +
80460 +static bool walk_struct(tree node)
80461 +{
80462 + tree field;
80463 +
80464 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
80465 + return false;
80466 +
80467 + if (TYPE_FIELDS(node) == NULL_TREE)
80468 + return false;
80469 +
80470 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
80471 + tree type = TREE_TYPE(field);
80472 + enum tree_code code = TREE_CODE(type);
80473 + if (code == RECORD_TYPE || code == UNION_TYPE) {
80474 + if (!(walk_struct(type)))
80475 + return false;
80476 + } else if (!is_fptr(field) && !TREE_READONLY(field))
80477 + return false;
80478 + }
80479 + return true;
80480 +}
80481 +
80482 +static void finish_type(void *event_data, void *data)
80483 +{
80484 + tree type = (tree)event_data;
80485 +
80486 + if (type == NULL_TREE)
80487 + return;
80488 +
80489 + if (TYPE_READONLY(type))
80490 + return;
80491 +
80492 + if (walk_struct(type))
80493 + constify_type(type);
80494 +}
80495 +
80496 +static unsigned int check_local_variables(void);
80497 +
80498 +struct gimple_opt_pass pass_local_variable = {
80499 + {
80500 + .type = GIMPLE_PASS,
80501 + .name = "check_local_variables",
80502 + .gate = NULL,
80503 + .execute = check_local_variables,
80504 + .sub = NULL,
80505 + .next = NULL,
80506 + .static_pass_number = 0,
80507 + .tv_id = TV_NONE,
80508 + .properties_required = 0,
80509 + .properties_provided = 0,
80510 + .properties_destroyed = 0,
80511 + .todo_flags_start = 0,
80512 + .todo_flags_finish = 0
80513 + }
80514 +};
80515 +
80516 +static unsigned int check_local_variables(void)
80517 +{
80518 + tree var;
80519 + referenced_var_iterator rvi;
80520 +
80521 +#if BUILDING_GCC_VERSION == 4005
80522 + FOR_EACH_REFERENCED_VAR(var, rvi) {
80523 +#else
80524 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
80525 +#endif
80526 + tree type = TREE_TYPE(var);
80527 +
80528 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
80529 + continue;
80530 +
80531 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
80532 + continue;
80533 +
80534 + if (!TYPE_READONLY(type))
80535 + continue;
80536 +
80537 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
80538 +// continue;
80539 +
80540 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
80541 +// continue;
80542 +
80543 + if (walk_struct(type)) {
80544 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
80545 + return 1;
80546 + }
80547 + }
80548 + return 0;
80549 +}
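+/*
+ * Illustrative example of what the pass above rejects: an automatic variable
+ * whose type was constified (e.g. a struct containing only function pointers)
+ * trips the error_at() call:
+ *
+ *	void f(void) {
+ *		struct ops local_ops;	// error: constified variable 'local_ops' cannot be local
+ *	}
+ */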
80550 +
80551 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80552 +{
80553 + const char * const plugin_name = plugin_info->base_name;
80554 + const int argc = plugin_info->argc;
80555 + const struct plugin_argument * const argv = plugin_info->argv;
80556 + int i;
80557 + bool constify = true;
80558 +
80559 + struct register_pass_info local_variable_pass_info = {
80560 + .pass = &pass_local_variable.pass,
80561 + .reference_pass_name = "*referenced_vars",
80562 + .ref_pass_instance_number = 0,
80563 + .pos_op = PASS_POS_INSERT_AFTER
80564 + };
80565 +
80566 + if (!plugin_default_version_check(version, &gcc_version)) {
80567 + error(G_("incompatible gcc/plugin versions"));
80568 + return 1;
80569 + }
80570 +
80571 + for (i = 0; i < argc; ++i) {
80572 + if (!(strcmp(argv[i].key, "no-constify"))) {
80573 + constify = false;
80574 + continue;
80575 + }
80576 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80577 + }
80578 +
80579 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
80580 + if (constify) {
80581 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
80582 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
80583 + }
80584 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
80585 +
80586 + return 0;
80587 +}
80588 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
80589 new file mode 100644
80590 index 0000000..a5eabce
80591 --- /dev/null
80592 +++ b/tools/gcc/kallocstat_plugin.c
80593 @@ -0,0 +1,167 @@
80594 +/*
80595 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80596 + * Licensed under the GPL v2
80597 + *
80598 + * Note: the choice of the license means that the compilation process is
80599 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80600 + * but for the kernel it doesn't matter since it doesn't link against
80601 + * any of the gcc libraries
80602 + *
80603 + * gcc plugin to find the distribution of k*alloc sizes
80604 + *
80605 + * TODO:
80606 + *
80607 + * BUGS:
80608 + * - none known
80609 + */
80610 +#include "gcc-plugin.h"
80611 +#include "config.h"
80612 +#include "system.h"
80613 +#include "coretypes.h"
80614 +#include "tree.h"
80615 +#include "tree-pass.h"
80616 +#include "flags.h"
80617 +#include "intl.h"
80618 +#include "toplev.h"
80619 +#include "plugin.h"
80620 +//#include "expr.h" where are you...
80621 +#include "diagnostic.h"
80622 +#include "plugin-version.h"
80623 +#include "tm.h"
80624 +#include "function.h"
80625 +#include "basic-block.h"
80626 +#include "gimple.h"
80627 +#include "rtl.h"
80628 +#include "emit-rtl.h"
80629 +
80630 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80631 +
80632 +int plugin_is_GPL_compatible;
80633 +
80634 +static const char * const kalloc_functions[] = {
80635 + "__kmalloc",
80636 + "kmalloc",
80637 + "kmalloc_large",
80638 + "kmalloc_node",
80639 + "kmalloc_order",
80640 + "kmalloc_order_trace",
80641 + "kmalloc_slab",
80642 + "kzalloc",
80643 + "kzalloc_node",
80644 +};
80645 +
80646 +static struct plugin_info kallocstat_plugin_info = {
80647 + .version = "201111150100",
80648 +};
80649 +
80650 +static unsigned int execute_kallocstat(void);
80651 +
80652 +static struct gimple_opt_pass kallocstat_pass = {
80653 + .pass = {
80654 + .type = GIMPLE_PASS,
80655 + .name = "kallocstat",
80656 + .gate = NULL,
80657 + .execute = execute_kallocstat,
80658 + .sub = NULL,
80659 + .next = NULL,
80660 + .static_pass_number = 0,
80661 + .tv_id = TV_NONE,
80662 + .properties_required = 0,
80663 + .properties_provided = 0,
80664 + .properties_destroyed = 0,
80665 + .todo_flags_start = 0,
80666 + .todo_flags_finish = 0
80667 + }
80668 +};
80669 +
80670 +static bool is_kalloc(const char *fnname)
80671 +{
80672 + size_t i;
80673 +
80674 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
80675 + if (!strcmp(fnname, kalloc_functions[i]))
80676 + return true;
80677 + return false;
80678 +}
80679 +
80680 +static unsigned int execute_kallocstat(void)
80681 +{
80682 + basic_block bb;
80683 +
80684 + // 1. loop through BBs and GIMPLE statements
80685 + FOR_EACH_BB(bb) {
80686 + gimple_stmt_iterator gsi;
80687 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80688 + // gimple match:
80689 + tree fndecl, size;
80690 + gimple call_stmt;
80691 + const char *fnname;
80692 +
80693 + // is it a call
80694 + call_stmt = gsi_stmt(gsi);
80695 + if (!is_gimple_call(call_stmt))
80696 + continue;
80697 + fndecl = gimple_call_fndecl(call_stmt);
80698 + if (fndecl == NULL_TREE)
80699 + continue;
80700 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
80701 + continue;
80702 +
80703 + // is it a call to k*alloc
80704 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
80705 + if (!is_kalloc(fnname))
80706 + continue;
80707 +
80708 + // is the size arg the result of a simple const assignment
80709 + size = gimple_call_arg(call_stmt, 0);
80710 + while (true) {
80711 + gimple def_stmt;
80712 + expanded_location xloc;
80713 + size_t size_val;
80714 +
80715 + if (TREE_CODE(size) != SSA_NAME)
80716 + break;
80717 + def_stmt = SSA_NAME_DEF_STMT(size);
80718 + if (!def_stmt || !is_gimple_assign(def_stmt))
80719 + break;
80720 + if (gimple_num_ops(def_stmt) != 2)
80721 + break;
80722 + size = gimple_assign_rhs1(def_stmt);
80723 + if (!TREE_CONSTANT(size))
80724 + continue;
80725 + xloc = expand_location(gimple_location(def_stmt));
80726 + if (!xloc.file)
80727 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
80728 + size_val = TREE_INT_CST_LOW(size);
80729 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
80730 + break;
80731 + }
80732 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80733 +//debug_tree(gimple_call_fn(call_stmt));
80734 +//print_node(stderr, "pax", fndecl, 4);
80735 + }
80736 + }
80737 +
80738 + return 0;
80739 +}
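+/*
+ * The fprintf() above emits one line per constant-size k*alloc call site in
+ * the form "kallocsize: <size dec> <size hex> <function> <file>:<line>",
+ * e.g. (made-up values):
+ *	kallocsize:      192       c0 kzalloc drivers/foo/bar.c:123
+ */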
80740 +
80741 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80742 +{
80743 + const char * const plugin_name = plugin_info->base_name;
80744 + struct register_pass_info kallocstat_pass_info = {
80745 + .pass = &kallocstat_pass.pass,
80746 + .reference_pass_name = "ssa",
80747 + .ref_pass_instance_number = 0,
80748 + .pos_op = PASS_POS_INSERT_AFTER
80749 + };
80750 +
80751 + if (!plugin_default_version_check(version, &gcc_version)) {
80752 + error(G_("incompatible gcc/plugin versions"));
80753 + return 1;
80754 + }
80755 +
80756 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
80757 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
80758 +
80759 + return 0;
80760 +}
80761 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
80762 new file mode 100644
80763 index 0000000..d8a8da2
80764 --- /dev/null
80765 +++ b/tools/gcc/kernexec_plugin.c
80766 @@ -0,0 +1,427 @@
80767 +/*
80768 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80769 + * Licensed under the GPL v2
80770 + *
80771 + * Note: the choice of the license means that the compilation process is
80772 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80773 + * but for the kernel it doesn't matter since it doesn't link against
80774 + * any of the gcc libraries
80775 + *
80776 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
80777 + *
80778 + * TODO:
80779 + *
80780 + * BUGS:
80781 + * - none known
80782 + */
80783 +#include "gcc-plugin.h"
80784 +#include "config.h"
80785 +#include "system.h"
80786 +#include "coretypes.h"
80787 +#include "tree.h"
80788 +#include "tree-pass.h"
80789 +#include "flags.h"
80790 +#include "intl.h"
80791 +#include "toplev.h"
80792 +#include "plugin.h"
80793 +//#include "expr.h" where are you...
80794 +#include "diagnostic.h"
80795 +#include "plugin-version.h"
80796 +#include "tm.h"
80797 +#include "function.h"
80798 +#include "basic-block.h"
80799 +#include "gimple.h"
80800 +#include "rtl.h"
80801 +#include "emit-rtl.h"
80802 +#include "tree-flow.h"
80803 +
80804 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80805 +extern rtx emit_move_insn(rtx x, rtx y);
80806 +
80807 +int plugin_is_GPL_compatible;
80808 +
80809 +static struct plugin_info kernexec_plugin_info = {
80810 + .version = "201111291120",
80811 + .help = "method=[bts|or]\tinstrumentation method\n"
80812 +};
80813 +
80814 +static unsigned int execute_kernexec_reload(void);
80815 +static unsigned int execute_kernexec_fptr(void);
80816 +static unsigned int execute_kernexec_retaddr(void);
80817 +static bool kernexec_cmodel_check(void);
80818 +
80819 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
80820 +static void (*kernexec_instrument_retaddr)(rtx);
80821 +
80822 +static struct gimple_opt_pass kernexec_reload_pass = {
80823 + .pass = {
80824 + .type = GIMPLE_PASS,
80825 + .name = "kernexec_reload",
80826 + .gate = kernexec_cmodel_check,
80827 + .execute = execute_kernexec_reload,
80828 + .sub = NULL,
80829 + .next = NULL,
80830 + .static_pass_number = 0,
80831 + .tv_id = TV_NONE,
80832 + .properties_required = 0,
80833 + .properties_provided = 0,
80834 + .properties_destroyed = 0,
80835 + .todo_flags_start = 0,
80836 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
80837 + }
80838 +};
80839 +
80840 +static struct gimple_opt_pass kernexec_fptr_pass = {
80841 + .pass = {
80842 + .type = GIMPLE_PASS,
80843 + .name = "kernexec_fptr",
80844 + .gate = kernexec_cmodel_check,
80845 + .execute = execute_kernexec_fptr,
80846 + .sub = NULL,
80847 + .next = NULL,
80848 + .static_pass_number = 0,
80849 + .tv_id = TV_NONE,
80850 + .properties_required = 0,
80851 + .properties_provided = 0,
80852 + .properties_destroyed = 0,
80853 + .todo_flags_start = 0,
80854 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
80855 + }
80856 +};
80857 +
80858 +static struct rtl_opt_pass kernexec_retaddr_pass = {
80859 + .pass = {
80860 + .type = RTL_PASS,
80861 + .name = "kernexec_retaddr",
80862 + .gate = kernexec_cmodel_check,
80863 + .execute = execute_kernexec_retaddr,
80864 + .sub = NULL,
80865 + .next = NULL,
80866 + .static_pass_number = 0,
80867 + .tv_id = TV_NONE,
80868 + .properties_required = 0,
80869 + .properties_provided = 0,
80870 + .properties_destroyed = 0,
80871 + .todo_flags_start = 0,
80872 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
80873 + }
80874 +};
80875 +
80876 +static bool kernexec_cmodel_check(void)
80877 +{
80878 + tree section;
80879 +
80880 + if (ix86_cmodel != CM_KERNEL)
80881 + return false;
80882 +
80883 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
80884 + if (!section || !TREE_VALUE(section))
80885 + return true;
80886 +
80887 + section = TREE_VALUE(TREE_VALUE(section));
80888 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
80889 + return true;
80890 +
80891 + return false;
80892 +}
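+/*
+ * kernexec_cmodel_check() gates all three passes: they only run when compiling
+ * with -mcmodel=kernel and are skipped for functions placed into .vsyscall_*
+ * sections.
+ */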
80893 +
80894 +/*
80895 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
80896 + */
80897 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
80898 +{
80899 + gimple asm_movabs_stmt;
80900 +
80901 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
80902 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
80903 + gimple_asm_set_volatile(asm_movabs_stmt, true);
80904 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
80905 + update_stmt(asm_movabs_stmt);
80906 +}
80907 +
80908 +/*
80909 + * find all asm() stmts that clobber r10 and add a reload of r10
80910 + */
80911 +static unsigned int execute_kernexec_reload(void)
80912 +{
80913 + basic_block bb;
80914 +
80915 + // 1. loop through BBs and GIMPLE statements
80916 + FOR_EACH_BB(bb) {
80917 + gimple_stmt_iterator gsi;
80918 +
80919 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80920 + // gimple match: __asm__ ("" : : : "r10");
80921 + gimple asm_stmt;
80922 + size_t nclobbers;
80923 +
80924 + // is it an asm ...
80925 + asm_stmt = gsi_stmt(gsi);
80926 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
80927 + continue;
80928 +
80929 + // ... clobbering r10
80930 + nclobbers = gimple_asm_nclobbers(asm_stmt);
80931 + while (nclobbers--) {
80932 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
80933 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
80934 + continue;
80935 + kernexec_reload_fptr_mask(&gsi);
80936 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
80937 + break;
80938 + }
80939 + }
80940 + }
80941 +
80942 + return 0;
80943 +}
80944 +
80945 +/*
80946 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
80947 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
80948 + */
80949 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
80950 +{
80951 + gimple assign_intptr, assign_new_fptr, call_stmt;
80952 + tree intptr, old_fptr, new_fptr, kernexec_mask;
80953 +
80954 + call_stmt = gsi_stmt(*gsi);
80955 + old_fptr = gimple_call_fn(call_stmt);
80956 +
80957 + // create temporary unsigned long variable used for bitops and cast fptr to it
80958 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
80959 + add_referenced_var(intptr);
80960 + mark_sym_for_renaming(intptr);
80961 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
80962 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80963 + update_stmt(assign_intptr);
80964 +
80965 + // apply logical or to temporary unsigned long and bitmask
80966 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
80967 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
80968 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
80969 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80970 + update_stmt(assign_intptr);
80971 +
80972 + // cast temporary unsigned long back to a temporary fptr variable
80973 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
80974 + add_referenced_var(new_fptr);
80975 + mark_sym_for_renaming(new_fptr);
80976 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
80977 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
80978 + update_stmt(assign_new_fptr);
80979 +
80980 + // replace call stmt fn with the new fptr
80981 + gimple_call_set_fn(call_stmt, new_fptr);
80982 + update_stmt(call_stmt);
80983 +}
80984 +
80985 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
80986 +{
80987 + gimple asm_or_stmt, call_stmt;
80988 + tree old_fptr, new_fptr, input, output;
80989 + VEC(tree, gc) *inputs = NULL;
80990 + VEC(tree, gc) *outputs = NULL;
80991 +
80992 + call_stmt = gsi_stmt(*gsi);
80993 + old_fptr = gimple_call_fn(call_stmt);
80994 +
80995 + // create temporary fptr variable
80996 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
80997 + add_referenced_var(new_fptr);
80998 + mark_sym_for_renaming(new_fptr);
80999 +
81000 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
81001 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
81002 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
81003 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
81004 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
81005 + VEC_safe_push(tree, gc, inputs, input);
81006 + VEC_safe_push(tree, gc, outputs, output);
81007 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
81008 + gimple_asm_set_volatile(asm_or_stmt, true);
81009 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
81010 + update_stmt(asm_or_stmt);
81011 +
81012 + // replace call stmt fn with the new fptr
81013 + gimple_call_set_fn(call_stmt, new_fptr);
81014 + update_stmt(call_stmt);
81015 +}
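+/*
+ * Summary of the two function pointer instrumentation methods above:
+ * - "bts": the pointer is cast to an integer, OR-ed with 0x8000000000000000 in
+ *   GIMPLE and cast back before the indirect call;
+ * - "or":  an inline "orq %r10, <fptr>" is emitted instead, relying on %r10
+ *   holding the same mask (see kernexec_reload_fptr_mask() above and
+ *   fix_register() in plugin_init()).
+ */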
81016 +
81017 +/*
81018 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
81019 + */
81020 +static unsigned int execute_kernexec_fptr(void)
81021 +{
81022 + basic_block bb;
81023 +
81024 + // 1. loop through BBs and GIMPLE statements
81025 + FOR_EACH_BB(bb) {
81026 + gimple_stmt_iterator gsi;
81027 +
81028 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81029 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
81030 + tree fn;
81031 + gimple call_stmt;
81032 +
81033 + // is it a call ...
81034 + call_stmt = gsi_stmt(gsi);
81035 + if (!is_gimple_call(call_stmt))
81036 + continue;
81037 + fn = gimple_call_fn(call_stmt);
81038 + if (TREE_CODE(fn) == ADDR_EXPR)
81039 + continue;
81040 + if (TREE_CODE(fn) != SSA_NAME)
81041 + gcc_unreachable();
81042 +
81043 + // ... through a function pointer
81044 + fn = SSA_NAME_VAR(fn);
81045 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
81046 + continue;
81047 + fn = TREE_TYPE(fn);
81048 + if (TREE_CODE(fn) != POINTER_TYPE)
81049 + continue;
81050 + fn = TREE_TYPE(fn);
81051 + if (TREE_CODE(fn) != FUNCTION_TYPE)
81052 + continue;
81053 +
81054 + kernexec_instrument_fptr(&gsi);
81055 +
81056 +//debug_tree(gimple_call_fn(call_stmt));
81057 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
81058 + }
81059 + }
81060 +
81061 + return 0;
81062 +}
81063 +
81064 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
81065 +static void kernexec_instrument_retaddr_bts(rtx insn)
81066 +{
81067 + rtx btsq;
81068 + rtvec argvec, constraintvec, labelvec;
81069 + int line;
81070 +
81071 + // create asm volatile("btsq $63,(%%rsp)":::)
81072 + argvec = rtvec_alloc(0);
81073 + constraintvec = rtvec_alloc(0);
81074 + labelvec = rtvec_alloc(0);
81075 + line = expand_location(RTL_LOCATION(insn)).line;
81076 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81077 + MEM_VOLATILE_P(btsq) = 1;
81078 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
81079 + emit_insn_before(btsq, insn);
81080 +}
81081 +
81082 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
81083 +static void kernexec_instrument_retaddr_or(rtx insn)
81084 +{
81085 + rtx orq;
81086 + rtvec argvec, constraintvec, labelvec;
81087 + int line;
81088 +
81089 + // create asm volatile("orq %%r10,(%%rsp)":::)
81090 + argvec = rtvec_alloc(0);
81091 + constraintvec = rtvec_alloc(0);
81092 + labelvec = rtvec_alloc(0);
81093 + line = expand_location(RTL_LOCATION(insn)).line;
81094 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81095 + MEM_VOLATILE_P(orq) = 1;
81096 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
81097 + emit_insn_before(orq, insn);
81098 +}
81099 +
81100 +/*
81101 + * find all asm level function returns and forcibly set the highest bit of the return address
81102 + */
81103 +static unsigned int execute_kernexec_retaddr(void)
81104 +{
81105 + rtx insn;
81106 +
81107 + // 1. find function returns
81108 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
81109 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
81110 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
81111 + rtx body;
81112 +
81113 + // is it a retn
81114 + if (!JUMP_P(insn))
81115 + continue;
81116 + body = PATTERN(insn);
81117 + if (GET_CODE(body) == PARALLEL)
81118 + body = XVECEXP(body, 0, 0);
81119 + if (GET_CODE(body) != RETURN)
81120 + continue;
81121 + kernexec_instrument_retaddr(insn);
81122 + }
81123 +
81124 +// print_simple_rtl(stderr, get_insns());
81125 +// print_rtl(stderr, get_insns());
81126 +
81127 + return 0;
81128 +}
81129 +
81130 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81131 +{
81132 + const char * const plugin_name = plugin_info->base_name;
81133 + const int argc = plugin_info->argc;
81134 + const struct plugin_argument * const argv = plugin_info->argv;
81135 + int i;
81136 + struct register_pass_info kernexec_reload_pass_info = {
81137 + .pass = &kernexec_reload_pass.pass,
81138 + .reference_pass_name = "ssa",
81139 + .ref_pass_instance_number = 0,
81140 + .pos_op = PASS_POS_INSERT_AFTER
81141 + };
81142 + struct register_pass_info kernexec_fptr_pass_info = {
81143 + .pass = &kernexec_fptr_pass.pass,
81144 + .reference_pass_name = "ssa",
81145 + .ref_pass_instance_number = 0,
81146 + .pos_op = PASS_POS_INSERT_AFTER
81147 + };
81148 + struct register_pass_info kernexec_retaddr_pass_info = {
81149 + .pass = &kernexec_retaddr_pass.pass,
81150 + .reference_pass_name = "pro_and_epilogue",
81151 + .ref_pass_instance_number = 0,
81152 + .pos_op = PASS_POS_INSERT_AFTER
81153 + };
81154 +
81155 + if (!plugin_default_version_check(version, &gcc_version)) {
81156 + error(G_("incompatible gcc/plugin versions"));
81157 + return 1;
81158 + }
81159 +
81160 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
81161 +
81162 + if (TARGET_64BIT == 0)
81163 + return 0;
81164 +
81165 + for (i = 0; i < argc; ++i) {
81166 + if (!strcmp(argv[i].key, "method")) {
81167 + if (!argv[i].value) {
81168 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81169 + continue;
81170 + }
81171 + if (!strcmp(argv[i].value, "bts")) {
81172 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
81173 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
81174 + } else if (!strcmp(argv[i].value, "or")) {
81175 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
81176 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
81177 + fix_register("r10", 1, 1);
81178 + } else
81179 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81180 + continue;
81181 + }
81182 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81183 + }
81184 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
81185 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
81186 +
81187 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
81188 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
81189 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
81190 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
81191 +
81192 + return 0;
81193 +}
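+/*
+ * Illustrative invocation (the argument prefix is derived from the plugin's
+ * base name; the shared object name used here is only an example):
+ * $ gcc -fplugin=./kernexec_plugin.so -fplugin-arg-kernexec_plugin-method=bts -mcmodel=kernel test.c
+ * $ gcc -fplugin=./kernexec_plugin.so -fplugin-arg-kernexec_plugin-method=or  -mcmodel=kernel test.c
+ */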
81194 diff --git a/tools/gcc/size_overflow_hash.h b/tools/gcc/size_overflow_hash.h
81195 new file mode 100644
81196 index 0000000..ce7366b
81197 --- /dev/null
81198 +++ b/tools/gcc/size_overflow_hash.h
81199 @@ -0,0 +1,13933 @@
81200 +struct size_overflow_hash _000001_hash = {
81201 + .next = NULL,
81202 + .name = "alloc_dr",
81203 + .file = "drivers/base/devres.c",
81204 + .param2 = 1,
81205 +};
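+/*
+ * Each entry appears to describe one function whose size-like argument(s) the
+ * size_overflow plugin instruments: .name/.file identify the function and
+ * .paramN flags the Nth parameter as carrying a size to be overflow-checked.
+ * (Inferred from the field names; the table is consumed by the plugin itself,
+ * which lives elsewhere in this patch.)
+ */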
81206 +
81207 +struct size_overflow_hash _000002_hash = {
81208 + .next = NULL,
81209 + .name = "__copy_from_user",
81210 + .file = "arch/x86/include/asm/uaccess_32.h",
81211 + .param3 = 1,
81212 +};
81213 +
81214 +struct size_overflow_hash _000003_hash = {
81215 + .next = NULL,
81216 + .name = "copy_from_user",
81217 + .file = "arch/x86/include/asm/uaccess_32.h",
81218 + .param3 = 1,
81219 +};
81220 +
81221 +struct size_overflow_hash _000004_hash = {
81222 + .next = NULL,
81223 + .name = "__copy_from_user_inatomic",
81224 + .file = "arch/x86/include/asm/uaccess_32.h",
81225 + .param3 = 1,
81226 +};
81227 +
81228 +struct size_overflow_hash _000005_hash = {
81229 + .next = NULL,
81230 + .name = "__copy_from_user_nocache",
81231 + .file = "arch/x86/include/asm/uaccess_32.h",
81232 + .param3 = 1,
81233 +};
81234 +
81235 +struct size_overflow_hash _000006_hash = {
81236 + .next = NULL,
81237 + .name = "__copy_to_user_inatomic",
81238 + .file = "arch/x86/include/asm/uaccess_32.h",
81239 + .param3 = 1,
81240 +};
81241 +
81242 +struct size_overflow_hash _000007_hash = {
81243 + .next = NULL,
81244 + .name = "do_xip_mapping_read",
81245 + .file = "mm/filemap_xip.c",
81246 + .param5 = 1,
81247 +};
81248 +
81249 +struct size_overflow_hash _000008_hash = {
81250 + .next = NULL,
81251 + .name = "hugetlbfs_read",
81252 + .file = "fs/hugetlbfs/inode.c",
81253 + .param3 = 1,
81254 +};
81255 +
81256 +struct size_overflow_hash _000009_hash = {
81257 + .next = NULL,
81258 + .name = "kcalloc",
81259 + .file = "include/linux/slab.h",
81260 + .param1 = 1,
81261 + .param2 = 1,
81262 +};
81263 +
81264 +struct size_overflow_hash _000011_hash = {
81265 + .next = NULL,
81266 + .name = "kmalloc",
81267 + .file = "include/linux/slub_def.h",
81268 + .param1 = 1,
81269 +};
81270 +
81271 +struct size_overflow_hash _000012_hash = {
81272 + .next = NULL,
81273 + .name = "kmalloc_slab",
81274 + .file = "include/linux/slub_def.h",
81275 + .param1 = 1,
81276 +};
81277 +
81278 +struct size_overflow_hash _000013_hash = {
81279 + .next = NULL,
81280 + .name = "kmemdup",
81281 + .file = "include/linux/string.h",
81282 + .param2 = 1,
81283 +};
81284 +
81285 +struct size_overflow_hash _000014_hash = {
81286 + .next = NULL,
81287 + .name = "__krealloc",
81288 + .file = "include/linux/slab.h",
81289 + .param2 = 1,
81290 +};
81291 +
81292 +struct size_overflow_hash _000015_hash = {
81293 + .next = NULL,
81294 + .name = "memdup_user",
81295 + .file = "include/linux/string.h",
81296 + .param2 = 1,
81297 +};
81298 +
81299 +struct size_overflow_hash _000016_hash = {
81300 + .next = NULL,
81301 + .name = "module_alloc",
81302 + .file = "include/linux/moduleloader.h",
81303 + .param1 = 1,
81304 +};
81305 +
81306 +struct size_overflow_hash _000017_hash = {
81307 + .next = NULL,
81308 + .name = "read_default_ldt",
81309 + .file = "arch/x86/kernel/ldt.c",
81310 + .param2 = 1,
81311 +};
81312 +
81313 +struct size_overflow_hash _000018_hash = {
81314 + .next = NULL,
81315 + .name = "read_kcore",
81316 + .file = "fs/proc/kcore.c",
81317 + .param3 = 1,
81318 +};
81319 +
81320 +struct size_overflow_hash _000019_hash = {
81321 + .next = NULL,
81322 + .name = "read_ldt",
81323 + .file = "arch/x86/kernel/ldt.c",
81324 + .param2 = 1,
81325 +};
81326 +
81327 +struct size_overflow_hash _000020_hash = {
81328 + .next = NULL,
81329 + .name = "read_zero",
81330 + .file = "drivers/char/mem.c",
81331 + .param3 = 1,
81332 +};
81333 +
81334 +struct size_overflow_hash _000021_hash = {
81335 + .next = NULL,
81336 + .name = "__vmalloc_node",
81337 + .file = "mm/vmalloc.c",
81338 + .param1 = 1,
81339 +};
81340 +
81341 +struct size_overflow_hash _000022_hash = {
81342 + .next = NULL,
81343 + .name = "vm_map_ram",
81344 + .file = "include/linux/vmalloc.h",
81345 + .param2 = 1,
81346 +};
81347 +
81348 +struct size_overflow_hash _000023_hash = {
81349 + .next = NULL,
81350 + .name = "aa_simple_write_to_buffer",
81351 + .file = "security/apparmor/apparmorfs.c",
81352 + .param4 = 1,
81353 +};
81354 +
81355 +struct size_overflow_hash _000024_hash = {
81356 + .next = NULL,
81357 + .name = "ablkcipher_copy_iv",
81358 + .file = "crypto/ablkcipher.c",
81359 + .param3 = 1,
81360 +};
81361 +
81362 +struct size_overflow_hash _000025_hash = {
81363 + .next = NULL,
81364 + .name = "ablkcipher_next_slow",
81365 + .file = "crypto/ablkcipher.c",
81366 + .param4 = 1,
81367 +};
81368 +
81369 +struct size_overflow_hash _000026_hash = {
81370 + .next = NULL,
81371 + .name = "acpi_os_allocate",
81372 + .file = "include/acpi/platform/aclinux.h",
81373 + .param1 = 1,
81374 +};
81375 +
81376 +struct size_overflow_hash _000027_hash = {
81377 + .next = NULL,
81378 + .name = "acpi_system_write_wakeup_device",
81379 + .file = "drivers/acpi/proc.c",
81380 + .param3 = 1,
81381 +};
81382 +
81383 +struct size_overflow_hash _000028_hash = {
81384 + .next = NULL,
81385 + .name = "ahash_setkey_unaligned",
81386 + .file = "crypto/ahash.c",
81387 + .param3 = 1,
81388 +};
81389 +
81390 +struct size_overflow_hash _000029_hash = {
81391 + .next = NULL,
81392 + .name = "alloc_fdmem",
81393 + .file = "fs/file.c",
81394 + .param1 = 1,
81395 +};
81396 +
81397 +struct size_overflow_hash _000030_hash = {
81398 + .next = NULL,
81399 + .name = "audit_unpack_string",
81400 + .file = "kernel/auditfilter.c",
81401 + .param3 = 1,
81402 +};
81403 +
81404 +struct size_overflow_hash _000031_hash = {
81405 + .next = NULL,
81406 + .name = "bio_alloc_map_data",
81407 + .file = "fs/bio.c",
81408 + .param2 = 1,
81409 +};
81410 +
81411 +struct size_overflow_hash _000032_hash = {
81412 + .next = NULL,
81413 + .name = "bio_kmalloc",
81414 + .file = "include/linux/bio.h",
81415 + .param2 = 1,
81416 +};
81417 +
81418 +struct size_overflow_hash _000033_hash = {
81419 + .next = NULL,
81420 + .name = "blkcipher_copy_iv",
81421 + .file = "crypto/blkcipher.c",
81422 + .param3 = 1,
81423 +};
81424 +
81425 +struct size_overflow_hash _000034_hash = {
81426 + .next = NULL,
81427 + .name = "blkcipher_next_slow",
81428 + .file = "crypto/blkcipher.c",
81429 + .param4 = 1,
81430 +};
81431 +
81432 +struct size_overflow_hash _000035_hash = {
81433 + .next = NULL,
81434 + .name = "cgroup_write_string",
81435 + .file = "kernel/cgroup.c",
81436 + .param5 = 1,
81437 +};
81438 +
81439 +struct size_overflow_hash _000036_hash = {
81440 + .next = NULL,
81441 + .name = "cgroup_write_X64",
81442 + .file = "kernel/cgroup.c",
81443 + .param5 = 1,
81444 +};
81445 +
81446 +struct size_overflow_hash _000037_hash = {
81447 + .next = NULL,
81448 + .name = "clear_refs_write",
81449 + .file = "fs/proc/task_mmu.c",
81450 + .param3 = 1,
81451 +};
81452 +
81453 +struct size_overflow_hash _000038_hash = {
81454 + .next = NULL,
81455 + .name = "comm_write",
81456 + .file = "fs/proc/base.c",
81457 + .param3 = 1,
81458 +};
81459 +
81460 +struct size_overflow_hash _000039_hash = {
81461 + .next = NULL,
81462 + .name = "copy_and_check",
81463 + .file = "kernel/module.c",
81464 + .param3 = 1,
81465 +};
81466 +
81467 +struct size_overflow_hash _000040_hash = {
81468 + .next = NULL,
81469 + .name = "__copy_to_user",
81470 + .file = "arch/x86/include/asm/uaccess_32.h",
81471 + .param3 = 1,
81472 +};
81473 +
81474 +struct size_overflow_hash _000041_hash = {
81475 + .next = NULL,
81476 + .name = "copy_vm86_regs_from_user",
81477 + .file = "arch/x86/kernel/vm86_32.c",
81478 + .param3 = 1,
81479 +};
81480 +
81481 +struct size_overflow_hash _000042_hash = {
81482 + .next = NULL,
81483 + .name = "csum_partial_copy_fromiovecend",
81484 + .file = "include/linux/socket.h",
81485 + .param4 = 1,
81486 +};
81487 +
81488 +struct size_overflow_hash _000043_hash = {
81489 + .next = NULL,
81490 + .name = "ddebug_proc_write",
81491 + .file = "lib/dynamic_debug.c",
81492 + .param3 = 1,
81493 +};
81494 +
81495 +struct size_overflow_hash _000044_hash = {
81496 + .next = NULL,
81497 + .name = "devm_kzalloc",
81498 + .file = "include/linux/device.h",
81499 + .param2 = 1,
81500 +};
81501 +
81502 +struct size_overflow_hash _000045_hash = {
81503 + .next = NULL,
81504 + .name = "devres_alloc",
81505 + .file = "include/linux/device.h",
81506 + .param2 = 1,
81507 +};
81508 +
81509 +struct size_overflow_hash _000046_hash = {
81510 + .next = NULL,
81511 + .name = "do_ip_setsockopt",
81512 + .file = "net/ipv4/ip_sockglue.c",
81513 + .param5 = 1,
81514 +};
81515 +
81516 +struct size_overflow_hash _000047_hash = {
81517 + .next = NULL,
81518 + .name = "do_kimage_alloc",
81519 + .file = "kernel/kexec.c",
81520 + .param3 = 1,
81521 +};
81522 +
81523 +struct size_overflow_hash _000048_hash = {
81524 + .next = NULL,
81525 + .name = "do_tty_write",
81526 + .file = "drivers/tty/tty_io.c",
81527 + .param5 = 1,
81528 +};
81529 +
81530 +struct size_overflow_hash _000049_hash = {
81531 + .next = NULL,
81532 + .name = "fanotify_write",
81533 + .file = "fs/notify/fanotify/fanotify_user.c",
81534 + .param3 = 1,
81535 +};
81536 +
81537 +struct size_overflow_hash _000050_hash = {
81538 + .next = NULL,
81539 + .name = "file_read_actor",
81540 + .file = "include/linux/fs.h",
81541 + .param4 = 1,
81542 +};
81543 +
81544 +struct size_overflow_hash _000051_hash = {
81545 + .next = NULL,
81546 + .name = "fill_write_buffer",
81547 + .file = "fs/sysfs/file.c",
81548 + .param3 = 1,
81549 +};
81550 +
81551 +struct size_overflow_hash _000052_hash = {
81552 + .next = NULL,
81553 + .name = "get_user_cpu_mask",
81554 + .file = "kernel/sched/core.c",
81555 + .param2 = 1,
81556 +};
81557 +
81558 +struct size_overflow_hash _000053_hash = {
81559 + .next = NULL,
81560 + .name = "hashtab_create",
81561 + .file = "security/selinux/ss/hashtab.c",
81562 + .param3 = 1,
81563 +};
81564 +
81565 +struct size_overflow_hash _000054_hash = {
81566 + .next = NULL,
81567 + .name = "heap_init",
81568 + .file = "include/linux/prio_heap.h",
81569 + .param2 = 1,
81570 +};
81571 +
81572 +struct size_overflow_hash _000055_hash = {
81573 + .next = NULL,
81574 + .name = "hest_ghes_dev_register",
81575 + .file = "drivers/acpi/apei/hest.c",
81576 + .param1 = 1,
81577 +};
81578 +
81579 +struct size_overflow_hash _000056_hash = {
81580 + .next = NULL,
81581 + .name = "ima_write_policy",
81582 + .file = "security/integrity/ima/ima_fs.c",
81583 + .param3 = 1,
81584 +};
81585 +
81586 +struct size_overflow_hash _000057_hash = {
81587 + .next = NULL,
81588 + .name = "input_ff_create",
81589 + .file = "include/linux/input.h",
81590 + .param2 = 1,
81591 +};
81592 +
81593 +struct size_overflow_hash _000058_hash = {
81594 + .next = NULL,
81595 + .name = "input_mt_init_slots",
81596 + .file = "include/linux/input/mt.h",
81597 + .param2 = 1,
81598 +};
81599 +
81600 +struct size_overflow_hash _000059_hash = {
81601 + .next = NULL,
81602 + .name = "iov_iter_copy_from_user",
81603 + .file = "include/linux/fs.h",
81604 + .param4 = 1,
81605 +};
81606 +
81607 +struct size_overflow_hash _000060_hash = {
81608 + .next = NULL,
81609 + .name = "iov_iter_copy_from_user_atomic",
81610 + .file = "include/linux/fs.h",
81611 + .param4 = 1,
81612 +};
81613 +
81614 +struct size_overflow_hash _000061_hash = {
81615 + .next = NULL,
81616 + .name = "keyctl_instantiate_key_common",
81617 + .file = "security/keys/keyctl.c",
81618 + .param4 = 1,
81619 +};
81620 +
81621 +struct size_overflow_hash _000062_hash = {
81622 + .next = NULL,
81623 + .name = "keyctl_update_key",
81624 + .file = "security/keys/keyctl.c",
81625 + .param3 = 1,
81626 +};
81627 +
81628 +struct size_overflow_hash _000063_hash = {
81629 + .next = NULL,
81630 + .name = "__kfifo_alloc",
81631 + .file = "include/linux/kfifo.h",
81632 + .param2 = 1,
81633 + .param3 = 1,
81634 +};
81635 +
81636 +struct size_overflow_hash _000065_hash = {
81637 + .next = NULL,
81638 + .name = "kfifo_copy_from_user",
81639 + .file = "kernel/kfifo.c",
81640 + .param3 = 1,
81641 +};
81642 +
81643 +struct size_overflow_hash _000066_hash = {
81644 + .next = NULL,
81645 + .name = "kmalloc_node",
81646 + .file = "include/linux/slab.h",
81647 + .param1 = 1,
81648 +};
81649 +
81650 +struct size_overflow_hash _000067_hash = {
81651 + .next = NULL,
81652 + .name = "kmalloc_parameter",
81653 + .file = "kernel/params.c",
81654 + .param1 = 1,
81655 +};
81656 +
81657 +struct size_overflow_hash _000068_hash = {
81658 + .next = NULL,
81659 + .name = "kobj_map",
81660 + .file = "include/linux/kobj_map.h",
81661 + .param2 = 1,
81662 + .param3 = 1,
81663 +};
81664 +
81665 +struct size_overflow_hash _000070_hash = {
81666 + .next = NULL,
81667 + .name = "krealloc",
81668 + .file = "include/linux/slab.h",
81669 + .param2 = 1,
81670 +};
81671 +
81672 +struct size_overflow_hash _000071_hash = {
81673 + .next = NULL,
81674 + .name = "kvmalloc",
81675 + .file = "security/apparmor/lib.c",
81676 + .param1 = 1,
81677 +};
81678 +
81679 +struct size_overflow_hash _000072_hash = {
81680 + .next = NULL,
81681 + .name = "kzalloc",
81682 + .file = "include/linux/slab.h",
81683 + .param1 = 1,
81684 +};
81685 +
81686 +struct size_overflow_hash _000073_hash = {
81687 + .next = NULL,
81688 + .name = "listxattr",
81689 + .file = "fs/xattr.c",
81690 + .param3 = 1,
81691 +};
81692 +
81693 +struct size_overflow_hash _000074_hash = {
81694 + .next = NULL,
81695 + .name = "mempool_kmalloc",
81696 + .file = "include/linux/mempool.h",
81697 + .param2 = 1,
81698 +};
81699 +
81700 +struct size_overflow_hash _000075_hash = {
81701 + .next = NULL,
81702 + .name = "mem_rw",
81703 + .file = "fs/proc/base.c",
81704 + .param3 = 1,
81705 +};
81706 +
81707 +struct size_overflow_hash _000076_hash = {
81708 + .next = NULL,
81709 + .name = "module_alloc_update_bounds",
81710 + .file = "kernel/module.c",
81711 + .param1 = 1,
81712 +};
81713 +
81714 +struct size_overflow_hash _000077_hash = {
81715 + .next = NULL,
81716 + .name = "mpi_alloc_limb_space",
81717 + .file = "lib/mpi/mpiutil.c",
81718 + .param1 = 1,
81719 +};
81720 +
81721 +struct size_overflow_hash _000078_hash = {
81722 + .next = NULL,
81723 + .name = "mpi_resize",
81724 + .file = "include/linux/mpi.h",
81725 + .param2 = 1,
81726 +};
81727 +
81728 +struct size_overflow_hash _000079_hash = {
81729 + .next = NULL,
81730 + .name = "mtrr_write",
81731 + .file = "arch/x86/kernel/cpu/mtrr/if.c",
81732 + .param3 = 1,
81733 +};
81734 +
81735 +struct size_overflow_hash _000080_hash = {
81736 + .next = NULL,
81737 + .name = "oom_adjust_write",
81738 + .file = "fs/proc/base.c",
81739 + .param3 = 1,
81740 +};
81741 +
81742 +struct size_overflow_hash _000081_hash = {
81743 + .next = NULL,
81744 + .name = "oom_score_adj_write",
81745 + .file = "fs/proc/base.c",
81746 + .param3 = 1,
81747 +};
81748 +
81749 +struct size_overflow_hash _000082_hash = {
81750 + .next = NULL,
81751 + .name = "pipe_iov_copy_from_user",
81752 + .file = "fs/pipe.c",
81753 + .param3 = 1,
81754 +};
81755 +
81756 +struct size_overflow_hash _000083_hash = {
81757 + .next = NULL,
81758 + .name = "pipe_iov_copy_to_user",
81759 + .file = "fs/pipe.c",
81760 + .param3 = 1,
81761 +};
81762 +
81763 +struct size_overflow_hash _000084_hash = {
81764 + .next = NULL,
81765 + .name = "pipe_set_size",
81766 + .file = "fs/pipe.c",
81767 + .param2 = 1,
81768 +};
81769 +
81770 +struct size_overflow_hash _000085_hash = {
81771 + .next = NULL,
81772 + .name = "platform_device_add_data",
81773 + .file = "include/linux/platform_device.h",
81774 + .param3 = 1,
81775 +};
81776 +
81777 +struct size_overflow_hash _000086_hash = {
81778 + .next = NULL,
81779 + .name = "platform_device_add_resources",
81780 + .file = "include/linux/platform_device.h",
81781 + .param3 = 1,
81782 +};
81783 +
81784 +struct size_overflow_hash _000087_hash = {
81785 + .next = NULL,
81786 + .name = "pm_qos_power_write",
81787 + .file = "kernel/power/qos.c",
81788 + .param3 = 1,
81789 +};
81790 +
81791 +struct size_overflow_hash _000088_hash = {
81792 + .next = NULL,
81793 + .name = "pnpbios_proc_write",
81794 + .file = "drivers/pnp/pnpbios/proc.c",
81795 + .param3 = 1,
81796 +};
81797 +
81798 +struct size_overflow_hash _000089_hash = {
81799 + .next = NULL,
81800 + .name = "__probe_kernel_read",
81801 + .file = "include/linux/uaccess.h",
81802 + .param3 = 1,
81803 +};
81804 +
81805 +struct size_overflow_hash _000090_hash = {
81806 + .next = NULL,
81807 + .name = "__probe_kernel_write",
81808 + .file = "include/linux/uaccess.h",
81809 + .param3 = 1,
81810 +};
81811 +
81812 +struct size_overflow_hash _000091_hash = {
81813 + .next = NULL,
81814 + .name = "proc_coredump_filter_write",
81815 + .file = "fs/proc/base.c",
81816 + .param3 = 1,
81817 +};
81818 +
81819 +struct size_overflow_hash _000092_hash = {
81820 + .next = NULL,
81821 + .name = "process_vm_rw_pages",
81822 + .file = "mm/process_vm_access.c",
81823 + .param5 = 1,
81824 + .param6 = 1,
81825 +};
81826 +
81827 +struct size_overflow_hash _000094_hash = {
81828 + .next = NULL,
81829 + .name = "proc_loginuid_write",
81830 + .file = "fs/proc/base.c",
81831 + .param3 = 1,
81832 +};
81833 +
81834 +struct size_overflow_hash _000095_hash = {
81835 + .next = NULL,
81836 + .name = "proc_pid_attr_write",
81837 + .file = "fs/proc/base.c",
81838 + .param3 = 1,
81839 +};
81840 +
81841 +struct size_overflow_hash _000096_hash = {
81842 + .next = NULL,
81843 + .name = "pstore_mkfile",
81844 + .file = "fs/pstore/inode.c",
81845 + .param5 = 1,
81846 +};
81847 +
81848 +struct size_overflow_hash _000097_hash = {
81849 + .next = NULL,
81850 + .name = "qdisc_class_hash_alloc",
81851 + .file = "net/sched/sch_api.c",
81852 + .param1 = 1,
81853 +};
81854 +
81855 +struct size_overflow_hash _000098_hash = {
81856 + .next = NULL,
81857 + .name = "read",
81858 + .file = "fs/sysfs/bin.c",
81859 + .param3 = 1,
81860 +};
81861 +
81862 +struct size_overflow_hash _000099_hash = {
81863 + .next = NULL,
81864 + .name = "regmap_access_read_file",
81865 + .file = "drivers/base/regmap/regmap-debugfs.c",
81866 + .param3 = 1,
81867 +};
81868 +
81869 +struct size_overflow_hash _000100_hash = {
81870 + .next = NULL,
81871 + .name = "regmap_map_read_file",
81872 + .file = "drivers/base/regmap/regmap-debugfs.c",
81873 + .param3 = 1,
81874 +};
81875 +
81876 +struct size_overflow_hash _000101_hash = {
81877 + .next = NULL,
81878 + .name = "_regmap_raw_write",
81879 + .file = "drivers/base/regmap/regmap.c",
81880 + .param4 = 1,
81881 +};
81882 +
81883 +struct size_overflow_hash _000102_hash = {
81884 + .next = NULL,
81885 + .name = "regset_tls_set",
81886 + .file = "arch/x86/kernel/tls.c",
81887 + .param4 = 1,
81888 +};
81889 +
81890 +struct size_overflow_hash _000103_hash = {
81891 + .next = NULL,
81892 + .name = "request_key_auth_new",
81893 + .file = "security/keys/request_key_auth.c",
81894 + .param3 = 1,
81895 +};
81896 +
81897 +struct size_overflow_hash _000104_hash = {
81898 + .next = NULL,
81899 + .name = "restore_i387_fxsave",
81900 + .file = "arch/x86/kernel/i387.c",
81901 + .param2 = 1,
81902 +};
81903 +
81904 +struct size_overflow_hash _000105_hash = {
81905 + .next = NULL,
81906 + .name = "rngapi_reset",
81907 + .file = "crypto/rng.c",
81908 + .param3 = 1,
81909 +};
81910 +
81911 +struct size_overflow_hash _000106_hash = {
81912 + .next = NULL,
81913 + .name = "rw_copy_check_uvector",
81914 + .file = "include/linux/fs.h",
81915 + .param3 = 1,
81916 +};
81917 +
81918 +struct size_overflow_hash _000107_hash = {
81919 + .next = NULL,
81920 + .name = "sched_autogroup_write",
81921 + .file = "fs/proc/base.c",
81922 + .param3 = 1,
81923 +};
81924 +
81925 +struct size_overflow_hash _000108_hash = {
81926 + .next = NULL,
81927 + .name = "security_context_to_sid_core",
81928 + .file = "security/selinux/ss/services.c",
81929 + .param2 = 1,
81930 +};
81931 +
81932 +struct size_overflow_hash _000109_hash = {
81933 + .next = NULL,
81934 + .name = "sel_commit_bools_write",
81935 + .file = "security/selinux/selinuxfs.c",
81936 + .param3 = 1,
81937 +};
81938 +
81939 +struct size_overflow_hash _000110_hash = {
81940 + .next = NULL,
81941 + .name = "sel_write_avc_cache_threshold",
81942 + .file = "security/selinux/selinuxfs.c",
81943 + .param3 = 1,
81944 +};
81945 +
81946 +struct size_overflow_hash _000111_hash = {
81947 + .next = NULL,
81948 + .name = "sel_write_bool",
81949 + .file = "security/selinux/selinuxfs.c",
81950 + .param3 = 1,
81951 +};
81952 +
81953 +struct size_overflow_hash _000112_hash = {
81954 + .next = NULL,
81955 + .name = "sel_write_checkreqprot",
81956 + .file = "security/selinux/selinuxfs.c",
81957 + .param3 = 1,
81958 +};
81959 +
81960 +struct size_overflow_hash _000113_hash = {
81961 + .next = NULL,
81962 + .name = "sel_write_disable",
81963 + .file = "security/selinux/selinuxfs.c",
81964 + .param3 = 1,
81965 +};
81966 +
81967 +struct size_overflow_hash _000114_hash = {
81968 + .next = NULL,
81969 + .name = "sel_write_enforce",
81970 + .file = "security/selinux/selinuxfs.c",
81971 + .param3 = 1,
81972 +};
81973 +
81974 +struct size_overflow_hash _000115_hash = {
81975 + .next = NULL,
81976 + .name = "sel_write_load",
81977 + .file = "security/selinux/selinuxfs.c",
81978 + .param3 = 1,
81979 +};
81980 +
81981 +struct size_overflow_hash _000116_hash = {
81982 + .next = NULL,
81983 + .name = "setkey_unaligned",
81984 + .file = "crypto/ablkcipher.c",
81985 + .param3 = 1,
81986 +};
81987 +
81988 +struct size_overflow_hash _000117_hash = {
81989 + .next = NULL,
81990 + .name = "setkey_unaligned",
81991 + .file = "crypto/blkcipher.c",
81992 + .param3 = 1,
81993 +};
81994 +
81995 +struct size_overflow_hash _000118_hash = {
81996 + .next = NULL,
81997 + .name = "setkey_unaligned",
81998 + .file = "crypto/aead.c",
81999 + .param3 = 1,
82000 +};
82001 +
82002 +struct size_overflow_hash _000119_hash = {
82003 + .next = NULL,
82004 + .name = "setkey_unaligned",
82005 + .file = "crypto/cipher.c",
82006 + .param3 = 1,
82007 +};
82008 +
82009 +struct size_overflow_hash _000120_hash = {
82010 + .next = NULL,
82011 + .name = "setxattr",
82012 + .file = "fs/xattr.c",
82013 + .param4 = 1,
82014 +};
82015 +
82016 +struct size_overflow_hash _000121_hash = {
82017 + .next = NULL,
82018 + .name = "sg_kmalloc",
82019 + .file = "lib/scatterlist.c",
82020 + .param1 = 1,
82021 +};
82022 +
82023 +struct size_overflow_hash _000122_hash = {
82024 + .next = NULL,
82025 + .name = "shash_setkey_unaligned",
82026 + .file = "crypto/shash.c",
82027 + .param3 = 1,
82028 +};
82029 +
82030 +struct size_overflow_hash _000123_hash = {
82031 + .next = NULL,
82032 + .name = "shmem_xattr_set",
82033 + .file = "mm/shmem.c",
82034 + .param4 = 1,
82035 +};
82036 +
82037 +struct size_overflow_hash _000124_hash = {
82038 + .next = NULL,
82039 + .name = "simple_transaction_get",
82040 + .file = "include/linux/fs.h",
82041 + .param3 = 1,
82042 +};
82043 +
82044 +struct size_overflow_hash _000125_hash = {
82045 + .next = NULL,
82046 + .name = "simple_write_to_buffer",
82047 + .file = "include/linux/fs.h",
82048 + .param2 = 1,
82049 + .param5 = 1,
82050 +};
82051 +
82052 +struct size_overflow_hash _000127_hash = {
82053 + .next = NULL,
82054 + .name = "smk_write_ambient",
82055 + .file = "security/smack/smackfs.c",
82056 + .param3 = 1,
82057 +};
82058 +
82059 +struct size_overflow_hash _000128_hash = {
82060 + .next = NULL,
82061 + .name = "smk_write_cipso",
82062 + .file = "security/smack/smackfs.c",
82063 + .param3 = 1,
82064 +};
82065 +
82066 +struct size_overflow_hash _000129_hash = {
82067 + .next = NULL,
82068 + .name = "smk_write_direct",
82069 + .file = "security/smack/smackfs.c",
82070 + .param3 = 1,
82071 +};
82072 +
82073 +struct size_overflow_hash _000130_hash = {
82074 + .next = NULL,
82075 + .name = "smk_write_doi",
82076 + .file = "security/smack/smackfs.c",
82077 + .param3 = 1,
82078 +};
82079 +
82080 +struct size_overflow_hash _000131_hash = {
82081 + .next = NULL,
82082 + .name = "smk_write_load_list",
82083 + .file = "security/smack/smackfs.c",
82084 + .param3 = 1,
82085 +};
82086 +
82087 +struct size_overflow_hash _000132_hash = {
82088 + .next = &_000102_hash,
82089 + .name = "smk_write_logging",
82090 + .file = "security/smack/smackfs.c",
82091 + .param3 = 1,
82092 +};
82093 +
82094 +struct size_overflow_hash _000133_hash = {
82095 + .next = NULL,
82096 + .name = "smk_write_netlbladdr",
82097 + .file = "security/smack/smackfs.c",
82098 + .param3 = 1,
82099 +};
82100 +
82101 +struct size_overflow_hash _000134_hash = {
82102 + .next = NULL,
82103 + .name = "smk_write_onlycap",
82104 + .file = "security/smack/smackfs.c",
82105 + .param3 = 1,
82106 +};
82107 +
82108 +struct size_overflow_hash _000135_hash = {
82109 + .next = NULL,
82110 + .name = "sys_add_key",
82111 + .file = "include/linux/syscalls.h",
82112 + .param4 = 1,
82113 +};
82114 +
82115 +struct size_overflow_hash _000136_hash = {
82116 + .next = NULL,
82117 + .name = "sys_modify_ldt",
82118 + .file = "arch/x86/include/asm/syscalls.h",
82119 + .param3 = 1,
82120 +};
82121 +
82122 +struct size_overflow_hash _000137_hash = {
82123 + .next = NULL,
82124 + .name = "sys_semtimedop",
82125 + .file = "include/linux/syscalls.h",
82126 + .param3 = 1,
82127 +};
82128 +
82129 +struct size_overflow_hash _000138_hash = {
82130 + .next = NULL,
82131 + .name = "tomoyo_write_self",
82132 + .file = "security/tomoyo/securityfs_if.c",
82133 + .param3 = 1,
82134 +};
82135 +
82136 +struct size_overflow_hash _000139_hash = {
82137 + .next = NULL,
82138 + .name = "tpm_write",
82139 + .file = "drivers/char/tpm/tpm.c",
82140 + .param3 = 1,
82141 +};
82142 +
82143 +struct size_overflow_hash _000140_hash = {
82144 + .next = NULL,
82145 + .name = "tty_buffer_alloc",
82146 + .file = "drivers/tty/tty_buffer.c",
82147 + .param2 = 1,
82148 +};
82149 +
82150 +struct size_overflow_hash _000141_hash = {
82151 + .next = NULL,
82152 + .name = "user_instantiate",
82153 + .file = "include/keys/user-type.h",
82154 + .param3 = 1,
82155 +};
82156 +
82157 +struct size_overflow_hash _000142_hash = {
82158 + .next = NULL,
82159 + .name = "user_update",
82160 + .file = "include/keys/user-type.h",
82161 + .param3 = 1,
82162 +};
82163 +
82164 +struct size_overflow_hash _000143_hash = {
82165 + .next = NULL,
82166 + .name = "vc_do_resize",
82167 + .file = "drivers/tty/vt/vt.c",
82168 + .param3 = 1,
82169 + .param4 = 1,
82170 +};
82171 +
82172 +struct size_overflow_hash _000145_hash = {
82173 + .next = NULL,
82174 + .name = "vcs_write",
82175 + .file = "drivers/tty/vt/vc_screen.c",
82176 + .param3 = 1,
82177 +};
82178 +
82179 +struct size_overflow_hash _000146_hash = {
82180 + .next = NULL,
82181 + .name = "vga_arb_write",
82182 + .file = "drivers/gpu/vga/vgaarb.c",
82183 + .param3 = 1,
82184 +};
82185 +
82186 +struct size_overflow_hash _000147_hash = {
82187 + .next = NULL,
82188 + .name = "vga_switcheroo_debugfs_write",
82189 + .file = "drivers/gpu/vga/vga_switcheroo.c",
82190 + .param3 = 1,
82191 +};
82192 +
82193 +struct size_overflow_hash _000148_hash = {
82194 + .next = NULL,
82195 + .name = "__vmalloc",
82196 + .file = "include/linux/vmalloc.h",
82197 + .param1 = 1,
82198 +};
82199 +
82200 +struct size_overflow_hash _000149_hash = {
82201 + .next = NULL,
82202 + .name = "vmalloc_32",
82203 + .file = "include/linux/vmalloc.h",
82204 + .param1 = 1,
82205 +};
82206 +
82207 +struct size_overflow_hash _000150_hash = {
82208 + .next = NULL,
82209 + .name = "vmalloc_32_user",
82210 + .file = "include/linux/vmalloc.h",
82211 + .param1 = 1,
82212 +};
82213 +
82214 +struct size_overflow_hash _000151_hash = {
82215 + .next = NULL,
82216 + .name = "vmalloc_exec",
82217 + .file = "include/linux/vmalloc.h",
82218 + .param1 = 1,
82219 +};
82220 +
82221 +struct size_overflow_hash _000152_hash = {
82222 + .next = NULL,
82223 + .name = "vmalloc_node",
82224 + .file = "include/linux/vmalloc.h",
82225 + .param1 = 1,
82226 +};
82227 +
82228 +struct size_overflow_hash _000153_hash = {
82229 + .next = NULL,
82230 + .name = "__vmalloc_node_flags",
82231 + .file = "mm/vmalloc.c",
82232 + .param1 = 1,
82233 +};
82234 +
82235 +struct size_overflow_hash _000154_hash = {
82236 + .next = NULL,
82237 + .name = "vmalloc_user",
82238 + .file = "include/linux/vmalloc.h",
82239 + .param1 = 1,
82240 +};
82241 +
82242 +struct size_overflow_hash _000155_hash = {
82243 + .next = NULL,
82244 + .name = "write",
82245 + .file = "fs/sysfs/bin.c",
82246 + .param3 = 1,
82247 +};
82248 +
82249 +struct size_overflow_hash _000156_hash = {
82250 + .next = NULL,
82251 + .name = "__xip_file_write",
82252 + .file = "mm/filemap_xip.c",
82253 + .param3 = 1,
82254 +};
82255 +
82256 +struct size_overflow_hash _000157_hash = {
82257 + .next = NULL,
82258 + .name = "acpi_ex_allocate_name_string",
82259 + .file = "drivers/acpi/acpica/exnames.c",
82260 + .param2 = 1,
82261 +};
82262 +
82263 +struct size_overflow_hash _000158_hash = {
82264 + .next = NULL,
82265 + .name = "acpi_os_allocate_zeroed",
82266 + .file = "include/acpi/platform/aclinux.h",
82267 + .param1 = 1,
82268 +};
82269 +
82270 +struct size_overflow_hash _000159_hash = {
82271 + .next = NULL,
82272 + .name = "acpi_ut_initialize_buffer",
82273 + .file = "drivers/acpi/acpica/utalloc.c",
82274 + .param2 = 1,
82275 +};
82276 +
82277 +struct size_overflow_hash _000160_hash = {
82278 + .next = NULL,
82279 + .name = "add_numbered_child",
82280 + .file = "drivers/mfd/twl-core.c",
82281 + .param5 = 1,
82282 +};
82283 +
82284 +struct size_overflow_hash _000161_hash = {
82285 + .next = NULL,
82286 + .name = "___alloc_bootmem_nopanic",
82287 + .file = "mm/nobootmem.c",
82288 + .param1 = 1,
82289 +};
82290 +
82291 +struct size_overflow_hash _000162_hash = {
82292 + .next = NULL,
82293 + .name = "alloc_large_system_hash",
82294 + .file = "include/linux/bootmem.h",
82295 + .param2 = 1,
82296 +};
82297 +
82298 +struct size_overflow_hash _000163_hash = {
82299 + .next = NULL,
82300 + .name = "audit_init_entry",
82301 + .file = "kernel/auditfilter.c",
82302 + .param1 = 1,
82303 +};
82304 +
82305 +struct size_overflow_hash _000164_hash = {
82306 + .next = NULL,
82307 + .name = "__bio_map_kern",
82308 + .file = "fs/bio.c",
82309 + .param2 = 1,
82310 + .param3 = 1,
82311 +};
82312 +
82313 +struct size_overflow_hash _000166_hash = {
82314 + .next = NULL,
82315 + .name = "blk_register_region",
82316 + .file = "include/linux/genhd.h",
82317 + .param1 = 1,
82318 + .param2 = 1,
82319 +};
82320 +
82321 +struct size_overflow_hash _000168_hash = {
82322 + .next = NULL,
82323 + .name = "cdev_add",
82324 + .file = "include/linux/cdev.h",
82325 + .param2 = 1,
82326 + .param3 = 1,
82327 +};
82328 +
82329 +struct size_overflow_hash _000170_hash = {
82330 + .next = NULL,
82331 + .name = "copy_to_user",
82332 + .file = "arch/x86/include/asm/uaccess_32.h",
82333 + .param3 = 1,
82334 +};
82335 +
82336 +struct size_overflow_hash _000171_hash = {
82337 + .next = NULL,
82338 + .name = "crypto_ahash_setkey",
82339 + .file = "include/crypto/hash.h",
82340 + .param3 = 1,
82341 +};
82342 +
82343 +struct size_overflow_hash _000172_hash = {
82344 + .next = NULL,
82345 + .name = "crypto_alloc_instance2",
82346 + .file = "include/crypto/algapi.h",
82347 + .param3 = 1,
82348 +};
82349 +
82350 +struct size_overflow_hash _000173_hash = {
82351 + .next = NULL,
82352 + .name = "crypto_shash_setkey",
82353 + .file = "include/crypto/hash.h",
82354 + .param3 = 1,
82355 +};
82356 +
82357 +struct size_overflow_hash _000174_hash = {
82358 + .next = NULL,
82359 + .name = "dev_set_alias",
82360 + .file = "include/linux/netdevice.h",
82361 + .param3 = 1,
82362 +};
82363 +
82364 +struct size_overflow_hash _000175_hash = {
82365 + .next = NULL,
82366 + .name = "do_readv_writev",
82367 + .file = "fs/read_write.c",
82368 + .param4 = 1,
82369 +};
82370 +
82371 +struct size_overflow_hash _000176_hash = {
82372 + .next = NULL,
82373 + .name = "getxattr",
82374 + .file = "fs/xattr.c",
82375 + .param4 = 1,
82376 +};
82377 +
82378 +struct size_overflow_hash _000177_hash = {
82379 + .next = NULL,
82380 + .name = "hugetlbfs_read_actor",
82381 + .file = "fs/hugetlbfs/inode.c",
82382 + .param2 = 1,
82383 + .param5 = 1,
82384 + .param4 = 1,
82385 +};
82386 +
82387 +struct size_overflow_hash _000180_hash = {
82388 + .next = NULL,
82389 + .name = "keyctl_instantiate_key",
82390 + .file = "security/keys/keyctl.c",
82391 + .param3 = 1,
82392 +};
82393 +
82394 +struct size_overflow_hash _000181_hash = {
82395 + .next = NULL,
82396 + .name = "keyctl_instantiate_key_iov",
82397 + .file = "security/keys/keyctl.c",
82398 + .param3 = 1,
82399 +};
82400 +
82401 +struct size_overflow_hash _000182_hash = {
82402 + .next = NULL,
82403 + .name = "__kfifo_from_user",
82404 + .file = "include/linux/kfifo.h",
82405 + .param3 = 1,
82406 +};
82407 +
82408 +struct size_overflow_hash _000183_hash = {
82409 + .next = NULL,
82410 + .name = "kimage_crash_alloc",
82411 + .file = "kernel/kexec.c",
82412 + .param3 = 1,
82413 +};
82414 +
82415 +struct size_overflow_hash _000184_hash = {
82416 + .next = NULL,
82417 + .name = "kimage_normal_alloc",
82418 + .file = "kernel/kexec.c",
82419 + .param3 = 1,
82420 +};
82421 +
82422 +struct size_overflow_hash _000185_hash = {
82423 + .next = NULL,
82424 + .name = "mpi_alloc",
82425 + .file = "include/linux/mpi.h",
82426 + .param1 = 1,
82427 +};
82428 +
82429 +struct size_overflow_hash _000186_hash = {
82430 + .next = NULL,
82431 + .name = "mpi_set_bit",
82432 + .file = "include/linux/mpi.h",
82433 + .param2 = 1,
82434 +};
82435 +
82436 +struct size_overflow_hash _000187_hash = {
82437 + .next = NULL,
82438 + .name = "mpi_set_highbit",
82439 + .file = "include/linux/mpi.h",
82440 + .param2 = 1,
82441 +};
82442 +
82443 +struct size_overflow_hash _000188_hash = {
82444 + .next = NULL,
82445 + .name = "neigh_hash_alloc",
82446 + .file = "net/core/neighbour.c",
82447 + .param1 = 1,
82448 +};
82449 +
82450 +struct size_overflow_hash _000189_hash = {
82451 + .next = NULL,
82452 + .name = "nl_pid_hash_zalloc",
82453 + .file = "net/netlink/af_netlink.c",
82454 + .param1 = 1,
82455 +};
82456 +
82457 +struct size_overflow_hash _000190_hash = {
82458 + .next = NULL,
82459 + .name = "pci_add_cap_save_buffer",
82460 + .file = "drivers/pci/pci.c",
82461 + .param3 = 1,
82462 +};
82463 +
82464 +struct size_overflow_hash _000191_hash = {
82465 + .next = NULL,
82466 + .name = "pcpu_mem_zalloc",
82467 + .file = "mm/percpu.c",
82468 + .param1 = 1,
82469 +};
82470 +
82471 +struct size_overflow_hash _000192_hash = {
82472 + .next = NULL,
82473 + .name = "platform_create_bundle",
82474 + .file = "include/linux/platform_device.h",
82475 + .param4 = 1,
82476 + .param6 = 1,
82477 +};
82478 +
82479 +struct size_overflow_hash _000194_hash = {
82480 + .next = NULL,
82481 + .name = "process_vm_rw",
82482 + .file = "mm/process_vm_access.c",
82483 + .param3 = 1,
82484 + .param5 = 1,
82485 +};
82486 +
82487 +struct size_overflow_hash _000196_hash = {
82488 + .next = NULL,
82489 + .name = "process_vm_rw_single_vec",
82490 + .file = "mm/process_vm_access.c",
82491 + .param1 = 1,
82492 + .param2 = 1,
82493 +};
82494 +
82495 +struct size_overflow_hash _000198_hash = {
82496 + .next = NULL,
82497 + .name = "profile_load",
82498 + .file = "security/apparmor/apparmorfs.c",
82499 + .param3 = 1,
82500 +};
82501 +
82502 +struct size_overflow_hash _000199_hash = {
82503 + .next = NULL,
82504 + .name = "profile_remove",
82505 + .file = "security/apparmor/apparmorfs.c",
82506 + .param3 = 1,
82507 +};
82508 +
82509 +struct size_overflow_hash _000200_hash = {
82510 + .next = NULL,
82511 + .name = "profile_replace",
82512 + .file = "security/apparmor/apparmorfs.c",
82513 + .param3 = 1,
82514 +};
82515 +
82516 +struct size_overflow_hash _000201_hash = {
82517 + .next = NULL,
82518 + .name = "regcache_rbtree_insert_to_block",
82519 + .file = "drivers/base/regmap/regcache-rbtree.c",
82520 + .param5 = 1,
82521 +};
82522 +
82523 +struct size_overflow_hash _000202_hash = {
82524 + .next = NULL,
82525 + .name = "regmap_raw_write",
82526 + .file = "include/linux/regmap.h",
82527 + .param4 = 1,
82528 +};
82529 +
82530 +struct size_overflow_hash _000203_hash = {
82531 + .next = NULL,
82532 + .name = "relay_alloc_page_array",
82533 + .file = "kernel/relay.c",
82534 + .param1 = 1,
82535 +};
82536 +
82537 +struct size_overflow_hash _000204_hash = {
82538 + .next = NULL,
82539 + .name = "RESIZE_IF_NEEDED",
82540 + .file = "lib/mpi/mpi-internal.h",
82541 + .param2 = 1,
82542 +};
82543 +
82544 +struct size_overflow_hash _000205_hash = {
82545 + .next = NULL,
82546 + .name = "security_context_to_sid",
82547 + .file = "security/selinux/ss/services.c",
82548 + .param2 = 1,
82549 +};
82550 +
82551 +struct size_overflow_hash _000206_hash = {
82552 + .next = NULL,
82553 + .name = "security_context_to_sid_default",
82554 + .file = "security/selinux/ss/services.c",
82555 + .param2 = 1,
82556 +};
82557 +
82558 +struct size_overflow_hash _000207_hash = {
82559 + .next = NULL,
82560 + .name = "security_context_to_sid_force",
82561 + .file = "security/selinux/ss/services.c",
82562 + .param2 = 1,
82563 +};
82564 +
82565 +struct size_overflow_hash _000208_hash = {
82566 + .next = NULL,
82567 + .name = "selinux_transaction_write",
82568 + .file = "security/selinux/selinuxfs.c",
82569 + .param3 = 1,
82570 +};
82571 +
82572 +struct size_overflow_hash _000209_hash = {
82573 + .next = NULL,
82574 + .name = "sel_write_access",
82575 + .file = "security/selinux/selinuxfs.c",
82576 + .param3 = 1,
82577 +};
82578 +
82579 +struct size_overflow_hash _000210_hash = {
82580 + .next = NULL,
82581 + .name = "sel_write_create",
82582 + .file = "security/selinux/selinuxfs.c",
82583 + .param3 = 1,
82584 +};
82585 +
82586 +struct size_overflow_hash _000211_hash = {
82587 + .next = NULL,
82588 + .name = "sel_write_member",
82589 + .file = "security/selinux/selinuxfs.c",
82590 + .param3 = 1,
82591 +};
82592 +
82593 +struct size_overflow_hash _000212_hash = {
82594 + .next = NULL,
82595 + .name = "sel_write_relabel",
82596 + .file = "security/selinux/selinuxfs.c",
82597 + .param3 = 1,
82598 +};
82599 +
82600 +struct size_overflow_hash _000213_hash = {
82601 + .next = NULL,
82602 + .name = "sel_write_user",
82603 + .file = "security/selinux/selinuxfs.c",
82604 + .param3 = 1,
82605 +};
82606 +
82607 +struct size_overflow_hash _000214_hash = {
82608 + .next = NULL,
82609 + .name = "setkey",
82610 + .file = "crypto/cipher.c",
82611 + .param3 = 1,
82612 +};
82613 +
82614 +struct size_overflow_hash _000215_hash = {
82615 + .next = NULL,
82616 + .name = "setkey",
82617 + .file = "crypto/ablkcipher.c",
82618 + .param3 = 1,
82619 +};
82620 +
82621 +struct size_overflow_hash _000216_hash = {
82622 + .next = NULL,
82623 + .name = "setkey",
82624 + .file = "crypto/aead.c",
82625 + .param3 = 1,
82626 +};
82627 +
82628 +struct size_overflow_hash _000217_hash = {
82629 + .next = NULL,
82630 + .name = "setkey",
82631 + .file = "crypto/blkcipher.c",
82632 + .param3 = 1,
82633 +};
82634 +
82635 +struct size_overflow_hash _000218_hash = {
82636 + .next = NULL,
82637 + .name = "smk_write_access",
82638 + .file = "security/smack/smackfs.c",
82639 + .param3 = 1,
82640 +};
82641 +
82642 +struct size_overflow_hash _000219_hash = {
82643 + .next = NULL,
82644 + .name = "snapshot_write",
82645 + .file = "kernel/power/user.c",
82646 + .param3 = 1,
82647 +};
82648 +
82649 +struct size_overflow_hash _000220_hash = {
82650 + .next = NULL,
82651 + .name = "spi_alloc_master",
82652 + .file = "include/linux/spi/spi.h",
82653 + .param2 = 1,
82654 +};
82655 +
82656 +struct size_overflow_hash _000221_hash = {
82657 + .next = NULL,
82658 + .name = "spi_register_board_info",
82659 + .file = "include/linux/spi/spi.h",
82660 + .param2 = 1,
82661 +};
82662 +
82663 +struct size_overflow_hash _000222_hash = {
82664 + .next = NULL,
82665 + .name = "sys_flistxattr",
82666 + .file = "include/linux/syscalls.h",
82667 + .param3 = 1,
82668 +};
82669 +
82670 +struct size_overflow_hash _000223_hash = {
82671 + .next = NULL,
82672 + .name = "sys_fsetxattr",
82673 + .file = "include/linux/syscalls.h",
82674 + .param4 = 1,
82675 +};
82676 +
82677 +struct size_overflow_hash _000224_hash = {
82678 + .next = NULL,
82679 + .name = "sysfs_write_file",
82680 + .file = "fs/sysfs/file.c",
82681 + .param3 = 1,
82682 +};
82683 +
82684 +struct size_overflow_hash _000225_hash = {
82685 + .next = NULL,
82686 + .name = "sys_ipc",
82687 + .file = "include/linux/syscalls.h",
82688 + .param3 = 1,
82689 +};
82690 +
82691 +struct size_overflow_hash _000226_hash = {
82692 + .next = NULL,
82693 + .name = "sys_keyctl",
82694 + .file = "include/linux/syscalls.h",
82695 + .param4 = 1,
82696 +};
82697 +
82698 +struct size_overflow_hash _000227_hash = {
82699 + .next = NULL,
82700 + .name = "sys_listxattr",
82701 + .file = "include/linux/syscalls.h",
82702 + .param3 = 1,
82703 +};
82704 +
82705 +struct size_overflow_hash _000228_hash = {
82706 + .next = NULL,
82707 + .name = "sys_llistxattr",
82708 + .file = "include/linux/syscalls.h",
82709 + .param3 = 1,
82710 +};
82711 +
82712 +struct size_overflow_hash _000229_hash = {
82713 + .next = NULL,
82714 + .name = "sys_lsetxattr",
82715 + .file = "include/linux/syscalls.h",
82716 + .param4 = 1,
82717 +};
82718 +
82719 +struct size_overflow_hash _000230_hash = {
82720 + .next = NULL,
82721 + .name = "sys_sched_setaffinity",
82722 + .file = "include/linux/syscalls.h",
82723 + .param2 = 1,
82724 +};
82725 +
82726 +struct size_overflow_hash _000231_hash = {
82727 + .next = NULL,
82728 + .name = "sys_semop",
82729 + .file = "include/linux/syscalls.h",
82730 + .param3 = 1,
82731 +};
82732 +
82733 +struct size_overflow_hash _000232_hash = {
82734 + .next = NULL,
82735 + .name = "sys_setxattr",
82736 + .file = "include/linux/syscalls.h",
82737 + .param4 = 1,
82738 +};
82739 +
82740 +struct size_overflow_hash _000233_hash = {
82741 + .next = NULL,
82742 + .name = "tnode_alloc",
82743 + .file = "net/ipv4/fib_trie.c",
82744 + .param1 = 1,
82745 +};
82746 +
82747 +struct size_overflow_hash _000234_hash = {
82748 + .next = NULL,
82749 + .name = "tomoyo_commit_ok",
82750 + .file = "security/tomoyo/memory.c",
82751 + .param2 = 1,
82752 +};
82753 +
82754 +struct size_overflow_hash _000235_hash = {
82755 + .next = NULL,
82756 + .name = "tomoyo_scan_bprm",
82757 + .file = "security/tomoyo/condition.c",
82758 + .param2 = 1,
82759 + .param4 = 1,
82760 +};
82761 +
82762 +struct size_overflow_hash _000237_hash = {
82763 + .next = NULL,
82764 + .name = "tty_write",
82765 + .file = "drivers/tty/tty_io.c",
82766 + .param3 = 1,
82767 +};
82768 +
82769 +struct size_overflow_hash _000238_hash = {
82770 + .next = NULL,
82771 + .name = "vc_resize",
82772 + .file = "include/linux/vt_kern.h",
82773 + .param2 = 1,
82774 + .param3 = 1,
82775 +};
82776 +
82777 +struct size_overflow_hash _000240_hash = {
82778 + .next = NULL,
82779 + .name = "vmalloc",
82780 + .file = "include/linux/vmalloc.h",
82781 + .param1 = 1,
82782 +};
82783 +
82784 +struct size_overflow_hash _000241_hash = {
82785 + .next = NULL,
82786 + .name = "vzalloc",
82787 + .file = "include/linux/vmalloc.h",
82788 + .param1 = 1,
82789 +};
82790 +
82791 +struct size_overflow_hash _000242_hash = {
82792 + .next = NULL,
82793 + .name = "vzalloc_node",
82794 + .file = "include/linux/vmalloc.h",
82795 + .param1 = 1,
82796 +};
82797 +
82798 +struct size_overflow_hash _000243_hash = {
82799 + .next = NULL,
82800 + .name = "xfrm_hash_alloc",
82801 + .file = "net/xfrm/xfrm_hash.c",
82802 + .param1 = 1,
82803 +};
82804 +
82805 +struct size_overflow_hash _000244_hash = {
82806 + .next = NULL,
82807 + .name = "acpi_ds_build_internal_package_obj",
82808 + .file = "drivers/acpi/acpica/dsobject.c",
82809 + .param3 = 1,
82810 +};
82811 +
82812 +struct size_overflow_hash _000245_hash = {
82813 + .next = NULL,
82814 + .name = "acpi_system_read_event",
82815 + .file = "drivers/acpi/event.c",
82816 + .param3 = 1,
82817 +};
82818 +
82819 +struct size_overflow_hash _000246_hash = {
82820 + .next = NULL,
82821 + .name = "acpi_ut_create_buffer_object",
82822 + .file = "drivers/acpi/acpica/utobject.c",
82823 + .param1 = 1,
82824 +};
82825 +
82826 +struct size_overflow_hash _000247_hash = {
82827 + .next = NULL,
82828 + .name = "acpi_ut_create_package_object",
82829 + .file = "drivers/acpi/acpica/utobject.c",
82830 + .param1 = 1,
82831 +};
82832 +
82833 +struct size_overflow_hash _000248_hash = {
82834 + .next = NULL,
82835 + .name = "acpi_ut_create_string_object",
82836 + .file = "drivers/acpi/acpica/utobject.c",
82837 + .param1 = 1,
82838 +};
82839 +
82840 +struct size_overflow_hash _000249_hash = {
82841 + .next = NULL,
82842 + .name = "add_child",
82843 + .file = "drivers/mfd/twl-core.c",
82844 + .param4 = 1,
82845 +};
82846 +
82847 +struct size_overflow_hash _000250_hash = {
82848 + .next = NULL,
82849 + .name = "___alloc_bootmem",
82850 + .file = "mm/nobootmem.c",
82851 + .param1 = 1,
82852 +};
82853 +
82854 +struct size_overflow_hash _000251_hash = {
82855 + .next = NULL,
82856 + .name = "__alloc_bootmem_nopanic",
82857 + .file = "include/linux/bootmem.h",
82858 + .param1 = 1,
82859 +};
82860 +
82861 +struct size_overflow_hash _000252_hash = {
82862 + .next = NULL,
82863 + .name = "async_setkey",
82864 + .file = "crypto/blkcipher.c",
82865 + .param3 = 1,
82866 +};
82867 +
82868 +struct size_overflow_hash _000253_hash = {
82869 + .next = NULL,
82870 + .name = "bio_map_kern",
82871 + .file = "include/linux/bio.h",
82872 + .param3 = 1,
82873 +};
82874 +
82875 +struct size_overflow_hash _000254_hash = {
82876 + .next = NULL,
82877 + .name = "copy_oldmem_page",
82878 + .file = "include/linux/crash_dump.h",
82879 + .param3 = 1,
82880 +};
82881 +
82882 +struct size_overflow_hash _000255_hash = {
82883 + .next = NULL,
82884 + .name = "do_sigpending",
82885 + .file = "include/linux/signal.h",
82886 + .param2 = 1,
82887 +};
82888 +
82889 +struct size_overflow_hash _000257_hash = {
82890 + .next = NULL,
82891 + .name = "keyctl_describe_key",
82892 + .file = "security/keys/keyctl.c",
82893 + .param3 = 1,
82894 +};
82895 +
82896 +struct size_overflow_hash _000258_hash = {
82897 + .next = NULL,
82898 + .name = "keyctl_get_security",
82899 + .file = "security/keys/keyctl.c",
82900 + .param3 = 1,
82901 +};
82902 +
82903 +struct size_overflow_hash _000259_hash = {
82904 + .next = NULL,
82905 + .name = "keyring_read",
82906 + .file = "security/keys/keyring.c",
82907 + .param3 = 1,
82908 +};
82909 +
82910 +struct size_overflow_hash _000260_hash = {
82911 + .next = NULL,
82912 + .name = "kfifo_copy_to_user",
82913 + .file = "kernel/kfifo.c",
82914 + .param3 = 1,
82915 +};
82916 +
82917 +struct size_overflow_hash _000261_hash = {
82918 + .next = NULL,
82919 + .name = "mousedev_read",
82920 + .file = "drivers/input/mousedev.c",
82921 + .param3 = 1,
82922 +};
82923 +
82924 +struct size_overflow_hash _000262_hash = {
82925 + .next = NULL,
82926 + .name = "mpi_lshift_limbs",
82927 + .file = "lib/mpi/mpi-bit.c",
82928 + .param2 = 1,
82929 +};
82930 +
82931 +struct size_overflow_hash _000263_hash = {
82932 + .next = NULL,
82933 + .name = "neigh_hash_grow",
82934 + .file = "net/core/neighbour.c",
82935 + .param2 = 1,
82936 +};
82937 +
82938 +struct size_overflow_hash _000264_hash = {
82939 + .next = NULL,
82940 + .name = "posix_clock_register",
82941 + .file = "include/linux/posix-clock.h",
82942 + .param2 = 1,
82943 +};
82944 +
82945 +struct size_overflow_hash _000265_hash = {
82946 + .next = NULL,
82947 + .name = "__proc_file_read",
82948 + .file = "fs/proc/generic.c",
82949 + .param3 = 1,
82950 +};
82951 +
82952 +struct size_overflow_hash _000266_hash = {
82953 + .next = NULL,
82954 + .name = "read_profile",
82955 + .file = "kernel/profile.c",
82956 + .param3 = 1,
82957 +};
82958 +
82959 +struct size_overflow_hash _000267_hash = {
82960 + .next = NULL,
82961 + .name = "read_vmcore",
82962 + .file = "fs/proc/vmcore.c",
82963 + .param3 = 1,
82964 +};
82965 +
82966 +struct size_overflow_hash _000268_hash = {
82967 + .next = NULL,
82968 + .name = "redirected_tty_write",
82969 + .file = "drivers/tty/tty_io.c",
82970 + .param3 = 1,
82971 +};
82972 +
82973 +struct size_overflow_hash _000269_hash = {
82974 + .next = NULL,
82975 + .name = "__register_chrdev",
82976 + .file = "include/linux/fs.h",
82977 + .param2 = 1,
82978 + .param3 = 1,
82979 +};
82980 +
82981 +struct size_overflow_hash _000271_hash = {
82982 + .next = NULL,
82983 + .name = "request_key_auth_read",
82984 + .file = "security/keys/request_key_auth.c",
82985 + .param3 = 1,
82986 +};
82987 +
82988 +struct size_overflow_hash _000272_hash = {
82989 + .next = NULL,
82990 + .name = "shash_async_setkey",
82991 + .file = "crypto/shash.c",
82992 + .param3 = 1,
82993 +};
82994 +
82995 +struct size_overflow_hash _000273_hash = {
82996 + .next = NULL,
82997 + .name = "shash_compat_setkey",
82998 + .file = "crypto/shash.c",
82999 + .param3 = 1,
83000 +};
83001 +
83002 +struct size_overflow_hash _000274_hash = {
83003 + .next = NULL,
83004 + .name = "simple_read_from_buffer",
83005 + .file = "include/linux/fs.h",
83006 + .param2 = 1,
83007 + .param5 = 1,
83008 +};
83009 +
83010 +struct size_overflow_hash _000276_hash = {
83011 + .next = NULL,
83012 + .name = "store_ifalias",
83013 + .file = "net/core/net-sysfs.c",
83014 + .param4 = 1,
83015 +};
83016 +
83017 +struct size_overflow_hash _000277_hash = {
83018 + .next = NULL,
83019 + .name = "subbuf_read_actor",
83020 + .file = "kernel/relay.c",
83021 + .param3 = 1,
83022 +};
83023 +
83024 +struct size_overflow_hash _000278_hash = {
83025 + .next = NULL,
83026 + .name = "sys_fgetxattr",
83027 + .file = "include/linux/syscalls.h",
83028 + .param4 = 1,
83029 +};
83030 +
83031 +struct size_overflow_hash _000279_hash = {
83032 + .next = NULL,
83033 + .name = "sys_getxattr",
83034 + .file = "include/linux/syscalls.h",
83035 + .param4 = 1,
83036 +};
83037 +
83038 +struct size_overflow_hash _000280_hash = {
83039 + .next = NULL,
83040 + .name = "sys_kexec_load",
83041 + .file = "include/linux/syscalls.h",
83042 + .param2 = 1,
83043 +};
83044 +
83045 +struct size_overflow_hash _000281_hash = {
83046 + .next = NULL,
83047 + .name = "sys_lgetxattr",
83048 + .file = "include/linux/syscalls.h",
83049 + .param4 = 1,
83050 +};
83051 +
83052 +struct size_overflow_hash _000282_hash = {
83053 + .next = NULL,
83054 + .name = "sys_process_vm_readv",
83055 + .file = "include/linux/syscalls.h",
83056 + .param3 = 1,
83057 + .param5 = 1,
83058 +};
83059 +
83060 +struct size_overflow_hash _000284_hash = {
83061 + .next = NULL,
83062 + .name = "sys_process_vm_writev",
83063 + .file = "include/linux/syscalls.h",
83064 + .param3 = 1,
83065 + .param5 = 1,
83066 +};
83067 +
83068 +struct size_overflow_hash _000286_hash = {
83069 + .next = NULL,
83070 + .name = "sys_sched_getaffinity",
83071 + .file = "include/linux/syscalls.h",
83072 + .param2 = 1,
83073 +};
83074 +
83075 +struct size_overflow_hash _000287_hash = {
83076 + .next = NULL,
83077 + .name = "tomoyo_read_self",
83078 + .file = "security/tomoyo/securityfs_if.c",
83079 + .param3 = 1,
83080 +};
83081 +
83082 +struct size_overflow_hash _000288_hash = {
83083 + .next = NULL,
83084 + .name = "tpm_read",
83085 + .file = "drivers/char/tpm/tpm.c",
83086 + .param3 = 1,
83087 +};
83088 +
83089 +struct size_overflow_hash _000289_hash = {
83090 + .next = NULL,
83091 + .name = "user_read",
83092 + .file = "include/keys/user-type.h",
83093 + .param3 = 1,
83094 +};
83095 +
83096 +struct size_overflow_hash _000290_hash = {
83097 + .next = NULL,
83098 + .name = "vcs_read",
83099 + .file = "drivers/tty/vt/vc_screen.c",
83100 + .param3 = 1,
83101 +};
83102 +
83103 +struct size_overflow_hash _000291_hash = {
83104 + .next = NULL,
83105 + .name = "vfs_readv",
83106 + .file = "include/linux/fs.h",
83107 + .param3 = 1,
83108 +};
83109 +
83110 +struct size_overflow_hash _000292_hash = {
83111 + .next = NULL,
83112 + .name = "vfs_writev",
83113 + .file = "include/linux/fs.h",
83114 + .param3 = 1,
83115 +};
83116 +
83117 +struct size_overflow_hash _000293_hash = {
83118 + .next = NULL,
83119 + .name = "vga_arb_read",
83120 + .file = "drivers/gpu/vga/vgaarb.c",
83121 + .param3 = 1,
83122 +};
83123 +
83124 +struct size_overflow_hash _000294_hash = {
83125 + .next = NULL,
83126 + .name = "xz_dec_lzma2_create",
83127 + .file = "lib/xz/xz_dec_lzma2.c",
83128 + .param2 = 1,
83129 +};
83130 +
83131 +struct size_overflow_hash _000295_hash = {
83132 + .next = NULL,
83133 + .name = "aat2870_reg_read_file",
83134 + .file = "drivers/mfd/aat2870-core.c",
83135 + .param3 = 1,
83136 +};
83137 +
83138 +struct size_overflow_hash _000296_hash = {
83139 + .next = NULL,
83140 + .name = "__alloc_bootmem",
83141 + .file = "include/linux/bootmem.h",
83142 + .param1 = 1,
83143 +};
83144 +
83145 +struct size_overflow_hash _000297_hash = {
83146 + .next = NULL,
83147 + .name = "__alloc_bootmem_low",
83148 + .file = "include/linux/bootmem.h",
83149 + .param1 = 1,
83150 +};
83151 +
83152 +struct size_overflow_hash _000298_hash = {
83153 + .next = NULL,
83154 + .name = "__alloc_bootmem_node_nopanic",
83155 + .file = "include/linux/bootmem.h",
83156 + .param2 = 1,
83157 +};
83158 +
83159 +struct size_overflow_hash _000299_hash = {
83160 + .next = NULL,
83161 + .name = "blk_rq_map_kern",
83162 + .file = "include/linux/blkdev.h",
83163 + .param4 = 1,
83164 +};
83165 +
83166 +struct size_overflow_hash _000300_hash = {
83167 + .next = NULL,
83168 + .name = "cgroup_read_s64",
83169 + .file = "kernel/cgroup.c",
83170 + .param5 = 1,
83171 +};
83172 +
83173 +struct size_overflow_hash _000301_hash = {
83174 + .next = NULL,
83175 + .name = "cgroup_read_u64",
83176 + .file = "kernel/cgroup.c",
83177 + .param5 = 1,
83178 +};
83179 +
83180 +struct size_overflow_hash _000302_hash = {
83181 + .next = NULL,
83182 + .name = "cpuset_common_file_read",
83183 + .file = "kernel/cpuset.c",
83184 + .param5 = 1,
83185 +};
83186 +
83187 +struct size_overflow_hash _000303_hash = {
83188 + .next = NULL,
83189 + .name = "filter_read",
83190 + .file = "lib/dma-debug.c",
83191 + .param3 = 1,
83192 +};
83193 +
83194 +struct size_overflow_hash _000304_hash = {
83195 + .next = NULL,
83196 + .name = "ima_show_htable_value",
83197 + .file = "security/integrity/ima/ima_fs.c",
83198 + .param2 = 1,
83199 +};
83200 +
83201 +struct size_overflow_hash _000305_hash = {
83202 + .next = NULL,
83203 + .name = "kernel_readv",
83204 + .file = "fs/splice.c",
83205 + .param3 = 1,
83206 +};
83207 +
83208 +struct size_overflow_hash _000306_hash = {
83209 + .next = NULL,
83210 + .name = "__kfifo_to_user",
83211 + .file = "include/linux/kfifo.h",
83212 + .param3 = 1,
83213 +};
83214 +
83215 +struct size_overflow_hash _000307_hash = {
83216 + .next = NULL,
83217 + .name = "__kfifo_to_user_r",
83218 + .file = "include/linux/kfifo.h",
83219 + .param3 = 1,
83220 +};
83221 +
83222 +struct size_overflow_hash _000308_hash = {
83223 + .next = NULL,
83224 + .name = "mqueue_read_file",
83225 + .file = "ipc/mqueue.c",
83226 + .param3 = 1,
83227 +};
83228 +
83229 +struct size_overflow_hash _000309_hash = {
83230 + .next = NULL,
83231 + .name = "oom_adjust_read",
83232 + .file = "fs/proc/base.c",
83233 + .param3 = 1,
83234 +};
83235 +
83236 +struct size_overflow_hash _000310_hash = {
83237 + .next = NULL,
83238 + .name = "oom_score_adj_read",
83239 + .file = "fs/proc/base.c",
83240 + .param3 = 1,
83241 +};
83242 +
83243 +struct size_overflow_hash _000311_hash = {
83244 + .next = NULL,
83245 + .name = "pm_qos_power_read",
83246 + .file = "kernel/power/qos.c",
83247 + .param3 = 1,
83248 +};
83249 +
83250 +struct size_overflow_hash _000312_hash = {
83251 + .next = NULL,
83252 + .name = "proc_coredump_filter_read",
83253 + .file = "fs/proc/base.c",
83254 + .param3 = 1,
83255 +};
83256 +
83257 +struct size_overflow_hash _000313_hash = {
83258 + .next = NULL,
83259 + .name = "proc_fdinfo_read",
83260 + .file = "fs/proc/base.c",
83261 + .param3 = 1,
83262 +};
83263 +
83264 +struct size_overflow_hash _000314_hash = {
83265 + .next = NULL,
83266 + .name = "proc_info_read",
83267 + .file = "fs/proc/base.c",
83268 + .param3 = 1,
83269 +};
83270 +
83271 +struct size_overflow_hash _000315_hash = {
83272 + .next = NULL,
83273 + .name = "proc_loginuid_read",
83274 + .file = "fs/proc/base.c",
83275 + .param3 = 1,
83276 +};
83277 +
83278 +struct size_overflow_hash _000316_hash = {
83279 + .next = NULL,
83280 + .name = "proc_pid_attr_read",
83281 + .file = "fs/proc/base.c",
83282 + .param3 = 1,
83283 +};
83284 +
83285 +struct size_overflow_hash _000317_hash = {
83286 + .next = NULL,
83287 + .name = "proc_sessionid_read",
83288 + .file = "fs/proc/base.c",
83289 + .param3 = 1,
83290 +};
83291 +
83292 +struct size_overflow_hash _000318_hash = {
83293 + .next = NULL,
83294 + .name = "pstore_file_read",
83295 + .file = "fs/pstore/inode.c",
83296 + .param3 = 1,
83297 +};
83298 +
83299 +struct size_overflow_hash _000319_hash = {
83300 + .next = NULL,
83301 + .name = "read_enabled_file_bool",
83302 + .file = "kernel/kprobes.c",
83303 + .param3 = 1,
83304 +};
83305 +
83306 +struct size_overflow_hash _000320_hash = {
83307 + .next = NULL,
83308 + .name = "read_file_blob",
83309 + .file = "fs/debugfs/file.c",
83310 + .param3 = 1,
83311 +};
83312 +
83313 +struct size_overflow_hash _000321_hash = {
83314 + .next = NULL,
83315 + .name = "read_file_bool",
83316 + .file = "fs/debugfs/file.c",
83317 + .param3 = 1,
83318 +};
83319 +
83320 +struct size_overflow_hash _000322_hash = {
83321 + .next = NULL,
83322 + .name = "read_from_oldmem",
83323 + .file = "fs/proc/vmcore.c",
83324 + .param2 = 1,
83325 +};
83326 +
83327 +struct size_overflow_hash _000323_hash = {
83328 + .next = NULL,
83329 + .name = "read_oldmem",
83330 + .file = "drivers/char/mem.c",
83331 + .param3 = 1,
83332 +};
83333 +
83334 +struct size_overflow_hash _000324_hash = {
83335 + .next = NULL,
83336 + .name = "res_counter_read",
83337 + .file = "include/linux/res_counter.h",
83338 + .param4 = 1,
83339 +};
83340 +
83341 +struct size_overflow_hash _000325_hash = {
83342 + .next = NULL,
83343 + .name = "sel_read_avc_cache_threshold",
83344 + .file = "security/selinux/selinuxfs.c",
83345 + .param3 = 1,
83346 +};
83347 +
83348 +struct size_overflow_hash _000326_hash = {
83349 + .next = NULL,
83350 + .name = "sel_read_avc_hash_stats",
83351 + .file = "security/selinux/selinuxfs.c",
83352 + .param3 = 1,
83353 +};
83354 +
83355 +struct size_overflow_hash _000327_hash = {
83356 + .next = NULL,
83357 + .name = "sel_read_bool",
83358 + .file = "security/selinux/selinuxfs.c",
83359 + .param3 = 1,
83360 +};
83361 +
83362 +struct size_overflow_hash _000328_hash = {
83363 + .next = NULL,
83364 + .name = "sel_read_checkreqprot",
83365 + .file = "security/selinux/selinuxfs.c",
83366 + .param3 = 1,
83367 +};
83368 +
83369 +struct size_overflow_hash _000329_hash = {
83370 + .next = NULL,
83371 + .name = "sel_read_class",
83372 + .file = "security/selinux/selinuxfs.c",
83373 + .param3 = 1,
83374 +};
83375 +
83376 +struct size_overflow_hash _000330_hash = {
83377 + .next = NULL,
83378 + .name = "sel_read_enforce",
83379 + .file = "security/selinux/selinuxfs.c",
83380 + .param3 = 1,
83381 +};
83382 +
83383 +struct size_overflow_hash _000331_hash = {
83384 + .next = NULL,
83385 + .name = "sel_read_handle_status",
83386 + .file = "security/selinux/selinuxfs.c",
83387 + .param3 = 1,
83388 +};
83389 +
83390 +struct size_overflow_hash _000332_hash = {
83391 + .next = NULL,
83392 + .name = "sel_read_handle_unknown",
83393 + .file = "security/selinux/selinuxfs.c",
83394 + .param3 = 1,
83395 +};
83396 +
83397 +struct size_overflow_hash _000333_hash = {
83398 + .next = NULL,
83399 + .name = "sel_read_initcon",
83400 + .file = "security/selinux/selinuxfs.c",
83401 + .param3 = 1,
83402 +};
83403 +
83404 +struct size_overflow_hash _000334_hash = {
83405 + .next = NULL,
83406 + .name = "sel_read_mls",
83407 + .file = "security/selinux/selinuxfs.c",
83408 + .param3 = 1,
83409 +};
83410 +
83411 +struct size_overflow_hash _000335_hash = {
83412 + .next = NULL,
83413 + .name = "sel_read_perm",
83414 + .file = "security/selinux/selinuxfs.c",
83415 + .param3 = 1,
83416 +};
83417 +
83418 +struct size_overflow_hash _000336_hash = {
83419 + .next = NULL,
83420 + .name = "sel_read_policy",
83421 + .file = "security/selinux/selinuxfs.c",
83422 + .param3 = 1,
83423 +};
83424 +
83425 +struct size_overflow_hash _000337_hash = {
83426 + .next = NULL,
83427 + .name = "sel_read_policycap",
83428 + .file = "security/selinux/selinuxfs.c",
83429 + .param3 = 1,
83430 +};
83431 +
83432 +struct size_overflow_hash _000338_hash = {
83433 + .next = NULL,
83434 + .name = "sel_read_policyvers",
83435 + .file = "security/selinux/selinuxfs.c",
83436 + .param3 = 1,
83437 +};
83438 +
83439 +struct size_overflow_hash _000339_hash = {
83440 + .next = NULL,
83441 + .name = "simple_attr_read",
83442 + .file = "include/linux/fs.h",
83443 + .param3 = 1,
83444 +};
83445 +
83446 +struct size_overflow_hash _000340_hash = {
83447 + .next = NULL,
83448 + .name = "simple_transaction_read",
83449 + .file = "include/linux/fs.h",
83450 + .param3 = 1,
83451 +};
83452 +
83453 +struct size_overflow_hash _000341_hash = {
83454 + .next = NULL,
83455 + .name = "smk_read_ambient",
83456 + .file = "security/smack/smackfs.c",
83457 + .param3 = 1,
83458 +};
83459 +
83460 +struct size_overflow_hash _000342_hash = {
83461 + .next = NULL,
83462 + .name = "smk_read_direct",
83463 + .file = "security/smack/smackfs.c",
83464 + .param3 = 1,
83465 +};
83466 +
83467 +struct size_overflow_hash _000343_hash = {
83468 + .next = NULL,
83469 + .name = "smk_read_doi",
83470 + .file = "security/smack/smackfs.c",
83471 + .param3 = 1,
83472 +};
83473 +
83474 +struct size_overflow_hash _000344_hash = {
83475 + .next = NULL,
83476 + .name = "smk_read_logging",
83477 + .file = "security/smack/smackfs.c",
83478 + .param3 = 1,
83479 +};
83480 +
83481 +struct size_overflow_hash _000345_hash = {
83482 + .next = NULL,
83483 + .name = "smk_read_onlycap",
83484 + .file = "security/smack/smackfs.c",
83485 + .param3 = 1,
83486 +};
83487 +
83488 +struct size_overflow_hash _000346_hash = {
83489 + .next = NULL,
83490 + .name = "snapshot_read",
83491 + .file = "kernel/power/user.c",
83492 + .param3 = 1,
83493 +};
83494 +
83495 +struct size_overflow_hash _000347_hash = {
83496 + .next = NULL,
83497 + .name = "supply_map_read_file",
83498 + .file = "drivers/regulator/core.c",
83499 + .param3 = 1,
83500 +};
83501 +
83502 +struct size_overflow_hash _000348_hash = {
83503 + .next = NULL,
83504 + .name = "sysfs_read_file",
83505 + .file = "fs/sysfs/file.c",
83506 + .param3 = 1,
83507 +};
83508 +
83509 +struct size_overflow_hash _000349_hash = {
83510 + .next = NULL,
83511 + .name = "sys_preadv",
83512 + .file = "include/linux/syscalls.h",
83513 + .param3 = 1,
83514 +};
83515 +
83516 +struct size_overflow_hash _000350_hash = {
83517 + .next = NULL,
83518 + .name = "sys_pwritev",
83519 + .file = "include/linux/syscalls.h",
83520 + .param3 = 1,
83521 +};
83522 +
83523 +struct size_overflow_hash _000351_hash = {
83524 + .next = NULL,
83525 + .name = "sys_readv",
83526 + .file = "include/linux/syscalls.h",
83527 + .param3 = 1,
83528 +};
83529 +
83530 +struct size_overflow_hash _000352_hash = {
83531 + .next = NULL,
83532 + .name = "sys_rt_sigpending",
83533 + .file = "include/linux/syscalls.h",
83534 + .param2 = 1,
83535 +};
83536 +
83537 +struct size_overflow_hash _000353_hash = {
83538 + .next = NULL,
83539 + .name = "sys_writev",
83540 + .file = "include/linux/syscalls.h",
83541 + .param3 = 1,
83542 +};
83543 +
83544 +struct size_overflow_hash _000354_hash = {
83545 + .next = NULL,
83546 + .name = "ima_show_htable_violations",
83547 + .file = "security/integrity/ima/ima_fs.c",
83548 + .param3 = 1,
83549 +};
83550 +
83551 +struct size_overflow_hash _000355_hash = {
83552 + .next = NULL,
83553 + .name = "ima_show_measurements_count",
83554 + .file = "security/integrity/ima/ima_fs.c",
83555 + .param3 = 1,
83556 +};
83557 +
83558 +struct size_overflow_hash _000356_hash = {
83559 + .next = NULL,
83560 + .name = "alloc_cpu_rmap",
83561 + .file = "include/linux/cpu_rmap.h",
83562 + .param1 = 1,
83563 +};
83564 +
83565 +struct size_overflow_hash _000357_hash = {
83566 + .next = NULL,
83567 + .name = "alloc_page_cgroup",
83568 + .file = "mm/page_cgroup.c",
83569 + .param1 = 1,
83570 +};
83571 +
83572 +struct size_overflow_hash _000358_hash = {
83573 + .next = NULL,
83574 + .name = "alloc_sched_domains",
83575 + .file = "include/linux/sched.h",
83576 + .param1 = 1,
83577 +};
83578 +
83579 +struct size_overflow_hash _000359_hash = {
83580 + .next = NULL,
83581 + .name = "compat_rw_copy_check_uvector",
83582 + .file = "include/linux/compat.h",
83583 + .param3 = 1,
83584 +};
83585 +
83586 +struct size_overflow_hash _000360_hash = {
83587 + .next = NULL,
83588 + .name = "compat_sys_kexec_load",
83589 + .file = "include/linux/kexec.h",
83590 + .param2 = 1,
83591 +};
83592 +
83593 +struct size_overflow_hash _000361_hash = {
83594 + .next = NULL,
83595 + .name = "compat_sys_semtimedop",
83596 + .file = "include/linux/compat.h",
83597 + .param3 = 1,
83598 +};
83599 +
83600 +struct size_overflow_hash _000362_hash = {
83601 + .next = NULL,
83602 + .name = "copy_from_user",
83603 + .file = "arch/x86/include/asm/uaccess_64.h",
83604 + .param3 = 1,
83605 +};
83606 +
83607 +struct size_overflow_hash _000363_hash = {
83608 + .next = NULL,
83609 + .name = "__copy_from_user",
83610 + .file = "arch/x86/include/asm/uaccess_64.h",
83611 + .param3 = 1,
83612 +};
83613 +
83614 +struct size_overflow_hash _000364_hash = {
83615 + .next = NULL,
83616 + .name = "__copy_from_user_inatomic",
83617 + .file = "arch/x86/include/asm/uaccess_64.h",
83618 + .param3 = 1,
83619 +};
83620 +
83621 +struct size_overflow_hash _000365_hash = {
83622 + .next = NULL,
83623 + .name = "__copy_from_user_nocache",
83624 + .file = "arch/x86/include/asm/uaccess_64.h",
83625 + .param3 = 1,
83626 +};
83627 +
83628 +struct size_overflow_hash _000366_hash = {
83629 + .next = NULL,
83630 + .name = "__copy_in_user",
83631 + .file = "arch/x86/include/asm/uaccess_64.h",
83632 + .param3 = 1,
83633 +};
83634 +
83635 +struct size_overflow_hash _000367_hash = {
83636 + .next = NULL,
83637 + .name = "copy_in_user",
83638 + .file = "arch/x86/include/asm/uaccess_64.h",
83639 + .param3 = 1,
83640 +};
83641 +
83642 +struct size_overflow_hash _000368_hash = {
83643 + .next = NULL,
83644 + .name = "__copy_to_user",
83645 + .file = "arch/x86/include/asm/uaccess_64.h",
83646 + .param3 = 1,
83647 +};
83648 +
83649 +struct size_overflow_hash _000369_hash = {
83650 + .next = NULL,
83651 + .name = "copy_to_user",
83652 + .file = "arch/x86/include/asm/uaccess_64.h",
83653 + .param3 = 1,
83654 +};
83655 +
83656 +struct size_overflow_hash _000370_hash = {
83657 + .next = NULL,
83658 + .name = "__copy_to_user_inatomic",
83659 + .file = "arch/x86/include/asm/uaccess_64.h",
83660 + .param3 = 1,
83661 +};
83662 +
83663 +struct size_overflow_hash _000371_hash = {
83664 + .next = NULL,
83665 + .name = "kmalloc_node",
83666 + .file = "include/linux/slub_def.h",
83667 + .param1 = 1,
83668 +};
83669 +
83670 +struct size_overflow_hash _000372_hash = {
83671 + .next = NULL,
83672 + .name = "pcpu_alloc_bootmem",
83673 + .file = "arch/x86/kernel/setup_percpu.c",
83674 + .param2 = 1,
83675 +};
83676 +
83677 +struct size_overflow_hash _000373_hash = {
83678 + .next = NULL,
83679 + .name = "sys32_rt_sigpending",
83680 + .file = "arch/x86/include/asm/sys_ia32.h",
83681 + .param2 = 1,
83682 +};
83683 +
83684 +struct size_overflow_hash _000374_hash = {
83685 + .next = NULL,
83686 + .name = "tunables_read",
83687 + .file = "arch/x86/platform/uv/tlb_uv.c",
83688 + .param3 = 1,
83689 +};
83690 +
83691 +struct size_overflow_hash _000375_hash = {
83692 + .next = NULL,
83693 + .name = "compat_do_readv_writev",
83694 + .file = "fs/compat.c",
83695 + .param4 = 1,
83696 +};
83697 +
83698 +struct size_overflow_hash _000376_hash = {
83699 + .next = NULL,
83700 + .name = "compat_keyctl_instantiate_key_iov",
83701 + .file = "security/keys/compat.c",
83702 + .param3 = 1,
83703 +};
83704 +
83705 +struct size_overflow_hash _000377_hash = {
83706 + .next = NULL,
83707 + .name = "compat_process_vm_rw",
83708 + .file = "mm/process_vm_access.c",
83709 + .param3 = 1,
83710 + .param5 = 1,
83711 +};
83712 +
83713 +struct size_overflow_hash _000379_hash = {
83714 + .next = NULL,
83715 + .name = "do_pages_stat",
83716 + .file = "mm/migrate.c",
83717 + .param2 = 1,
83718 +};
83719 +
83720 +struct size_overflow_hash _000380_hash = {
83721 + .next = NULL,
83722 + .name = "kzalloc_node",
83723 + .file = "include/linux/slab.h",
83724 + .param1 = 1,
83725 +};
83726 +
83727 +struct size_overflow_hash _000381_hash = {
83728 + .next = NULL,
83729 + .name = "pcpu_fc_alloc",
83730 + .file = "arch/x86/kernel/setup_percpu.c",
83731 + .param2 = 1,
83732 +};
83733 +
83734 +struct size_overflow_hash _000382_hash = {
83735 + .next = NULL,
83736 + .name = "ptc_proc_write",
83737 + .file = "arch/x86/platform/uv/tlb_uv.c",
83738 + .param3 = 1,
83739 +};
83740 +
83741 +struct size_overflow_hash _000383_hash = {
83742 + .next = NULL,
83743 + .name = "tunables_write",
83744 + .file = "arch/x86/platform/uv/tlb_uv.c",
83745 + .param3 = 1,
83746 +};
83747 +
83748 +struct size_overflow_hash _000384_hash = {
83749 + .next = NULL,
83750 + .name = "__alloc_bootmem_low_node",
83751 + .file = "include/linux/bootmem.h",
83752 + .param2 = 1,
83753 +};
83754 +
83755 +struct size_overflow_hash _000385_hash = {
83756 + .next = NULL,
83757 + .name = "__alloc_bootmem_node",
83758 + .file = "include/linux/bootmem.h",
83759 + .param2 = 1,
83760 +};
83761 +
83762 +struct size_overflow_hash _000386_hash = {
83763 + .next = NULL,
83764 + .name = "compat_readv",
83765 + .file = "fs/compat.c",
83766 + .param3 = 1,
83767 +};
83768 +
83769 +struct size_overflow_hash _000387_hash = {
83770 + .next = NULL,
83771 + .name = "compat_sys_keyctl",
83772 + .file = "include/linux/compat.h",
83773 + .param4 = 1,
83774 +};
83775 +
83776 +struct size_overflow_hash _000388_hash = {
83777 + .next = NULL,
83778 + .name = "compat_sys_process_vm_readv",
83779 + .file = "include/linux/compat.h",
83780 + .param3 = 1,
83781 + .param5 = 1,
83782 +};
83783 +
83784 +struct size_overflow_hash _000390_hash = {
83785 + .next = NULL,
83786 + .name = "compat_sys_process_vm_writev",
83787 + .file = "include/linux/compat.h",
83788 + .param3 = 1,
83789 + .param5 = 1,
83790 +};
83791 +
83792 +struct size_overflow_hash _000392_hash = {
83793 + .next = NULL,
83794 + .name = "compat_writev",
83795 + .file = "fs/compat.c",
83796 + .param3 = 1,
83797 +};
83798 +
83799 +struct size_overflow_hash _000393_hash = {
83800 + .next = NULL,
83801 + .name = "sys_move_pages",
83802 + .file = "include/linux/syscalls.h",
83803 + .param2 = 1,
83804 +};
83805 +
83806 +struct size_overflow_hash _000394_hash = {
83807 + .next = NULL,
83808 + .name = "__alloc_bootmem_node_high",
83809 + .file = "include/linux/bootmem.h",
83810 + .param2 = 1,
83811 +};
83812 +
83813 +struct size_overflow_hash _000395_hash = {
83814 + .next = NULL,
83815 + .name = "compat_sys_move_pages",
83816 + .file = "include/linux/compat.h",
83817 + .param2 = 1,
83818 +};
83819 +
83820 +struct size_overflow_hash _000396_hash = {
83821 + .next = NULL,
83822 + .name = "compat_sys_preadv",
83823 + .file = "include/linux/compat.h",
83824 + .param3 = 1,
83825 +};
83826 +
83827 +struct size_overflow_hash _000397_hash = {
83828 + .next = NULL,
83829 + .name = "compat_sys_pwritev",
83830 + .file = "include/linux/compat.h",
83831 + .param3 = 1,
83832 +};
83833 +
83834 +struct size_overflow_hash _000398_hash = {
83835 + .next = NULL,
83836 + .name = "compat_sys_readv",
83837 + .file = "include/linux/compat.h",
83838 + .param3 = 1,
83839 +};
83840 +
83841 +struct size_overflow_hash _000399_hash = {
83842 + .next = NULL,
83843 + .name = "compat_sys_writev",
83844 + .file = "include/linux/compat.h",
83845 + .param3 = 1,
83846 +};
83847 +
83848 +struct size_overflow_hash _000400_hash = {
83849 + .next = NULL,
83850 + .name = "sparse_early_usemaps_alloc_node",
83851 + .file = "mm/sparse.c",
83852 + .param4 = 1,
83853 +};
83854 +
83855 +struct size_overflow_hash _000401_hash = {
83856 + .next = NULL,
83857 + .name = "__earlyonly_bootmem_alloc",
83858 + .file = "mm/sparse-vmemmap.c",
83859 + .param2 = 1,
83860 +};
83861 +
83862 +struct size_overflow_hash _000402_hash = {
83863 + .next = NULL,
83864 + .name = "sparse_mem_maps_populate_node",
83865 + .file = "include/linux/mm.h",
83866 + .param4 = 1,
83867 +};
83868 +
83869 +struct size_overflow_hash _000403_hash = {
83870 + .next = NULL,
83871 + .name = "vmemmap_alloc_block",
83872 + .file = "include/linux/mm.h",
83873 + .param1 = 1,
83874 +};
83875 +
83876 +struct size_overflow_hash _000404_hash = {
83877 + .next = NULL,
83878 + .name = "sparse_early_mem_maps_alloc_node",
83879 + .file = "mm/sparse.c",
83880 + .param4 = 1,
83881 +};
83882 +
83883 +struct size_overflow_hash _000405_hash = {
83884 + .next = NULL,
83885 + .name = "vmemmap_alloc_block_buf",
83886 + .file = "include/linux/mm.h",
83887 + .param1 = 1,
83888 +};
83889 +
83890 +struct size_overflow_hash _000406_hash = {
83891 + .next = NULL,
83892 + .name = "acpi_battery_write_alarm",
83893 + .file = "drivers/acpi/battery.c",
83894 + .param3 = 1,
83895 +};
83896 +
83897 +struct size_overflow_hash _000407_hash = {
83898 + .next = NULL,
83899 + .name = "acpi_battery_write_alarm",
83900 + .file = "drivers/acpi/sbs.c",
83901 + .param3 = 1,
83902 +};
83903 +
83904 +struct size_overflow_hash _000408_hash = {
83905 + .next = NULL,
83906 + .name = "ad7879_spi_xfer",
83907 + .file = "drivers/input/touchscreen/ad7879-spi.c",
83908 + .param3 = 1,
83909 +};
83910 +
83911 +struct size_overflow_hash _000409_hash = {
83912 + .next = NULL,
83913 + .name = "add_port",
83914 + .file = "drivers/char/virtio_console.c",
83915 + .param2 = 1,
83916 +};
83917 +
83918 +struct size_overflow_hash _000410_hash = {
83919 + .next = NULL,
83920 + .name = "addtgt",
83921 + .file = "drivers/block/aoe/aoecmd.c",
83922 + .param3 = 1,
83923 +};
83924 +
83925 +struct size_overflow_hash _000411_hash = {
83926 + .next = NULL,
83927 + .name = "adu_read",
83928 + .file = "drivers/usb/misc/adutux.c",
83929 + .param3 = 1,
83930 +};
83931 +
83932 +struct size_overflow_hash _000412_hash = {
83933 + .next = NULL,
83934 + .name = "adu_write",
83935 + .file = "drivers/usb/misc/adutux.c",
83936 + .param3 = 1,
83937 +};
83938 +
83939 +struct size_overflow_hash _000413_hash = {
83940 + .next = NULL,
83941 + .name = "aer_inject_write",
83942 + .file = "drivers/pci/pcie/aer/aer_inject.c",
83943 + .param3 = 1,
83944 +};
83945 +
83946 +struct size_overflow_hash _000414_hash = {
83947 + .next = NULL,
83948 + .name = "aes_decrypt_fail_read",
83949 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83950 + .param3 = 1,
83951 +};
83952 +
83953 +struct size_overflow_hash _000415_hash = {
83954 + .next = NULL,
83955 + .name = "aes_decrypt_interrupt_read",
83956 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83957 + .param3 = 1,
83958 +};
83959 +
83960 +struct size_overflow_hash _000416_hash = {
83961 + .next = NULL,
83962 + .name = "aes_decrypt_packets_read",
83963 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83964 + .param3 = 1,
83965 +};
83966 +
83967 +struct size_overflow_hash _000417_hash = {
83968 + .next = NULL,
83969 + .name = "aes_encrypt_fail_read",
83970 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83971 + .param3 = 1,
83972 +};
83973 +
83974 +struct size_overflow_hash _000418_hash = {
83975 + .next = NULL,
83976 + .name = "aes_encrypt_interrupt_read",
83977 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83978 + .param3 = 1,
83979 +};
83980 +
83981 +struct size_overflow_hash _000419_hash = {
83982 + .next = NULL,
83983 + .name = "aes_encrypt_packets_read",
83984 + .file = "drivers/net/wireless/wl1251/debugfs.c",
83985 + .param3 = 1,
83986 +};
83987 +
83988 +struct size_overflow_hash _000420_hash = {
83989 + .next = NULL,
83990 + .name = "afs_alloc_flat_call",
83991 + .file = "fs/afs/rxrpc.c",
83992 + .param2 = 1,
83993 + .param3 = 1,
83994 +};
83995 +
83996 +struct size_overflow_hash _000422_hash = {
83997 + .next = NULL,
83998 + .name = "afs_cell_alloc",
83999 + .file = "fs/afs/cell.c",
84000 + .param2 = 1,
84001 +};
84002 +
84003 +struct size_overflow_hash _000423_hash = {
84004 + .next = NULL,
84005 + .name = "afs_proc_cells_write",
84006 + .file = "fs/afs/proc.c",
84007 + .param3 = 1,
84008 +};
84009 +
84010 +struct size_overflow_hash _000424_hash = {
84011 + .next = NULL,
84012 + .name = "afs_proc_rootcell_write",
84013 + .file = "fs/afs/proc.c",
84014 + .param3 = 1,
84015 +};
84016 +
84017 +struct size_overflow_hash _000425_hash = {
84018 + .next = NULL,
84019 + .name = "aggr_recv_addba_req_evt",
84020 + .file = "drivers/net/wireless/ath/ath6kl/txrx.c",
84021 + .param4 = 1,
84022 +};
84023 +
84024 +struct size_overflow_hash _000426_hash = {
84025 + .next = NULL,
84026 + .name = "agp_3_5_isochronous_node_enable",
84027 + .file = "drivers/char/agp/isoch.c",
84028 + .param3 = 1,
84029 +};
84030 +
84031 +struct size_overflow_hash _000427_hash = {
84032 + .next = NULL,
84033 + .name = "agp_alloc_page_array",
84034 + .file = "drivers/char/agp/generic.c",
84035 + .param1 = 1,
84036 +};
84037 +
84038 +struct size_overflow_hash _000428_hash = {
84039 + .next = NULL,
84040 + .name = "alg_setkey",
84041 + .file = "crypto/af_alg.c",
84042 + .param3 = 1,
84043 +};
84044 +
84045 +struct size_overflow_hash _000429_hash = {
84046 + .next = NULL,
84047 + .name = "alloc_buf",
84048 + .file = "drivers/char/virtio_console.c",
84049 + .param1 = 1,
84050 +};
84051 +
84052 +struct size_overflow_hash _000430_hash = {
84053 + .next = NULL,
84054 + .name = "alloc_context",
84055 + .file = "drivers/md/dm-raid1.c",
84056 + .param1 = 1,
84057 +};
84058 +
84059 +struct size_overflow_hash _000431_hash = {
84060 + .next = NULL,
84061 + .name = "alloc_context",
84062 + .file = "drivers/md/dm-stripe.c",
84063 + .param1 = 1,
84064 +};
84065 +
84066 +struct size_overflow_hash _000432_hash = {
84067 + .next = NULL,
84068 + .name = "__alloc_dev_table",
84069 + .file = "fs/exofs/super.c",
84070 + .param2 = 1,
84071 +};
84072 +
84073 +struct size_overflow_hash _000433_hash = {
84074 + .next = NULL,
84075 + .name = "alloc_ep_req",
84076 + .file = "drivers/usb/gadget/f_midi.c",
84077 + .param2 = 1,
84078 +};
84079 +
84080 +struct size_overflow_hash _000434_hash = {
84081 + .next = NULL,
84082 + .name = "alloc_flex_gd",
84083 + .file = "fs/ext4/resize.c",
84084 + .param1 = 1,
84085 +};
84086 +
84087 +struct size_overflow_hash _000435_hash = {
84088 + .next = NULL,
84089 + .name = "__alloc_objio_seg",
84090 + .file = "fs/nfs/objlayout/objio_osd.c",
84091 + .param1 = 1,
84092 +};
84093 +
84094 +struct size_overflow_hash _000436_hash = {
84095 + .next = NULL,
84096 + .name = "alloc_one_pg_vec_page",
84097 + .file = "net/packet/af_packet.c",
84098 + .param1 = 1,
84099 +};
84100 +
84101 +struct size_overflow_hash _000437_hash = {
84102 + .next = NULL,
84103 + .name = "alloc_ring",
84104 + .file = "drivers/net/ethernet/chelsio/cxgb3/sge.c",
84105 + .param2 = 1,
84106 + .param4 = 1,
84107 +};
84108 +
84109 +struct size_overflow_hash _000438_hash = {
84110 + .next = NULL,
84111 + .name = "alloc_ring",
84112 + .file = "drivers/net/ethernet/chelsio/cxgb4vf/sge.c",
84113 + .param2 = 1,
84114 + .param4 = 1,
84115 +};
84116 +
84117 +struct size_overflow_hash _000441_hash = {
84118 + .next = NULL,
84119 + .name = "alloc_ts_config",
84120 + .file = "include/linux/textsearch.h",
84121 + .param1 = 1,
84122 +};
84123 +
84124 +struct size_overflow_hash _000442_hash = {
84125 + .next = NULL,
84126 + .name = "altera_drscan",
84127 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
84128 + .param2 = 1,
84129 +};
84130 +
84131 +struct size_overflow_hash _000443_hash = {
84132 + .next = NULL,
84133 + .name = "altera_irscan",
84134 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
84135 + .param2 = 1,
84136 +};
84137 +
84138 +struct size_overflow_hash _000444_hash = {
84139 + .next = &_000066_hash,
84140 + .name = "altera_set_dr_post",
84141 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
84142 + .param2 = 1,
84143 +};
84144 +
84145 +struct size_overflow_hash _000445_hash = {
84146 + .next = NULL,
84147 + .name = "altera_set_dr_pre",
84148 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
84149 + .param2 = 1,
84150 +};
84151 +
84152 +struct size_overflow_hash _000446_hash = {
84153 + .next = NULL,
84154 + .name = "altera_set_ir_post",
84155 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
84156 + .param2 = 1,
84157 +};
84158 +
84159 +struct size_overflow_hash _000447_hash = {
84160 + .next = NULL,
84161 + .name = "altera_set_ir_pre",
84162 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
84163 + .param2 = 1,
84164 +};
84165 +
84166 +struct size_overflow_hash _000448_hash = {
84167 + .next = NULL,
84168 + .name = "altera_swap_dr",
84169 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
84170 + .param2 = 1,
84171 +};
84172 +
84173 +struct size_overflow_hash _000449_hash = {
84174 + .next = NULL,
84175 + .name = "altera_swap_ir",
84176 + .file = "drivers/misc/altera-stapl/altera-jtag.c",
84177 + .param2 = 1,
84178 +};
84179 +
84180 +struct size_overflow_hash _000450_hash = {
84181 + .next = NULL,
84182 + .name = "aoedev_flush",
84183 + .file = "drivers/block/aoe/aoedev.c",
84184 + .param2 = 1,
84185 +};
84186 +
84187 +struct size_overflow_hash _000451_hash = {
84188 + .next = NULL,
84189 + .name = "asd_store_update_bios",
84190 + .file = "drivers/scsi/aic94xx/aic94xx_init.c",
84191 + .param4 = 1,
84192 +};
84193 +
84194 +struct size_overflow_hash _000452_hash = {
84195 + .next = NULL,
84196 + .name = "asix_read_cmd",
84197 + .file = "drivers/net/usb/asix.c",
84198 + .param5 = 1,
84199 +};
84200 +
84201 +struct size_overflow_hash _000453_hash = {
84202 + .next = NULL,
84203 + .name = "asix_write_cmd",
84204 + .file = "drivers/net/usb/asix.c",
84205 + .param5 = 1,
84206 +};
84207 +
84208 +struct size_overflow_hash _000454_hash = {
84209 + .next = NULL,
84210 + .name = "asn1_octets_decode",
84211 + .file = "net/ipv4/netfilter/nf_nat_snmp_basic.c",
84212 + .param2 = 1,
84213 +};
84214 +
84215 +struct size_overflow_hash _000455_hash = {
84216 + .next = NULL,
84217 + .name = "asn1_oid_decode",
84218 + .file = "net/ipv4/netfilter/nf_nat_snmp_basic.c",
84219 + .param2 = 1,
84220 +};
84221 +
84222 +struct size_overflow_hash _000456_hash = {
84223 + .next = NULL,
84224 + .name = "asn1_oid_decode",
84225 + .file = "fs/cifs/asn1.c",
84226 + .param2 = 1,
84227 +};
84228 +
84229 +struct size_overflow_hash _000457_hash = {
84230 + .next = NULL,
84231 + .name = "ath6kl_add_bss_if_needed",
84232 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
84233 + .param6 = 1,
84234 +};
84235 +
84236 +struct size_overflow_hash _000458_hash = {
84237 + .next = NULL,
84238 + .name = "ath6kl_debug_roam_tbl_event",
84239 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
84240 + .param3 = 1,
84241 +};
84242 +
84243 +struct size_overflow_hash _000459_hash = {
84244 + .next = NULL,
84245 + .name = "ath6kl_disconnect_timeout_read",
84246 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
84247 + .param3 = 1,
84248 +};
84249 +
84250 +struct size_overflow_hash _000460_hash = {
84251 + .next = NULL,
84252 + .name = "ath6kl_endpoint_stats_read",
84253 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
84254 + .param3 = 1,
84255 +};
84256 +
84257 +struct size_overflow_hash _000461_hash = {
84258 + .next = NULL,
84259 + .name = "ath6kl_fwlog_mask_read",
84260 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
84261 + .param3 = 1,
84262 +};
84263 +
84264 +struct size_overflow_hash _000462_hash = {
84265 + .next = NULL,
84266 + .name = "ath6kl_fwlog_read",
84267 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
84268 + .param3 = 1,
84269 +};
84270 +
84271 +struct size_overflow_hash _000463_hash = {
84272 + .next = NULL,
84273 + .name = "ath6kl_keepalive_read",
84274 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
84275 + .param3 = 1,
84276 +};
84277 +
84278 +struct size_overflow_hash _000464_hash = {
84279 + .next = NULL,
84280 + .name = "ath6kl_lrssi_roam_read",
84281 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
84282 + .param3 = 1,
84283 +};
84284 +
84285 +struct size_overflow_hash _000465_hash = {
84286 + .next = NULL,
84287 + .name = "ath6kl_regdump_read",
84288 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
84289 + .param3 = 1,
84290 +};
84291 +
84292 +struct size_overflow_hash _000466_hash = {
84293 + .next = NULL,
84294 + .name = "ath6kl_regread_read",
84295 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
84296 + .param3 = 1,
84297 +};
84298 +
84299 +struct size_overflow_hash _000467_hash = {
84300 + .next = NULL,
84301 + .name = "ath6kl_regwrite_read",
84302 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
84303 + .param3 = 1,
84304 +};
84305 +
84306 +struct size_overflow_hash _000468_hash = {
84307 + .next = NULL,
84308 + .name = "ath6kl_roam_table_read",
84309 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
84310 + .param3 = 1,
84311 +};
84312 +
84313 +struct size_overflow_hash _000469_hash = {
84314 + .next = NULL,
84315 + .name = "ath6kl_send_go_probe_resp",
84316 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
84317 + .param3 = 1,
84318 +};
84319 +
84320 +struct size_overflow_hash _000470_hash = {
84321 + .next = NULL,
84322 + .name = "ath6kl_set_ap_probe_resp_ies",
84323 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
84324 + .param3 = 1,
84325 +};
84326 +
84327 +struct size_overflow_hash _000471_hash = {
84328 + .next = NULL,
84329 + .name = "ath6kl_set_assoc_req_ies",
84330 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
84331 + .param3 = 1,
84332 +};
84333 +
84334 +struct size_overflow_hash _000472_hash = {
84335 + .next = NULL,
84336 + .name = "ath6kl_tm_rx_report_event",
84337 + .file = "drivers/net/wireless/ath/ath6kl/testmode.c",
84338 + .param3 = 1,
84339 +};
84340 +
84341 +struct size_overflow_hash _000473_hash = {
84342 + .next = NULL,
84343 + .name = "ath6kl_wmi_send_action_cmd",
84344 + .file = "drivers/net/wireless/ath/ath6kl/wmi.c",
84345 + .param7 = 1,
84346 +};
84347 +
84348 +struct size_overflow_hash _000474_hash = {
84349 + .next = NULL,
84350 + .name = "ath6kl_wmi_send_mgmt_cmd",
84351 + .file = "drivers/net/wireless/ath/ath6kl/wmi.c",
84352 + .param7 = 1,
84353 +};
84354 +
84355 +struct size_overflow_hash _000475_hash = {
84356 + .next = NULL,
84357 + .name = "ath9k_debugfs_read_buf",
84358 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
84359 + .param3 = 1,
84360 +};
84361 +
84362 +struct size_overflow_hash _000476_hash = {
84363 + .next = NULL,
84364 + .name = "atk_debugfs_ggrp_read",
84365 + .file = "drivers/hwmon/asus_atk0110.c",
84366 + .param3 = 1,
84367 +};
84368 +
84369 +struct size_overflow_hash _000477_hash = {
84370 + .next = NULL,
84371 + .name = "atm_get_addr",
84372 + .file = "net/atm/addr.c",
84373 + .param3 = 1,
84374 +};
84375 +
84376 +struct size_overflow_hash _000478_hash = {
84377 + .next = NULL,
84378 + .name = "attach_hdlc_protocol",
84379 + .file = "include/linux/hdlc.h",
84380 + .param3 = 1,
84381 +};
84382 +
84383 +struct size_overflow_hash _000479_hash = {
84384 + .next = NULL,
84385 + .name = "av7110_vbi_write",
84386 + .file = "drivers/media/dvb/ttpci/av7110_v4l.c",
84387 + .param3 = 1,
84388 +};
84389 +
84390 +struct size_overflow_hash _000480_hash = {
84391 + .next = NULL,
84392 + .name = "ax25_setsockopt",
84393 + .file = "net/ax25/af_ax25.c",
84394 + .param5 = 1,
84395 +};
84396 +
84397 +struct size_overflow_hash _000481_hash = {
84398 + .next = NULL,
84399 + .name = "b43_debugfs_read",
84400 + .file = "drivers/net/wireless/b43/debugfs.c",
84401 + .param3 = 1,
84402 +};
84403 +
84404 +struct size_overflow_hash _000482_hash = {
84405 + .next = NULL,
84406 + .name = "b43_debugfs_write",
84407 + .file = "drivers/net/wireless/b43/debugfs.c",
84408 + .param3 = 1,
84409 +};
84410 +
84411 +struct size_overflow_hash _000483_hash = {
84412 + .next = NULL,
84413 + .name = "b43legacy_debugfs_read",
84414 + .file = "drivers/net/wireless/b43legacy/debugfs.c",
84415 + .param3 = 1,
84416 +};
84417 +
84418 +struct size_overflow_hash _000484_hash = {
84419 + .next = NULL,
84420 + .name = "b43legacy_debugfs_write",
84421 + .file = "drivers/net/wireless/b43legacy/debugfs.c",
84422 + .param3 = 1,
84423 +};
84424 +
84425 +struct size_overflow_hash _000485_hash = {
84426 + .next = NULL,
84427 + .name = "b43_nphy_load_samples",
84428 + .file = "drivers/net/wireless/b43/phy_n.c",
84429 + .param3 = 1,
84430 +};
84431 +
84432 +struct size_overflow_hash _000486_hash = {
84433 + .next = NULL,
84434 + .name = "bch_alloc",
84435 + .file = "lib/bch.c",
84436 + .param1 = 1,
84437 +};
84438 +
84439 +struct size_overflow_hash _000487_hash = {
84440 + .next = NULL,
84441 + .name = "bfad_debugfs_read",
84442 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
84443 + .param3 = 1,
84444 +};
84445 +
84446 +struct size_overflow_hash _000488_hash = {
84447 + .next = NULL,
84448 + .name = "bfad_debugfs_read_regrd",
84449 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
84450 + .param3 = 1,
84451 +};
84452 +
84453 +struct size_overflow_hash _000489_hash = {
84454 + .next = NULL,
84455 + .name = "bfad_debugfs_write_regrd",
84456 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
84457 + .param3 = 1,
84458 +};
84459 +
84460 +struct size_overflow_hash _000490_hash = {
84461 + .next = NULL,
84462 + .name = "bfad_debugfs_write_regwr",
84463 + .file = "drivers/scsi/bfa/bfad_debugfs.c",
84464 + .param3 = 1,
84465 +};
84466 +
84467 +struct size_overflow_hash _000491_hash = {
84468 + .next = NULL,
84469 + .name = "bits_to_user",
84470 + .file = "drivers/input/evdev.c",
84471 + .param3 = 1,
84472 +};
84473 +
84474 +struct size_overflow_hash _000492_hash = {
84475 + .next = NULL,
84476 + .name = "bl_pipe_downcall",
84477 + .file = "fs/nfs/blocklayout/blocklayoutdev.c",
84478 + .param3 = 1,
84479 +};
84480 +
84481 +struct size_overflow_hash _000493_hash = {
84482 + .next = NULL,
84483 + .name = "bm_entry_read",
84484 + .file = "fs/binfmt_misc.c",
84485 + .param3 = 1,
84486 +};
84487 +
84488 +struct size_overflow_hash _000494_hash = {
84489 + .next = NULL,
84490 + .name = "bm_realloc_pages",
84491 + .file = "drivers/block/drbd/drbd_bitmap.c",
84492 + .param2 = 1,
84493 +};
84494 +
84495 +struct size_overflow_hash _000495_hash = {
84496 + .next = NULL,
84497 + .name = "bm_status_read",
84498 + .file = "fs/binfmt_misc.c",
84499 + .param3 = 1,
84500 +};
84501 +
84502 +struct size_overflow_hash _000496_hash = {
84503 + .next = NULL,
84504 + .name = "bnad_debugfs_read",
84505 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
84506 + .param3 = 1,
84507 +};
84508 +
84509 +struct size_overflow_hash _000497_hash = {
84510 + .next = NULL,
84511 + .name = "bnad_debugfs_read_regrd",
84512 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
84513 + .param3 = 1,
84514 +};
84515 +
84516 +struct size_overflow_hash _000498_hash = {
84517 + .next = NULL,
84518 + .name = "bnad_debugfs_write_regrd",
84519 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
84520 + .param3 = 1,
84521 +};
84522 +
84523 +struct size_overflow_hash _000499_hash = {
84524 + .next = NULL,
84525 + .name = "bnad_debugfs_write_regwr",
84526 + .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
84527 + .param3 = 1,
84528 +};
84529 +
84530 +struct size_overflow_hash _000500_hash = {
84531 + .next = NULL,
84532 + .name = "bnx2fc_cmd_mgr_alloc",
84533 + .file = "drivers/scsi/bnx2fc/bnx2fc_io.c",
84534 + .param2 = 1,
84535 + .param3 = 1,
84536 +};
84537 +
84538 +struct size_overflow_hash _000502_hash = {
84539 + .next = NULL,
84540 + .name = "btmrvl_curpsmode_read",
84541 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84542 + .param3 = 1,
84543 +};
84544 +
84545 +struct size_overflow_hash _000503_hash = {
84546 + .next = NULL,
84547 + .name = "btmrvl_gpiogap_read",
84548 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84549 + .param3 = 1,
84550 +};
84551 +
84552 +struct size_overflow_hash _000504_hash = {
84553 + .next = NULL,
84554 + .name = "btmrvl_gpiogap_write",
84555 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84556 + .param3 = 1,
84557 +};
84558 +
84559 +struct size_overflow_hash _000505_hash = {
84560 + .next = NULL,
84561 + .name = "btmrvl_hscfgcmd_read",
84562 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84563 + .param3 = 1,
84564 +};
84565 +
84566 +struct size_overflow_hash _000506_hash = {
84567 + .next = NULL,
84568 + .name = "btmrvl_hscfgcmd_write",
84569 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84570 + .param3 = 1,
84571 +};
84572 +
84573 +struct size_overflow_hash _000507_hash = {
84574 + .next = &_000006_hash,
84575 + .name = "btmrvl_hscmd_read",
84576 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84577 + .param3 = 1,
84578 +};
84579 +
84580 +struct size_overflow_hash _000508_hash = {
84581 + .next = NULL,
84582 + .name = "btmrvl_hscmd_write",
84583 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84584 + .param3 = 1,
84585 +};
84586 +
84587 +struct size_overflow_hash _000509_hash = {
84588 + .next = NULL,
84589 + .name = "btmrvl_hsmode_read",
84590 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84591 + .param3 = 1,
84592 +};
84593 +
84594 +struct size_overflow_hash _000510_hash = {
84595 + .next = NULL,
84596 + .name = "btmrvl_hsmode_write",
84597 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84598 + .param3 = 1,
84599 +};
84600 +
84601 +struct size_overflow_hash _000511_hash = {
84602 + .next = NULL,
84603 + .name = "btmrvl_hsstate_read",
84604 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84605 + .param3 = 1,
84606 +};
84607 +
84608 +struct size_overflow_hash _000512_hash = {
84609 + .next = NULL,
84610 + .name = "btmrvl_pscmd_read",
84611 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84612 + .param3 = 1,
84613 +};
84614 +
84615 +struct size_overflow_hash _000513_hash = {
84616 + .next = NULL,
84617 + .name = "btmrvl_pscmd_write",
84618 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84619 + .param3 = 1,
84620 +};
84621 +
84622 +struct size_overflow_hash _000514_hash = {
84623 + .next = NULL,
84624 + .name = "btmrvl_psmode_read",
84625 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84626 + .param3 = 1,
84627 +};
84628 +
84629 +struct size_overflow_hash _000515_hash = {
84630 + .next = NULL,
84631 + .name = "btmrvl_psmode_write",
84632 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84633 + .param3 = 1,
84634 +};
84635 +
84636 +struct size_overflow_hash _000516_hash = {
84637 + .next = NULL,
84638 + .name = "btmrvl_psstate_read",
84639 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84640 + .param3 = 1,
84641 +};
84642 +
84643 +struct size_overflow_hash _000517_hash = {
84644 + .next = NULL,
84645 + .name = "btmrvl_txdnldready_read",
84646 + .file = "drivers/bluetooth/btmrvl_debugfs.c",
84647 + .param3 = 1,
84648 +};
84649 +
84650 +struct size_overflow_hash _000518_hash = {
84651 + .next = NULL,
84652 + .name = "btrfs_alloc_delayed_item",
84653 + .file = "fs/btrfs/delayed-inode.c",
84654 + .param1 = 1,
84655 +};
84656 +
84657 +struct size_overflow_hash _000519_hash = {
84658 + .next = NULL,
84659 + .name = "btrfs_copy_from_user",
84660 + .file = "fs/btrfs/file.c",
84661 + .param3 = 1,
84662 +};
84663 +
84664 +struct size_overflow_hash _000520_hash = {
84665 + .next = NULL,
84666 + .name = "__btrfs_map_block",
84667 + .file = "fs/btrfs/volumes.c",
84668 + .param3 = 1,
84669 +};
84670 +
84671 +struct size_overflow_hash _000521_hash = {
84672 + .next = NULL,
84673 + .name = "__c4iw_init_resource_fifo",
84674 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
84675 + .param3 = 1,
84676 +};
84677 +
84678 +struct size_overflow_hash _000522_hash = {
84679 + .next = NULL,
84680 + .name = "cache_do_downcall",
84681 + .file = "net/sunrpc/cache.c",
84682 + .param3 = 1,
84683 +};
84684 +
84685 +struct size_overflow_hash _000523_hash = {
84686 + .next = NULL,
84687 + .name = "cachefiles_daemon_write",
84688 + .file = "fs/cachefiles/daemon.c",
84689 + .param3 = 1,
84690 +};
84691 +
84692 +struct size_overflow_hash _000524_hash = {
84693 + .next = NULL,
84694 + .name = "cache_read",
84695 + .file = "net/sunrpc/cache.c",
84696 + .param3 = 1,
84697 +};
84698 +
84699 +struct size_overflow_hash _000525_hash = {
84700 + .next = NULL,
84701 + .name = "ca_extend",
84702 + .file = "drivers/md/persistent-data/dm-space-map-checker.c",
84703 + .param2 = 1,
84704 +};
84705 +
84706 +struct size_overflow_hash _000526_hash = {
84707 + .next = NULL,
84708 + .name = "calc_hmac",
84709 + .file = "security/keys/encrypted-keys/encrypted.c",
84710 + .param3 = 1,
84711 +};
84712 +
84713 +struct size_overflow_hash _000527_hash = {
84714 + .next = NULL,
84715 + .name = "capi_write",
84716 + .file = "drivers/isdn/capi/capi.c",
84717 + .param3 = 1,
84718 +};
84719 +
84720 +struct size_overflow_hash _000528_hash = {
84721 + .next = NULL,
84722 + .name = "carl9170_cmd_buf",
84723 + .file = "drivers/net/wireless/ath/carl9170/cmd.c",
84724 + .param3 = 1,
84725 +};
84726 +
84727 +struct size_overflow_hash _000529_hash = {
84728 + .next = NULL,
84729 + .name = "carl9170_debugfs_read",
84730 + .file = "drivers/net/wireless/ath/carl9170/debug.c",
84731 + .param3 = 1,
84732 +};
84733 +
84734 +struct size_overflow_hash _000530_hash = {
84735 + .next = NULL,
84736 + .name = "carl9170_debugfs_write",
84737 + .file = "drivers/net/wireless/ath/carl9170/debug.c",
84738 + .param3 = 1,
84739 +};
84740 +
84741 +struct size_overflow_hash _000531_hash = {
84742 + .next = NULL,
84743 + .name = "cciss_proc_write",
84744 + .file = "drivers/block/cciss.c",
84745 + .param3 = 1,
84746 +};
84747 +
84748 +struct size_overflow_hash _000532_hash = {
84749 + .next = NULL,
84750 + .name = "ceph_buffer_new",
84751 + .file = "include/linux/ceph/buffer.h",
84752 + .param1 = 1,
84753 +};
84754 +
84755 +struct size_overflow_hash _000533_hash = {
84756 + .next = NULL,
84757 + .name = "ceph_copy_page_vector_to_user",
84758 + .file = "include/linux/ceph/libceph.h",
84759 + .param4 = 1,
84760 +};
84761 +
84762 +struct size_overflow_hash _000534_hash = {
84763 + .next = NULL,
84764 + .name = "ceph_copy_user_to_page_vector",
84765 + .file = "include/linux/ceph/libceph.h",
84766 + .param4 = 1,
84767 +};
84768 +
84769 +struct size_overflow_hash _000535_hash = {
84770 + .next = NULL,
84771 + .name = "ceph_read_dir",
84772 + .file = "fs/ceph/dir.c",
84773 + .param3 = 1,
84774 +};
84775 +
84776 +struct size_overflow_hash _000536_hash = {
84777 + .next = NULL,
84778 + .name = "ceph_setxattr",
84779 + .file = "fs/ceph/xattr.c",
84780 + .param4 = 1,
84781 +};
84782 +
84783 +struct size_overflow_hash _000537_hash = {
84784 + .next = NULL,
84785 + .name = "cfg80211_connect_result",
84786 + .file = "include/net/cfg80211.h",
84787 + .param4 = 1,
84788 + .param6 = 1,
84789 +};
84790 +
84791 +struct size_overflow_hash _000539_hash = {
84792 + .next = NULL,
84793 + .name = "cfg80211_disconnected",
84794 + .file = "include/net/cfg80211.h",
84795 + .param4 = 1,
84796 +};
84797 +
84798 +struct size_overflow_hash _000540_hash = {
84799 + .next = NULL,
84800 + .name = "cfg80211_inform_bss",
84801 + .file = "include/net/cfg80211.h",
84802 + .param8 = 1,
84803 +};
84804 +
84805 +struct size_overflow_hash _000541_hash = {
84806 + .next = NULL,
84807 + .name = "cfg80211_inform_bss_frame",
84808 + .file = "include/net/cfg80211.h",
84809 + .param4 = 1,
84810 +};
84811 +
84812 +struct size_overflow_hash _000542_hash = {
84813 + .next = NULL,
84814 + .name = "cfg80211_roamed_bss",
84815 + .file = "include/net/cfg80211.h",
84816 + .param4 = 1,
84817 + .param6 = 1,
84818 +};
84819 +
84820 +struct size_overflow_hash _000544_hash = {
84821 + .next = NULL,
84822 + .name = "cfi_read_pri",
84823 + .file = "include/linux/mtd/cfi.h",
84824 + .param3 = 1,
84825 +};
84826 +
84827 +struct size_overflow_hash _000545_hash = {
84828 + .next = NULL,
84829 + .name = "channel_type_read",
84830 + .file = "net/mac80211/debugfs.c",
84831 + .param3 = 1,
84832 +};
84833 +
84834 +struct size_overflow_hash _000546_hash = {
84835 + .next = NULL,
84836 + .name = "cifs_idmap_key_instantiate",
84837 + .file = "fs/cifs/cifsacl.c",
84838 + .param3 = 1,
84839 +};
84840 +
84841 +struct size_overflow_hash _000547_hash = {
84842 + .next = NULL,
84843 + .name = "cifs_readdata_alloc",
84844 + .file = "fs/cifs/cifssmb.c",
84845 + .param1 = 1,
84846 +};
84847 +
84848 +struct size_overflow_hash _000548_hash = {
84849 + .next = NULL,
84850 + .name = "cifs_security_flags_proc_write",
84851 + .file = "fs/cifs/cifs_debug.c",
84852 + .param3 = 1,
84853 +};
84854 +
84855 +struct size_overflow_hash _000549_hash = {
84856 + .next = NULL,
84857 + .name = "cifs_setxattr",
84858 + .file = "fs/cifs/xattr.c",
84859 + .param4 = 1,
84860 +};
84861 +
84862 +struct size_overflow_hash _000550_hash = {
84863 + .next = NULL,
84864 + .name = "cifs_spnego_key_instantiate",
84865 + .file = "fs/cifs/cifs_spnego.c",
84866 + .param3 = 1,
84867 +};
84868 +
84869 +struct size_overflow_hash _000551_hash = {
84870 + .next = NULL,
84871 + .name = "cifs_writedata_alloc",
84872 + .file = "fs/cifs/cifssmb.c",
84873 + .param1 = 1,
84874 +};
84875 +
84876 +struct size_overflow_hash _000552_hash = {
84877 + .next = NULL,
84878 + .name = "ci_ll_write",
84879 + .file = "drivers/media/dvb/ttpci/av7110_ca.c",
84880 + .param4 = 1,
84881 +};
84882 +
84883 +struct size_overflow_hash _000553_hash = {
84884 + .next = NULL,
84885 + .name = "clusterip_proc_write",
84886 + .file = "net/ipv4/netfilter/ipt_CLUSTERIP.c",
84887 + .param3 = 1,
84888 +};
84889 +
84890 +struct size_overflow_hash _000554_hash = {
84891 + .next = &_000108_hash,
84892 + .name = "cm4040_write",
84893 + .file = "drivers/char/pcmcia/cm4040_cs.c",
84894 + .param3 = 1,
84895 +};
84896 +
84897 +struct size_overflow_hash _000555_hash = {
84898 + .next = NULL,
84899 + .name = "cm_copy_private_data",
84900 + .file = "drivers/infiniband/core/cm.c",
84901 + .param2 = 1,
84902 +};
84903 +
84904 +struct size_overflow_hash _000556_hash = {
84905 + .next = NULL,
84906 + .name = "cmm_write",
84907 + .file = "drivers/char/pcmcia/cm4000_cs.c",
84908 + .param3 = 1,
84909 +};
84910 +
84911 +struct size_overflow_hash _000557_hash = {
84912 + .next = NULL,
84913 + .name = "cm_write",
84914 + .file = "drivers/acpi/custom_method.c",
84915 + .param3 = 1,
84916 +};
84917 +
84918 +struct size_overflow_hash _000558_hash = {
84919 + .next = NULL,
84920 + .name = "coda_psdev_read",
84921 + .file = "fs/coda/psdev.c",
84922 + .param3 = 1,
84923 +};
84924 +
84925 +struct size_overflow_hash _000559_hash = {
84926 + .next = NULL,
84927 + .name = "coda_psdev_write",
84928 + .file = "fs/coda/psdev.c",
84929 + .param3 = 1,
84930 +};
84931 +
84932 +struct size_overflow_hash _000560_hash = {
84933 + .next = NULL,
84934 + .name = "codec_list_read_file",
84935 + .file = "sound/soc/soc-core.c",
84936 + .param3 = 1,
84937 +};
84938 +
84939 +struct size_overflow_hash _000561_hash = {
84940 + .next = NULL,
84941 + .name = "codec_reg_read_file",
84942 + .file = "sound/soc/soc-core.c",
84943 + .param3 = 1,
84944 +};
84945 +
84946 +struct size_overflow_hash _000562_hash = {
84947 + .next = NULL,
84948 + .name = "command_file_write",
84949 + .file = "drivers/misc/ibmasm/ibmasmfs.c",
84950 + .param3 = 1,
84951 +};
84952 +
84953 +struct size_overflow_hash _000563_hash = {
84954 + .next = NULL,
84955 + .name = "command_write",
84956 + .file = "drivers/uwb/uwb-debug.c",
84957 + .param3 = 1,
84958 +};
84959 +
84960 +struct size_overflow_hash _000564_hash = {
84961 + .next = NULL,
84962 + .name = "concat_writev",
84963 + .file = "drivers/mtd/mtdconcat.c",
84964 + .param3 = 1,
84965 +};
84966 +
84967 +struct size_overflow_hash _000565_hash = {
84968 + .next = NULL,
84969 + .name = "configfs_read_file",
84970 + .file = "fs/configfs/file.c",
84971 + .param3 = 1,
84972 +};
84973 +
84974 +struct size_overflow_hash _000566_hash = {
84975 + .next = NULL,
84976 + .name = "context_alloc",
84977 + .file = "drivers/md/dm-raid.c",
84978 + .param3 = 1,
84979 +};
84980 +
84981 +struct size_overflow_hash _000567_hash = {
84982 + .next = NULL,
84983 + .name = "copy_counters_to_user",
84984 + .file = "net/bridge/netfilter/ebtables.c",
84985 + .param5 = 1,
84986 +};
84987 +
84988 +struct size_overflow_hash _000568_hash = {
84989 + .next = NULL,
84990 + .name = "copy_entries_to_user",
84991 + .file = "net/ipv6/netfilter/ip6_tables.c",
84992 + .param1 = 1,
84993 +};
84994 +
84995 +struct size_overflow_hash _000569_hash = {
84996 + .next = NULL,
84997 + .name = "copy_entries_to_user",
84998 + .file = "net/ipv4/netfilter/arp_tables.c",
84999 + .param1 = 1,
85000 +};
85001 +
85002 +struct size_overflow_hash _000570_hash = {
85003 + .next = NULL,
85004 + .name = "copy_entries_to_user",
85005 + .file = "net/ipv4/netfilter/ip_tables.c",
85006 + .param1 = 1,
85007 +};
85008 +
85009 +struct size_overflow_hash _000571_hash = {
85010 + .next = NULL,
85011 + .name = "copy_from_user_toio",
85012 + .file = "include/sound/core.h",
85013 + .param3 = 1,
85014 +};
85015 +
85016 +struct size_overflow_hash _000572_hash = {
85017 + .next = NULL,
85018 + .name = "copy_macs",
85019 + .file = "net/atm/mpc.c",
85020 + .param4 = 1,
85021 +};
85022 +
85023 +struct size_overflow_hash _000573_hash = {
85024 + .next = NULL,
85025 + .name = "copy_to_user_fromio",
85026 + .file = "include/sound/core.h",
85027 + .param3 = 1,
85028 +};
85029 +
85030 +struct size_overflow_hash _000574_hash = {
85031 + .next = NULL,
85032 + .name = "cosa_write",
85033 + .file = "drivers/net/wan/cosa.c",
85034 + .param3 = 1,
85035 +};
85036 +
85037 +struct size_overflow_hash _000575_hash = {
85038 + .next = NULL,
85039 + .name = "create_attr_set",
85040 + .file = "drivers/platform/x86/thinkpad_acpi.c",
85041 + .param1 = 1,
85042 +};
85043 +
85044 +struct size_overflow_hash _000576_hash = {
85045 + .next = NULL,
85046 + .name = "create_entry",
85047 + .file = "fs/binfmt_misc.c",
85048 + .param2 = 1,
85049 +};
85050 +
85051 +struct size_overflow_hash _000577_hash = {
85052 + .next = NULL,
85053 + .name = "create_gpadl_header",
85054 + .file = "drivers/hv/channel.c",
85055 + .param2 = 1,
85056 +};
85057 +
85058 +struct size_overflow_hash _000578_hash = {
85059 + .next = NULL,
85060 + .name = "create_queues",
85061 + .file = "drivers/atm/ambassador.c",
85062 + .param2 = 1,
85063 + .param3 = 1,
85064 +};
85065 +
85066 +struct size_overflow_hash _000580_hash = {
85067 + .next = NULL,
85068 + .name = "_create_sg_bios",
85069 + .file = "drivers/scsi/osd/osd_initiator.c",
85070 + .param4 = 1,
85071 +};
85072 +
85073 +struct size_overflow_hash _000581_hash = {
85074 + .next = NULL,
85075 + .name = "cryptd_alloc_instance",
85076 + .file = "crypto/cryptd.c",
85077 + .param2 = 1,
85078 + .param3 = 1,
85079 +};
85080 +
85081 +struct size_overflow_hash _000583_hash = {
85082 + .next = NULL,
85083 + .name = "cryptd_hash_setkey",
85084 + .file = "crypto/cryptd.c",
85085 + .param3 = 1,
85086 +};
85087 +
85088 +struct size_overflow_hash _000584_hash = {
85089 + .next = NULL,
85090 + .name = "crypto_authenc_esn_setkey",
85091 + .file = "crypto/authencesn.c",
85092 + .param3 = 1,
85093 +};
85094 +
85095 +struct size_overflow_hash _000585_hash = {
85096 + .next = NULL,
85097 + .name = "crypto_authenc_setkey",
85098 + .file = "crypto/authenc.c",
85099 + .param3 = 1,
85100 +};
85101 +
85102 +struct size_overflow_hash _000586_hash = {
85103 + .next = NULL,
85104 + .name = "ctrl_out",
85105 + .file = "drivers/usb/misc/usbtest.c",
85106 + .param3 = 1,
85107 + .param5 = 1,
85108 +};
85109 +
85110 +struct size_overflow_hash _000588_hash = {
85111 + .next = NULL,
85112 + .name = "cx18_copy_buf_to_user",
85113 + .file = "drivers/media/video/cx18/cx18-fileops.c",
85114 + .param4 = 1,
85115 +};
85116 +
85117 +struct size_overflow_hash _000589_hash = {
85118 + .next = NULL,
85119 + .name = "cx24116_writeregN",
85120 + .file = "drivers/media/dvb/frontends/cx24116.c",
85121 + .param4 = 1,
85122 +};
85123 +
85124 +struct size_overflow_hash _000590_hash = {
85125 + .next = NULL,
85126 + .name = "cxgb_alloc_mem",
85127 + .file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
85128 + .param1 = 1,
85129 +};
85130 +
85131 +struct size_overflow_hash _000591_hash = {
85132 + .next = NULL,
85133 + .name = "cxgbi_alloc_big_mem",
85134 + .file = "drivers/scsi/cxgbi/libcxgbi.h",
85135 + .param1 = 1,
85136 +};
85137 +
85138 +struct size_overflow_hash _000592_hash = {
85139 + .next = NULL,
85140 + .name = "cxgbi_device_register",
85141 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
85142 + .param1 = 1,
85143 + .param2 = 1,
85144 +};
85145 +
85146 +struct size_overflow_hash _000594_hash = {
85147 + .next = NULL,
85148 + .name = "__cxio_init_resource_fifo",
85149 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
85150 + .param3 = 1,
85151 +};
85152 +
85153 +struct size_overflow_hash _000595_hash = {
85154 + .next = NULL,
85155 + .name = "dac960_user_command_proc_write",
85156 + .file = "drivers/block/DAC960.c",
85157 + .param3 = 1,
85158 +};
85159 +
85160 +struct size_overflow_hash _000596_hash = {
85161 + .next = NULL,
85162 + .name = "dai_list_read_file",
85163 + .file = "sound/soc/soc-core.c",
85164 + .param3 = 1,
85165 +};
85166 +
85167 +struct size_overflow_hash _000597_hash = {
85168 + .next = NULL,
85169 + .name = "dapm_bias_read_file",
85170 + .file = "sound/soc/soc-dapm.c",
85171 + .param3 = 1,
85172 +};
85173 +
85174 +struct size_overflow_hash _000598_hash = {
85175 + .next = NULL,
85176 + .name = "dapm_widget_power_read_file",
85177 + .file = "sound/soc/soc-dapm.c",
85178 + .param3 = 1,
85179 +};
85180 +
85181 +struct size_overflow_hash _000599_hash = {
85182 + .next = NULL,
85183 + .name = "datablob_format",
85184 + .file = "security/keys/encrypted-keys/encrypted.c",
85185 + .param2 = 1,
85186 +};
85187 +
85188 +struct size_overflow_hash _000600_hash = {
85189 + .next = NULL,
85190 + .name = "dbgfs_frame",
85191 + .file = "drivers/net/caif/caif_spi.c",
85192 + .param3 = 1,
85193 +};
85194 +
85195 +struct size_overflow_hash _000601_hash = {
85196 + .next = NULL,
85197 + .name = "dbgfs_state",
85198 + .file = "drivers/net/caif/caif_spi.c",
85199 + .param3 = 1,
85200 +};
85201 +
85202 +struct size_overflow_hash _000602_hash = {
85203 + .next = NULL,
85204 + .name = "dccp_feat_clone_sp_val",
85205 + .file = "net/dccp/feat.c",
85206 + .param3 = 1,
85207 +};
85208 +
85209 +struct size_overflow_hash _000603_hash = {
85210 + .next = NULL,
85211 + .name = "dccp_setsockopt_ccid",
85212 + .file = "net/dccp/proto.c",
85213 + .param4 = 1,
85214 +};
85215 +
85216 +struct size_overflow_hash _000604_hash = {
85217 + .next = NULL,
85218 + .name = "dccp_setsockopt_service",
85219 + .file = "net/dccp/proto.c",
85220 + .param4 = 1,
85221 +};
85222 +
85223 +struct size_overflow_hash _000605_hash = {
85224 + .next = NULL,
85225 + .name = "ddb_input_read",
85226 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
85227 + .param3 = 1,
85228 +};
85229 +
85230 +struct size_overflow_hash _000606_hash = {
85231 + .next = NULL,
85232 + .name = "ddb_output_write",
85233 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
85234 + .param3 = 1,
85235 +};
85236 +
85237 +struct size_overflow_hash _000607_hash = {
85238 + .next = NULL,
85239 + .name = "ddp_make_gl",
85240 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
85241 + .param1 = 1,
85242 +};
85243 +
85244 +struct size_overflow_hash _000608_hash = {
85245 + .next = NULL,
85246 + .name = "debugfs_read",
85247 + .file = "drivers/infiniband/hw/cxgb4/device.c",
85248 + .param3 = 1,
85249 +};
85250 +
85251 +struct size_overflow_hash _000609_hash = {
85252 + .next = NULL,
85253 + .name = "debugfs_read",
85254 + .file = "drivers/char/virtio_console.c",
85255 + .param3 = 1,
85256 +};
85257 +
85258 +struct size_overflow_hash _000610_hash = {
85259 + .next = NULL,
85260 + .name = "debug_output",
85261 + .file = "drivers/usb/host/ohci-dbg.c",
85262 + .param3 = 1,
85263 +};
85264 +
85265 +struct size_overflow_hash _000611_hash = {
85266 + .next = NULL,
85267 + .name = "debug_output",
85268 + .file = "drivers/usb/host/ehci-dbg.c",
85269 + .param3 = 1,
85270 +};
85271 +
85272 +struct size_overflow_hash _000612_hash = {
85273 + .next = NULL,
85274 + .name = "debug_read",
85275 + .file = "fs/ocfs2/dlm/dlmdebug.c",
85276 + .param3 = 1,
85277 +};
85278 +
85279 +struct size_overflow_hash _000613_hash = {
85280 + .next = NULL,
85281 + .name = "dev_config",
85282 + .file = "drivers/usb/gadget/inode.c",
85283 + .param3 = 1,
85284 +};
85285 +
85286 +struct size_overflow_hash _000614_hash = {
85287 + .next = NULL,
85288 + .name = "device_write",
85289 + .file = "fs/dlm/user.c",
85290 + .param3 = 1,
85291 +};
85292 +
85293 +struct size_overflow_hash _000615_hash = {
85294 + .next = NULL,
85295 + .name = "dev_read",
85296 + .file = "drivers/media/video/gspca/gspca.c",
85297 + .param3 = 1,
85298 +};
85299 +
85300 +struct size_overflow_hash _000616_hash = {
85301 + .next = NULL,
85302 + .name = "dfs_file_read",
85303 + .file = "drivers/mtd/ubi/debug.c",
85304 + .param3 = 1,
85305 +};
85306 +
85307 +struct size_overflow_hash _000617_hash = {
85308 + .next = NULL,
85309 + .name = "dfs_file_write",
85310 + .file = "drivers/mtd/ubi/debug.c",
85311 + .param3 = 1,
85312 +};
85313 +
85314 +struct size_overflow_hash _000618_hash = {
85315 + .next = NULL,
85316 + .name = "direct_entry",
85317 + .file = "drivers/misc/lkdtm.c",
85318 + .param3 = 1,
85319 +};
85320 +
85321 +struct size_overflow_hash _000619_hash = {
85322 + .next = NULL,
85323 + .name = "dispatch_proc_write",
85324 + .file = "drivers/platform/x86/thinkpad_acpi.c",
85325 + .param3 = 1,
85326 +};
85327 +
85328 +struct size_overflow_hash _000620_hash = {
85329 + .next = NULL,
85330 + .name = "diva_os_malloc",
85331 + .file = "drivers/isdn/hardware/eicon/platform.h",
85332 + .param2 = 1,
85333 +};
85334 +
85335 +struct size_overflow_hash _000621_hash = {
85336 + .next = NULL,
85337 + .name = "dlmfs_file_read",
85338 + .file = "fs/ocfs2/dlmfs/dlmfs.c",
85339 + .param3 = 1,
85340 +};
85341 +
85342 +struct size_overflow_hash _000622_hash = {
85343 + .next = NULL,
85344 + .name = "dlmfs_file_write",
85345 + .file = "fs/ocfs2/dlmfs/dlmfs.c",
85346 + .param3 = 1,
85347 +};
85348 +
85349 +struct size_overflow_hash _000623_hash = {
85350 + .next = NULL,
85351 + .name = "dma_attach",
85352 + .file = "drivers/net/wireless/brcm80211/brcmsmac/dma.c",
85353 + .param6 = 1,
85354 + .param7 = 1,
85355 +};
85356 +
85357 +struct size_overflow_hash _000625_hash = {
85358 + .next = NULL,
85359 + .name = "dma_rx_errors_read",
85360 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85361 + .param3 = 1,
85362 +};
85363 +
85364 +struct size_overflow_hash _000626_hash = {
85365 + .next = NULL,
85366 + .name = "dma_rx_requested_read",
85367 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85368 + .param3 = 1,
85369 +};
85370 +
85371 +struct size_overflow_hash _000627_hash = {
85372 + .next = NULL,
85373 + .name = "dma_show_regs",
85374 + .file = "drivers/tty/serial/mfd.c",
85375 + .param3 = 1,
85376 +};
85377 +
85378 +struct size_overflow_hash _000628_hash = {
85379 + .next = NULL,
85380 + .name = "dma_tx_errors_read",
85381 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85382 + .param3 = 1,
85383 +};
85384 +
85385 +struct size_overflow_hash _000629_hash = {
85386 + .next = NULL,
85387 + .name = "dma_tx_requested_read",
85388 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85389 + .param3 = 1,
85390 +};
85391 +
85392 +struct size_overflow_hash _000630_hash = {
85393 + .next = NULL,
85394 + .name = "dm_read",
85395 + .file = "drivers/net/usb/dm9601.c",
85396 + .param3 = 1,
85397 +};
85398 +
85399 +struct size_overflow_hash _000631_hash = {
85400 + .next = NULL,
85401 + .name = "dm_vcalloc",
85402 + .file = "include/linux/device-mapper.h",
85403 + .param1 = 1,
85404 + .param2 = 1,
85405 +};
85406 +
85407 +struct size_overflow_hash _000633_hash = {
85408 + .next = NULL,
85409 + .name = "dm_write",
85410 + .file = "drivers/net/usb/dm9601.c",
85411 + .param3 = 1,
85412 +};
85413 +
85414 +struct size_overflow_hash _000634_hash = {
85415 + .next = NULL,
85416 + .name = "__dn_setsockopt",
85417 + .file = "net/decnet/af_decnet.c",
85418 + .param5 = 1,
85419 +};
85420 +
85421 +struct size_overflow_hash _000635_hash = {
85422 + .next = NULL,
85423 + .name = "dns_query",
85424 + .file = "include/linux/dns_resolver.h",
85425 + .param3 = 1,
85426 +};
85427 +
85428 +struct size_overflow_hash _000636_hash = {
85429 + .next = NULL,
85430 + .name = "dns_resolver_instantiate",
85431 + .file = "net/dns_resolver/dns_key.c",
85432 + .param3 = 1,
85433 +};
85434 +
85435 +struct size_overflow_hash _000637_hash = {
85436 + .next = NULL,
85437 + .name = "dns_resolver_read",
85438 + .file = "net/dns_resolver/dns_key.c",
85439 + .param3 = 1,
85440 +};
85441 +
85442 +struct size_overflow_hash _000638_hash = {
85443 + .next = NULL,
85444 + .name = "do_add_counters",
85445 + .file = "net/ipv6/netfilter/ip6_tables.c",
85446 + .param3 = 1,
85447 +};
85448 +
85449 +struct size_overflow_hash _000639_hash = {
85450 + .next = NULL,
85451 + .name = "do_add_counters",
85452 + .file = "net/ipv4/netfilter/ip_tables.c",
85453 + .param3 = 1,
85454 +};
85455 +
85456 +struct size_overflow_hash _000640_hash = {
85457 + .next = NULL,
85458 + .name = "do_add_counters",
85459 + .file = "net/ipv4/netfilter/arp_tables.c",
85460 + .param3 = 1,
85461 +};
85462 +
85463 +struct size_overflow_hash _000641_hash = {
85464 + .next = NULL,
85465 + .name = "__do_config_autodelink",
85466 + .file = "drivers/usb/storage/realtek_cr.c",
85467 + .param3 = 1,
85468 +};
85469 +
85470 +struct size_overflow_hash _000642_hash = {
85471 + .next = NULL,
85472 + .name = "do_ipv6_setsockopt",
85473 + .file = "net/ipv6/ipv6_sockglue.c",
85474 + .param5 = 1,
85475 +};
85476 +
85477 +struct size_overflow_hash _000643_hash = {
85478 + .next = NULL,
85479 + .name = "do_ip_vs_set_ctl",
85480 + .file = "net/netfilter/ipvs/ip_vs_ctl.c",
85481 + .param4 = 1,
85482 +};
85483 +
85484 +struct size_overflow_hash _000644_hash = {
85485 + .next = NULL,
85486 + .name = "do_register_entry",
85487 + .file = "drivers/misc/lkdtm.c",
85488 + .param4 = 1,
85489 +};
85490 +
85491 +struct size_overflow_hash _000645_hash = {
85492 + .next = NULL,
85493 + .name = "__do_replace",
85494 + .file = "net/ipv6/netfilter/ip6_tables.c",
85495 + .param5 = 1,
85496 +};
85497 +
85498 +struct size_overflow_hash _000646_hash = {
85499 + .next = NULL,
85500 + .name = "__do_replace",
85501 + .file = "net/ipv4/netfilter/ip_tables.c",
85502 + .param5 = 1,
85503 +};
85504 +
85505 +struct size_overflow_hash _000647_hash = {
85506 + .next = NULL,
85507 + .name = "__do_replace",
85508 + .file = "net/ipv4/netfilter/arp_tables.c",
85509 + .param5 = 1,
85510 +};
85511 +
85512 +struct size_overflow_hash _000648_hash = {
85513 + .next = NULL,
85514 + .name = "do_sync",
85515 + .file = "fs/gfs2/quota.c",
85516 + .param1 = 1,
85517 +};
85518 +
85519 +struct size_overflow_hash _000649_hash = {
85520 + .next = NULL,
85521 + .name = "do_update_counters",
85522 + .file = "net/bridge/netfilter/ebtables.c",
85523 + .param4 = 1,
85524 +};
85525 +
85526 +struct size_overflow_hash _000650_hash = {
85527 + .next = NULL,
85528 + .name = "driver_state_read",
85529 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
85530 + .param3 = 1,
85531 +};
85532 +
85533 +struct size_overflow_hash _000651_hash = {
85534 + .next = NULL,
85535 + .name = "dsp_write",
85536 + .file = "sound/oss/msnd_pinnacle.c",
85537 + .param2 = 1,
85538 +};
85539 +
85540 +struct size_overflow_hash _000652_hash = {
85541 + .next = NULL,
85542 + .name = "dvb_aplay",
85543 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
85544 + .param3 = 1,
85545 +};
85546 +
85547 +struct size_overflow_hash _000653_hash = {
85548 + .next = NULL,
85549 + .name = "dvb_ca_en50221_io_write",
85550 + .file = "drivers/media/dvb/dvb-core/dvb_ca_en50221.c",
85551 + .param3 = 1,
85552 +};
85553 +
85554 +struct size_overflow_hash _000654_hash = {
85555 + .next = NULL,
85556 + .name = "dvb_dmxdev_set_buffer_size",
85557 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
85558 + .param2 = 1,
85559 +};
85560 +
85561 +struct size_overflow_hash _000655_hash = {
85562 + .next = NULL,
85563 + .name = "dvbdmx_write",
85564 + .file = "drivers/media/dvb/dvb-core/dvb_demux.c",
85565 + .param3 = 1,
85566 +};
85567 +
85568 +struct size_overflow_hash _000656_hash = {
85569 + .next = NULL,
85570 + .name = "dvb_dvr_set_buffer_size",
85571 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
85572 + .param2 = 1,
85573 +};
85574 +
85575 +struct size_overflow_hash _000657_hash = {
85576 + .next = NULL,
85577 + .name = "dvb_play",
85578 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
85579 + .param3 = 1,
85580 +};
85581 +
85582 +struct size_overflow_hash _000658_hash = {
85583 + .next = NULL,
85584 + .name = "dvb_ringbuffer_pkt_read_user",
85585 + .file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
85586 + .param5 = 1,
85587 +};
85588 +
85589 +struct size_overflow_hash _000659_hash = {
85590 + .next = NULL,
85591 + .name = "dvb_ringbuffer_read_user",
85592 + .file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
85593 + .param3 = 1,
85594 +};
85595 +
85596 +struct size_overflow_hash _000660_hash = {
85597 + .next = NULL,
85598 + .name = "dw210x_op_rw",
85599 + .file = "drivers/media/dvb/dvb-usb/dw2102.c",
85600 + .param6 = 1,
85601 +};
85602 +
85603 +struct size_overflow_hash _000661_hash = {
85604 + .next = NULL,
85605 + .name = "dwc3_mode_write",
85606 + .file = "drivers/usb/dwc3/debugfs.c",
85607 + .param3 = 1,
85608 +};
85609 +
85610 +struct size_overflow_hash _000662_hash = {
85611 + .next = NULL,
85612 + .name = "econet_sendmsg",
85613 + .file = "net/econet/af_econet.c",
85614 + .param4 = 1,
85615 +};
85616 +
85617 +struct size_overflow_hash _000663_hash = {
85618 + .next = NULL,
85619 + .name = "ecryptfs_copy_filename",
85620 + .file = "fs/ecryptfs/crypto.c",
85621 + .param4 = 1,
85622 +};
85623 +
85624 +struct size_overflow_hash _000664_hash = {
85625 + .next = NULL,
85626 + .name = "ecryptfs_miscdev_write",
85627 + .file = "fs/ecryptfs/miscdev.c",
85628 + .param3 = 1,
85629 +};
85630 +
85631 +struct size_overflow_hash _000665_hash = {
85632 + .next = NULL,
85633 + .name = "ecryptfs_send_miscdev",
85634 + .file = "fs/ecryptfs/miscdev.c",
85635 + .param2 = 1,
85636 +};
85637 +
85638 +struct size_overflow_hash _000666_hash = {
85639 + .next = NULL,
85640 + .name = "edac_device_alloc_ctl_info",
85641 + .file = "drivers/edac/edac_device.c",
85642 + .param1 = 1,
85643 +};
85644 +
85645 +struct size_overflow_hash _000667_hash = {
85646 + .next = NULL,
85647 + .name = "edac_mc_alloc",
85648 + .file = "drivers/edac/edac_mc.c",
85649 + .param1 = 1,
85650 +};
85651 +
85652 +struct size_overflow_hash _000668_hash = {
85653 + .next = NULL,
85654 + .name = "edac_pci_alloc_ctl_info",
85655 + .file = "drivers/edac/edac_pci.c",
85656 + .param1 = 1,
85657 +};
85658 +
85659 +struct size_overflow_hash _000669_hash = {
85660 + .next = NULL,
85661 + .name = "efivar_create_sysfs_entry",
85662 + .file = "drivers/firmware/efivars.c",
85663 + .param2 = 1,
85664 +};
85665 +
85666 +struct size_overflow_hash _000670_hash = {
85667 + .next = NULL,
85668 + .name = "efx_tsoh_heap_alloc",
85669 + .file = "drivers/net/ethernet/sfc/tx.c",
85670 + .param2 = 1,
85671 +};
85672 +
85673 +struct size_overflow_hash _000671_hash = {
85674 + .next = NULL,
85675 + .name = "encrypted_instantiate",
85676 + .file = "security/keys/encrypted-keys/encrypted.c",
85677 + .param3 = 1,
85678 +};
85679 +
85680 +struct size_overflow_hash _000672_hash = {
85681 + .next = NULL,
85682 + .name = "encrypted_update",
85683 + .file = "security/keys/encrypted-keys/encrypted.c",
85684 + .param3 = 1,
85685 +};
85686 +
85687 +struct size_overflow_hash _000673_hash = {
85688 + .next = NULL,
85689 + .name = "ep0_write",
85690 + .file = "drivers/usb/gadget/inode.c",
85691 + .param3 = 1,
85692 +};
85693 +
85694 +struct size_overflow_hash _000674_hash = {
85695 + .next = NULL,
85696 + .name = "ep_read",
85697 + .file = "drivers/usb/gadget/inode.c",
85698 + .param3 = 1,
85699 +};
85700 +
85701 +struct size_overflow_hash _000675_hash = {
85702 + .next = NULL,
85703 + .name = "ep_write",
85704 + .file = "drivers/usb/gadget/inode.c",
85705 + .param3 = 1,
85706 +};
85707 +
85708 +struct size_overflow_hash _000676_hash = {
85709 + .next = NULL,
85710 + .name = "erst_dbg_write",
85711 + .file = "drivers/acpi/apei/erst-dbg.c",
85712 + .param3 = 1,
85713 +};
85714 +
85715 +struct size_overflow_hash _000677_hash = {
85716 + .next = NULL,
85717 + .name = "et61x251_read",
85718 + .file = "drivers/media/video/et61x251/et61x251_core.c",
85719 + .param3 = 1,
85720 +};
85721 +
85722 +struct size_overflow_hash _000678_hash = {
85723 + .next = NULL,
85724 + .name = "event_calibration_read",
85725 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85726 + .param3 = 1,
85727 +};
85728 +
85729 +struct size_overflow_hash _000679_hash = {
85730 + .next = NULL,
85731 + .name = "event_heart_beat_read",
85732 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85733 + .param3 = 1,
85734 +};
85735 +
85736 +struct size_overflow_hash _000680_hash = {
85737 + .next = NULL,
85738 + .name = "event_oom_late_read",
85739 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85740 + .param3 = 1,
85741 +};
85742 +
85743 +struct size_overflow_hash _000681_hash = {
85744 + .next = NULL,
85745 + .name = "event_phy_transmit_error_read",
85746 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85747 + .param3 = 1,
85748 +};
85749 +
85750 +struct size_overflow_hash _000682_hash = {
85751 + .next = NULL,
85752 + .name = "event_rx_mem_empty_read",
85753 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85754 + .param3 = 1,
85755 +};
85756 +
85757 +struct size_overflow_hash _000683_hash = {
85758 + .next = NULL,
85759 + .name = "event_rx_mismatch_read",
85760 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85761 + .param3 = 1,
85762 +};
85763 +
85764 +struct size_overflow_hash _000684_hash = {
85765 + .next = NULL,
85766 + .name = "event_rx_pool_read",
85767 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85768 + .param3 = 1,
85769 +};
85770 +
85771 +struct size_overflow_hash _000685_hash = {
85772 + .next = NULL,
85773 + .name = "event_tx_stuck_read",
85774 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85775 + .param3 = 1,
85776 +};
85777 +
85778 +struct size_overflow_hash _000686_hash = {
85779 + .next = NULL,
85780 + .name = "excessive_retries_read",
85781 + .file = "drivers/net/wireless/wl1251/debugfs.c",
85782 + .param3 = 1,
85783 +};
85784 +
85785 +struct size_overflow_hash _000687_hash = {
85786 + .next = NULL,
85787 + .name = "exofs_read_lookup_dev_table",
85788 + .file = "fs/exofs/super.c",
85789 + .param3 = 1,
85790 +};
85791 +
85792 +struct size_overflow_hash _000688_hash = {
85793 + .next = NULL,
85794 + .name = "ext4_kvmalloc",
85795 + .file = "fs/ext4/super.c",
85796 + .param1 = 1,
85797 +};
85798 +
85799 +struct size_overflow_hash _000689_hash = {
85800 + .next = NULL,
85801 + .name = "ext4_kvzalloc",
85802 + .file = "fs/ext4/super.c",
85803 + .param1 = 1,
85804 +};
85805 +
85806 +struct size_overflow_hash _000690_hash = {
85807 + .next = NULL,
85808 + .name = "extend_netdev_table",
85809 + .file = "net/core/netprio_cgroup.c",
85810 + .param2 = 1,
85811 +};
85812 +
85813 +struct size_overflow_hash _000691_hash = {
85814 + .next = NULL,
85815 + .name = "fd_copyin",
85816 + .file = "drivers/block/floppy.c",
85817 + .param3 = 1,
85818 +};
85819 +
85820 +struct size_overflow_hash _000692_hash = {
85821 + .next = NULL,
85822 + .name = "fd_copyout",
85823 + .file = "drivers/block/floppy.c",
85824 + .param3 = 1,
85825 +};
85826 +
85827 +struct size_overflow_hash _000693_hash = {
85828 + .next = NULL,
85829 + .name = "__ffs_ep0_read_events",
85830 + .file = "drivers/usb/gadget/f_fs.c",
85831 + .param3 = 1,
85832 +};
85833 +
85834 +struct size_overflow_hash _000694_hash = {
85835 + .next = NULL,
85836 + .name = "ffs_epfile_io",
85837 + .file = "drivers/usb/gadget/f_fs.c",
85838 + .param3 = 1,
85839 +};
85840 +
85841 +struct size_overflow_hash _000695_hash = {
85842 + .next = NULL,
85843 + .name = "ffs_prepare_buffer",
85844 + .file = "drivers/usb/gadget/f_fs.c",
85845 + .param2 = 1,
85846 +};
85847 +
85848 +struct size_overflow_hash _000696_hash = {
85849 + .next = NULL,
85850 + .name = "f_hidg_read",
85851 + .file = "drivers/usb/gadget/f_hid.c",
85852 + .param3 = 1,
85853 +};
85854 +
85855 +struct size_overflow_hash _000697_hash = {
85856 + .next = NULL,
85857 + .name = "f_hidg_write",
85858 + .file = "drivers/usb/gadget/f_hid.c",
85859 + .param3 = 1,
85860 +};
85861 +
85862 +struct size_overflow_hash _000698_hash = {
85863 + .next = NULL,
85864 + .name = "fill_write_buffer",
85865 + .file = "fs/configfs/file.c",
85866 + .param3 = 1,
85867 +};
85868 +
85869 +struct size_overflow_hash _000699_hash = {
85870 + .next = NULL,
85871 + .name = "flexcop_device_kmalloc",
85872 + .file = "drivers/media/dvb/b2c2/flexcop.c",
85873 + .param1 = 1,
85874 +};
85875 +
85876 +struct size_overflow_hash _000700_hash = {
85877 + .next = NULL,
85878 + .name = "fops_read",
85879 + .file = "drivers/media/video/saa7164/saa7164-encoder.c",
85880 + .param3 = 1,
85881 +};
85882 +
85883 +struct size_overflow_hash _000701_hash = {
85884 + .next = NULL,
85885 + .name = "fops_read",
85886 + .file = "drivers/media/video/saa7164/saa7164-vbi.c",
85887 + .param3 = 1,
85888 +};
85889 +
85890 +struct size_overflow_hash _000702_hash = {
85891 + .next = NULL,
85892 + .name = "format_devstat_counter",
85893 + .file = "net/mac80211/debugfs.c",
85894 + .param3 = 1,
85895 +};
85896 +
85897 +struct size_overflow_hash _000703_hash = {
85898 + .next = NULL,
85899 + .name = "fragmentation_threshold_read",
85900 + .file = "net/wireless/debugfs.c",
85901 + .param3 = 1,
85902 +};
85903 +
85904 +struct size_overflow_hash _000704_hash = {
85905 + .next = NULL,
85906 + .name = "frame_alloc",
85907 + .file = "drivers/media/video/gspca/gspca.c",
85908 + .param4 = 1,
85909 +};
85910 +
85911 +struct size_overflow_hash _000705_hash = {
85912 + .next = NULL,
85913 + .name = "ftdi_elan_write",
85914 + .file = "drivers/usb/misc/ftdi-elan.c",
85915 + .param3 = 1,
85916 +};
85917 +
85918 +struct size_overflow_hash _000706_hash = {
85919 + .next = NULL,
85920 + .name = "fuse_conn_limit_read",
85921 + .file = "fs/fuse/control.c",
85922 + .param3 = 1,
85923 +};
85924 +
85925 +struct size_overflow_hash _000707_hash = {
85926 + .next = NULL,
85927 + .name = "fuse_conn_limit_write",
85928 + .file = "fs/fuse/control.c",
85929 + .param3 = 1,
85930 +};
85931 +
85932 +struct size_overflow_hash _000708_hash = {
85933 + .next = &_000531_hash,
85934 + .name = "fuse_conn_waiting_read",
85935 + .file = "fs/fuse/control.c",
85936 + .param3 = 1,
85937 +};
85938 +
85939 +struct size_overflow_hash _000709_hash = {
85940 + .next = NULL,
85941 + .name = "garp_attr_create",
85942 + .file = "net/802/garp.c",
85943 + .param3 = 1,
85944 +};
85945 +
85946 +struct size_overflow_hash _000710_hash = {
85947 + .next = NULL,
85948 + .name = "get_alua_req",
85949 + .file = "drivers/scsi/device_handler/scsi_dh_alua.c",
85950 + .param3 = 1,
85951 +};
85952 +
85953 +struct size_overflow_hash _000711_hash = {
85954 + .next = NULL,
85955 + .name = "get_derived_key",
85956 + .file = "security/keys/encrypted-keys/encrypted.c",
85957 + .param4 = 1,
85958 +};
85959 +
85960 +struct size_overflow_hash _000712_hash = {
85961 + .next = NULL,
85962 + .name = "getdqbuf",
85963 + .file = "fs/quota/quota_tree.c",
85964 + .param1 = 1,
85965 +};
85966 +
85967 +struct size_overflow_hash _000713_hash = {
85968 + .next = NULL,
85969 + .name = "get_fdb_entries",
85970 + .file = "net/bridge/br_ioctl.c",
85971 + .param3 = 1,
85972 +};
85973 +
85974 +struct size_overflow_hash _000714_hash = {
85975 + .next = NULL,
85976 + .name = "get_rdac_req",
85977 + .file = "drivers/scsi/device_handler/scsi_dh_rdac.c",
85978 + .param3 = 1,
85979 +};
85980 +
85981 +struct size_overflow_hash _000715_hash = {
85982 + .next = NULL,
85983 + .name = "get_registers",
85984 + .file = "drivers/net/usb/pegasus.c",
85985 + .param3 = 1,
85986 +};
85987 +
85988 +struct size_overflow_hash _000716_hash = {
85989 + .next = NULL,
85990 + .name = "get_server_iovec",
85991 + .file = "fs/cifs/connect.c",
85992 + .param2 = 1,
85993 +};
85994 +
85995 +struct size_overflow_hash _000717_hash = {
85996 + .next = NULL,
85997 + .name = "get_ucode_user",
85998 + .file = "arch/x86/kernel/microcode_intel.c",
85999 + .param3 = 1,
86000 +};
86001 +
86002 +struct size_overflow_hash _000718_hash = {
86003 + .next = NULL,
86004 + .name = "gfs2_alloc_sort_buffer",
86005 + .file = "fs/gfs2/dir.c",
86006 + .param1 = 1,
86007 +};
86008 +
86009 +struct size_overflow_hash _000719_hash = {
86010 + .next = NULL,
86011 + .name = "gfs2_glock_nq_m",
86012 + .file = "fs/gfs2/glock.c",
86013 + .param1 = 1,
86014 +};
86015 +
86016 +struct size_overflow_hash _000720_hash = {
86017 + .next = NULL,
86018 + .name = "gigaset_initdriver",
86019 + .file = "drivers/isdn/gigaset/common.c",
86020 + .param2 = 1,
86021 +};
86022 +
86023 +struct size_overflow_hash _000721_hash = {
86024 + .next = NULL,
86025 + .name = "gpio_power_read",
86026 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
86027 + .param3 = 1,
86028 +};
86029 +
86030 +struct size_overflow_hash _000722_hash = {
86031 + .next = NULL,
86032 + .name = "gs_alloc_req",
86033 + .file = "drivers/usb/gadget/u_serial.c",
86034 + .param2 = 1,
86035 +};
86036 +
86037 +struct size_overflow_hash _000723_hash = {
86038 + .next = NULL,
86039 + .name = "gs_buf_alloc",
86040 + .file = "drivers/usb/gadget/u_serial.c",
86041 + .param2 = 1,
86042 +};
86043 +
86044 +struct size_overflow_hash _000724_hash = {
86045 + .next = NULL,
86046 + .name = "gss_pipe_downcall",
86047 + .file = "net/sunrpc/auth_gss/auth_gss.c",
86048 + .param3 = 1,
86049 +};
86050 +
86051 +struct size_overflow_hash _000725_hash = {
86052 + .next = NULL,
86053 + .name = "handle_request",
86054 + .file = "drivers/firewire/core-cdev.c",
86055 + .param9 = 1,
86056 +};
86057 +
86058 +struct size_overflow_hash _000726_hash = {
86059 + .next = NULL,
86060 + .name = "hash_new",
86061 + .file = "net/batman-adv/hash.c",
86062 + .param1 = 1,
86063 +};
86064 +
86065 +struct size_overflow_hash _000727_hash = {
86066 + .next = NULL,
86067 + .name = "hash_setkey",
86068 + .file = "crypto/algif_hash.c",
86069 + .param3 = 1,
86070 +};
86071 +
86072 +struct size_overflow_hash _000728_hash = {
86073 + .next = NULL,
86074 + .name = "hcd_buffer_alloc",
86075 + .file = "include/linux/usb/hcd.h",
86076 + .param2 = 1,
86077 +};
86078 +
86079 +struct size_overflow_hash _000729_hash = {
86080 + .next = NULL,
86081 + .name = "hci_sock_setsockopt",
86082 + .file = "net/bluetooth/hci_sock.c",
86083 + .param5 = 1,
86084 +};
86085 +
86086 +struct size_overflow_hash _000730_hash = {
86087 + .next = NULL,
86088 + .name = "hdpvr_read",
86089 + .file = "drivers/media/video/hdpvr/hdpvr-video.c",
86090 + .param3 = 1,
86091 +};
86092 +
86093 +struct size_overflow_hash _000731_hash = {
86094 + .next = NULL,
86095 + .name = "hidraw_get_report",
86096 + .file = "drivers/hid/hidraw.c",
86097 + .param3 = 1,
86098 +};
86099 +
86100 +struct size_overflow_hash _000732_hash = {
86101 + .next = NULL,
86102 + .name = "hidraw_read",
86103 + .file = "drivers/hid/hidraw.c",
86104 + .param3 = 1,
86105 +};
86106 +
86107 +struct size_overflow_hash _000733_hash = {
86108 + .next = NULL,
86109 + .name = "hidraw_send_report",
86110 + .file = "drivers/hid/hidraw.c",
86111 + .param3 = 1,
86112 +};
86113 +
86114 +struct size_overflow_hash _000734_hash = {
86115 + .next = NULL,
86116 + .name = "hid_register_field",
86117 + .file = "drivers/hid/hid-core.c",
86118 + .param2 = 1,
86119 + .param3 = 1,
86120 +};
86121 +
86122 +struct size_overflow_hash _000736_hash = {
86123 + .next = NULL,
86124 + .name = "hpfs_translate_name",
86125 + .file = "fs/hpfs/name.c",
86126 + .param3 = 1,
86127 +};
86128 +
86129 +struct size_overflow_hash _000737_hash = {
86130 + .next = NULL,
86131 + .name = "hpi_alloc_control_cache",
86132 + .file = "sound/pci/asihpi/hpicmn.c",
86133 + .param1 = 1,
86134 +};
86135 +
86136 +struct size_overflow_hash _000738_hash = {
86137 + .next = NULL,
86138 + .name = "ht40allow_map_read",
86139 + .file = "net/wireless/debugfs.c",
86140 + .param3 = 1,
86141 +};
86142 +
86143 +struct size_overflow_hash _000739_hash = {
86144 + .next = NULL,
86145 + .name = "__hwahc_dev_set_key",
86146 + .file = "drivers/usb/host/hwa-hc.c",
86147 + .param5 = 1,
86148 +};
86149 +
86150 +struct size_overflow_hash _000740_hash = {
86151 + .next = NULL,
86152 + .name = "hwflags_read",
86153 + .file = "net/mac80211/debugfs.c",
86154 + .param3 = 1,
86155 +};
86156 +
86157 +struct size_overflow_hash _000741_hash = {
86158 + .next = NULL,
86159 + .name = "hysdn_conf_read",
86160 + .file = "drivers/isdn/hysdn/hysdn_procconf.c",
86161 + .param3 = 1,
86162 +};
86163 +
86164 +struct size_overflow_hash _000742_hash = {
86165 + .next = NULL,
86166 + .name = "hysdn_conf_write",
86167 + .file = "drivers/isdn/hysdn/hysdn_procconf.c",
86168 + .param3 = 1,
86169 +};
86170 +
86171 +struct size_overflow_hash _000743_hash = {
86172 + .next = NULL,
86173 + .name = "hysdn_log_write",
86174 + .file = "drivers/isdn/hysdn/hysdn_proclog.c",
86175 + .param3 = 1,
86176 +};
86177 +
86178 +struct size_overflow_hash _000744_hash = {
86179 + .next = NULL,
86180 + .name = "i2400m_rx_stats_read",
86181 + .file = "drivers/net/wimax/i2400m/debugfs.c",
86182 + .param3 = 1,
86183 +};
86184 +
86185 +struct size_overflow_hash _000745_hash = {
86186 + .next = NULL,
86187 + .name = "i2400m_tx_stats_read",
86188 + .file = "drivers/net/wimax/i2400m/debugfs.c",
86189 + .param3 = 1,
86190 +};
86191 +
86192 +struct size_overflow_hash _000746_hash = {
86193 + .next = NULL,
86194 + .name = "__i2400mu_send_barker",
86195 + .file = "drivers/net/wimax/i2400m/usb.c",
86196 + .param3 = 1,
86197 +};
86198 +
86199 +struct size_overflow_hash _000747_hash = {
86200 + .next = NULL,
86201 + .name = "i2400m_zrealloc_2x",
86202 + .file = "drivers/net/wimax/i2400m/fw.c",
86203 + .param3 = 1,
86204 +};
86205 +
86206 +struct size_overflow_hash _000748_hash = {
86207 + .next = NULL,
86208 + .name = "i2cdev_read",
86209 + .file = "drivers/i2c/i2c-dev.c",
86210 + .param3 = 1,
86211 +};
86212 +
86213 +struct size_overflow_hash _000749_hash = {
86214 + .next = &_000459_hash,
86215 + .name = "i2cdev_write",
86216 + .file = "drivers/i2c/i2c-dev.c",
86217 + .param3 = 1,
86218 +};
86219 +
86220 +struct size_overflow_hash _000750_hash = {
86221 + .next = NULL,
86222 + .name = "ib_alloc_device",
86223 + .file = "include/rdma/ib_verbs.h",
86224 + .param1 = 1,
86225 +};
86226 +
86227 +struct size_overflow_hash _000751_hash = {
86228 + .next = NULL,
86229 + .name = "ib_copy_from_udata",
86230 + .file = "include/rdma/ib_verbs.h",
86231 + .param3 = 1,
86232 +};
86233 +
86234 +struct size_overflow_hash _000752_hash = {
86235 + .next = NULL,
86236 + .name = "ib_copy_to_udata",
86237 + .file = "include/rdma/ib_verbs.h",
86238 + .param3 = 1,
86239 +};
86240 +
86241 +struct size_overflow_hash _000753_hash = {
86242 + .next = NULL,
86243 + .name = "ibmasm_new_command",
86244 + .file = "drivers/misc/ibmasm/command.c",
86245 + .param2 = 1,
86246 +};
86247 +
86248 +struct size_overflow_hash _000754_hash = {
86249 + .next = NULL,
86250 + .name = "ib_ucm_alloc_data",
86251 + .file = "drivers/infiniband/core/ucm.c",
86252 + .param3 = 1,
86253 +};
86254 +
86255 +struct size_overflow_hash _000755_hash = {
86256 + .next = NULL,
86257 + .name = "ib_umad_write",
86258 + .file = "drivers/infiniband/core/user_mad.c",
86259 + .param3 = 1,
86260 +};
86261 +
86262 +struct size_overflow_hash _000756_hash = {
86263 + .next = NULL,
86264 + .name = "ib_uverbs_unmarshall_recv",
86265 + .file = "drivers/infiniband/core/uverbs_cmd.c",
86266 + .param5 = 1,
86267 +};
86268 +
86269 +struct size_overflow_hash _000757_hash = {
86270 + .next = NULL,
86271 + .name = "ide_driver_proc_write",
86272 + .file = "drivers/ide/ide-proc.c",
86273 + .param3 = 1,
86274 +};
86275 +
86276 +struct size_overflow_hash _000758_hash = {
86277 + .next = NULL,
86278 + .name = "ide_queue_pc_tail",
86279 + .file = "include/linux/ide.h",
86280 + .param5 = 1,
86281 +};
86282 +
86283 +struct size_overflow_hash _000759_hash = {
86284 + .next = NULL,
86285 + .name = "ide_raw_taskfile",
86286 + .file = "include/linux/ide.h",
86287 + .param4 = 1,
86288 +};
86289 +
86290 +struct size_overflow_hash _000760_hash = {
86291 + .next = NULL,
86292 + .name = "ide_settings_proc_write",
86293 + .file = "drivers/ide/ide-proc.c",
86294 + .param3 = 1,
86295 +};
86296 +
86297 +struct size_overflow_hash _000761_hash = {
86298 + .next = NULL,
86299 + .name = "idetape_chrdev_read",
86300 + .file = "drivers/ide/ide-tape.c",
86301 + .param3 = 1,
86302 +};
86303 +
86304 +struct size_overflow_hash _000762_hash = {
86305 + .next = NULL,
86306 + .name = "idetape_chrdev_write",
86307 + .file = "drivers/ide/ide-tape.c",
86308 + .param3 = 1,
86309 +};
86310 +
86311 +struct size_overflow_hash _000763_hash = {
86312 + .next = NULL,
86313 + .name = "idmouse_read",
86314 + .file = "drivers/usb/misc/idmouse.c",
86315 + .param3 = 1,
86316 +};
86317 +
86318 +struct size_overflow_hash _000764_hash = {
86319 + .next = NULL,
86320 + .name = "ieee80211_build_probe_req",
86321 + .file = "net/mac80211/util.c",
86322 + .param7 = 1,
86323 +};
86324 +
86325 +struct size_overflow_hash _000765_hash = {
86326 + .next = NULL,
86327 + .name = "ieee80211_if_read",
86328 + .file = "net/mac80211/debugfs_netdev.c",
86329 + .param3 = 1,
86330 +};
86331 +
86332 +struct size_overflow_hash _000766_hash = {
86333 + .next = NULL,
86334 + .name = "ieee80211_if_write",
86335 + .file = "net/mac80211/debugfs_netdev.c",
86336 + .param3 = 1,
86337 +};
86338 +
86339 +struct size_overflow_hash _000767_hash = {
86340 + .next = NULL,
86341 + .name = "ieee80211_key_alloc",
86342 + .file = "net/mac80211/key.c",
86343 + .param3 = 1,
86344 +};
86345 +
86346 +struct size_overflow_hash _000768_hash = {
86347 + .next = NULL,
86348 + .name = "ieee80211_mgmt_tx",
86349 + .file = "net/mac80211/cfg.c",
86350 + .param9 = 1,
86351 +};
86352 +
86353 +struct size_overflow_hash _000769_hash = {
86354 + .next = NULL,
86355 + .name = "ikconfig_read_current",
86356 + .file = "kernel/configs.c",
86357 + .param3 = 1,
86358 +};
86359 +
86360 +struct size_overflow_hash _000770_hash = {
86361 + .next = NULL,
86362 + .name = "il3945_sta_dbgfs_stats_table_read",
86363 + .file = "drivers/net/wireless/iwlegacy/3945-rs.c",
86364 + .param3 = 1,
86365 +};
86366 +
86367 +struct size_overflow_hash _000771_hash = {
86368 + .next = NULL,
86369 + .name = "il3945_ucode_general_stats_read",
86370 + .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
86371 + .param3 = 1,
86372 +};
86373 +
86374 +struct size_overflow_hash _000772_hash = {
86375 + .next = NULL,
86376 + .name = "il3945_ucode_rx_stats_read",
86377 + .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
86378 + .param3 = 1,
86379 +};
86380 +
86381 +struct size_overflow_hash _000773_hash = {
86382 + .next = NULL,
86383 + .name = "il3945_ucode_tx_stats_read",
86384 + .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
86385 + .param3 = 1,
86386 +};
86387 +
86388 +struct size_overflow_hash _000774_hash = {
86389 + .next = NULL,
86390 + .name = "il4965_rs_sta_dbgfs_rate_scale_data_read",
86391 + .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
86392 + .param3 = 1,
86393 +};
86394 +
86395 +struct size_overflow_hash _000775_hash = {
86396 + .next = NULL,
86397 + .name = "il4965_rs_sta_dbgfs_scale_table_read",
86398 + .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
86399 + .param3 = 1,
86400 +};
86401 +
86402 +struct size_overflow_hash _000776_hash = {
86403 + .next = NULL,
86404 + .name = "il4965_rs_sta_dbgfs_stats_table_read",
86405 + .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
86406 + .param3 = 1,
86407 +};
86408 +
86409 +struct size_overflow_hash _000777_hash = {
86410 + .next = NULL,
86411 + .name = "il4965_ucode_general_stats_read",
86412 + .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
86413 + .param3 = 1,
86414 +};
86415 +
86416 +struct size_overflow_hash _000778_hash = {
86417 + .next = NULL,
86418 + .name = "il4965_ucode_rx_stats_read",
86419 + .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
86420 + .param3 = 1,
86421 +};
86422 +
86423 +struct size_overflow_hash _000779_hash = {
86424 + .next = NULL,
86425 + .name = "il4965_ucode_tx_stats_read",
86426 + .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
86427 + .param3 = 1,
86428 +};
86429 +
86430 +struct size_overflow_hash _000780_hash = {
86431 + .next = NULL,
86432 + .name = "il_dbgfs_chain_noise_read",
86433 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86434 + .param3 = 1,
86435 +};
86436 +
86437 +struct size_overflow_hash _000781_hash = {
86438 + .next = NULL,
86439 + .name = "il_dbgfs_channels_read",
86440 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86441 + .param3 = 1,
86442 +};
86443 +
86444 +struct size_overflow_hash _000782_hash = {
86445 + .next = NULL,
86446 + .name = "il_dbgfs_disable_ht40_read",
86447 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86448 + .param3 = 1,
86449 +};
86450 +
86451 +struct size_overflow_hash _000783_hash = {
86452 + .next = NULL,
86453 + .name = "il_dbgfs_fh_reg_read",
86454 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86455 + .param3 = 1,
86456 +};
86457 +
86458 +struct size_overflow_hash _000784_hash = {
86459 + .next = NULL,
86460 + .name = "il_dbgfs_force_reset_read",
86461 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86462 + .param3 = 1,
86463 +};
86464 +
86465 +struct size_overflow_hash _000785_hash = {
86466 + .next = NULL,
86467 + .name = "il_dbgfs_interrupt_read",
86468 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86469 + .param3 = 1,
86470 +};
86471 +
86472 +struct size_overflow_hash _000786_hash = {
86473 + .next = NULL,
86474 + .name = "il_dbgfs_missed_beacon_read",
86475 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86476 + .param3 = 1,
86477 +};
86478 +
86479 +struct size_overflow_hash _000787_hash = {
86480 + .next = NULL,
86481 + .name = "il_dbgfs_nvm_read",
86482 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86483 + .param3 = 1,
86484 +};
86485 +
86486 +struct size_overflow_hash _000788_hash = {
86487 + .next = NULL,
86488 + .name = "il_dbgfs_power_save_status_read",
86489 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86490 + .param3 = 1,
86491 +};
86492 +
86493 +struct size_overflow_hash _000789_hash = {
86494 + .next = NULL,
86495 + .name = "il_dbgfs_qos_read",
86496 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86497 + .param3 = 1,
86498 +};
86499 +
86500 +struct size_overflow_hash _000790_hash = {
86501 + .next = &_000221_hash,
86502 + .name = "il_dbgfs_rxon_filter_flags_read",
86503 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86504 + .param3 = 1,
86505 +};
86506 +
86507 +struct size_overflow_hash _000791_hash = {
86508 + .next = NULL,
86509 + .name = "il_dbgfs_rxon_flags_read",
86510 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86511 + .param3 = 1,
86512 +};
86513 +
86514 +struct size_overflow_hash _000792_hash = {
86515 + .next = NULL,
86516 + .name = "il_dbgfs_rx_queue_read",
86517 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86518 + .param3 = 1,
86519 +};
86520 +
86521 +struct size_overflow_hash _000793_hash = {
86522 + .next = NULL,
86523 + .name = "il_dbgfs_rx_stats_read",
86524 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86525 + .param3 = 1,
86526 +};
86527 +
86528 +struct size_overflow_hash _000794_hash = {
86529 + .next = NULL,
86530 + .name = "il_dbgfs_sensitivity_read",
86531 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86532 + .param3 = 1,
86533 +};
86534 +
86535 +struct size_overflow_hash _000795_hash = {
86536 + .next = NULL,
86537 + .name = "il_dbgfs_sram_read",
86538 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86539 + .param3 = 1,
86540 +};
86541 +
86542 +struct size_overflow_hash _000796_hash = {
86543 + .next = NULL,
86544 + .name = "il_dbgfs_stations_read",
86545 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86546 + .param3 = 1,
86547 +};
86548 +
86549 +struct size_overflow_hash _000797_hash = {
86550 + .next = NULL,
86551 + .name = "il_dbgfs_status_read",
86552 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86553 + .param3 = 1,
86554 +};
86555 +
86556 +struct size_overflow_hash _000798_hash = {
86557 + .next = NULL,
86558 + .name = "il_dbgfs_traffic_log_read",
86559 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86560 + .param3 = 1,
86561 +};
86562 +
86563 +struct size_overflow_hash _000799_hash = {
86564 + .next = NULL,
86565 + .name = "il_dbgfs_tx_queue_read",
86566 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86567 + .param3 = 1,
86568 +};
86569 +
86570 +struct size_overflow_hash _000800_hash = {
86571 + .next = NULL,
86572 + .name = "il_dbgfs_tx_stats_read",
86573 + .file = "drivers/net/wireless/iwlegacy/debug.c",
86574 + .param3 = 1,
86575 +};
86576 +
86577 +struct size_overflow_hash _000801_hash = {
86578 + .next = NULL,
86579 + .name = "ilo_read",
86580 + .file = "drivers/misc/hpilo.c",
86581 + .param3 = 1,
86582 +};
86583 +
86584 +struct size_overflow_hash _000802_hash = {
86585 + .next = NULL,
86586 + .name = "ilo_write",
86587 + .file = "drivers/misc/hpilo.c",
86588 + .param3 = 1,
86589 +};
86590 +
86591 +struct size_overflow_hash _000803_hash = {
86592 + .next = NULL,
86593 + .name = "init_data_container",
86594 + .file = "fs/btrfs/backref.c",
86595 + .param1 = 1,
86596 +};
86597 +
86598 +struct size_overflow_hash _000804_hash = {
86599 + .next = NULL,
86600 + .name = "init_list_set",
86601 + .file = "net/netfilter/ipset/ip_set_list_set.c",
86602 + .param2 = 1,
86603 + .param3 = 1,
86604 +};
86605 +
86606 +struct size_overflow_hash _000806_hash = {
86607 + .next = NULL,
86608 + .name = "interpret_user_input",
86609 + .file = "fs/ubifs/debug.c",
86610 + .param2 = 1,
86611 +};
86612 +
86613 +struct size_overflow_hash _000807_hash = {
86614 + .next = NULL,
86615 + .name = "int_proc_write",
86616 + .file = "drivers/net/wireless/ray_cs.c",
86617 + .param3 = 1,
86618 +};
86619 +
86620 +struct size_overflow_hash _000808_hash = {
86621 + .next = NULL,
86622 + .name = "iowarrior_read",
86623 + .file = "drivers/usb/misc/iowarrior.c",
86624 + .param3 = 1,
86625 +};
86626 +
86627 +struct size_overflow_hash _000809_hash = {
86628 + .next = NULL,
86629 + .name = "iowarrior_write",
86630 + .file = "drivers/usb/misc/iowarrior.c",
86631 + .param3 = 1,
86632 +};
86633 +
86634 +struct size_overflow_hash _000810_hash = {
86635 + .next = NULL,
86636 + .name = "ip_set_alloc",
86637 + .file = "include/linux/netfilter/ipset/ip_set.h",
86638 + .param1 = 1,
86639 +};
86640 +
86641 +struct size_overflow_hash _000811_hash = {
86642 + .next = NULL,
86643 + .name = "ip_vs_conn_fill_param_sync",
86644 + .file = "net/netfilter/ipvs/ip_vs_sync.c",
86645 + .param6 = 1,
86646 +};
86647 +
86648 +struct size_overflow_hash _000812_hash = {
86649 + .next = NULL,
86650 + .name = "irda_setsockopt",
86651 + .file = "net/irda/af_irda.c",
86652 + .param5 = 1,
86653 +};
86654 +
86655 +struct size_overflow_hash _000813_hash = {
86656 + .next = NULL,
86657 + .name = "ir_lirc_transmit_ir",
86658 + .file = "drivers/media/rc/ir-lirc-codec.c",
86659 + .param3 = 1,
86660 +};
86661 +
86662 +struct size_overflow_hash _000814_hash = {
86663 + .next = NULL,
86664 + .name = "irnet_ctrl_write",
86665 + .file = "net/irda/irnet/irnet_ppp.c",
86666 + .param3 = 1,
86667 +};
86668 +
86669 +struct size_overflow_hash _000815_hash = {
86670 + .next = NULL,
86671 + .name = "iscsi_decode_text_input",
86672 + .file = "drivers/target/iscsi/iscsi_target_parameters.c",
86673 + .param4 = 1,
86674 +};
86675 +
86676 +struct size_overflow_hash _000816_hash = {
86677 + .next = NULL,
86678 + .name = "iscsit_dump_data_payload",
86679 + .file = "drivers/target/iscsi/iscsi_target_erl1.c",
86680 + .param2 = 1,
86681 +};
86682 +
86683 +struct size_overflow_hash _000817_hash = {
86684 + .next = NULL,
86685 + .name = "isdn_read",
86686 + .file = "drivers/isdn/i4l/isdn_common.c",
86687 + .param3 = 1,
86688 +};
86689 +
86690 +struct size_overflow_hash _000818_hash = {
86691 + .next = NULL,
86692 + .name = "iso_callback",
86693 + .file = "drivers/firewire/core-cdev.c",
86694 + .param3 = 1,
86695 +};
86696 +
86697 +struct size_overflow_hash _000819_hash = {
86698 + .next = NULL,
86699 + .name = "iso_packets_buffer_init",
86700 + .file = "sound/firewire/packets-buffer.c",
86701 + .param3 = 1,
86702 +};
86703 +
86704 +struct size_overflow_hash _000820_hash = {
86705 + .next = NULL,
86706 + .name = "iso_sched_alloc",
86707 + .file = "drivers/usb/host/ehci-sched.c",
86708 + .param1 = 1,
86709 +};
86710 +
86711 +struct size_overflow_hash _000821_hash = {
86712 + .next = NULL,
86713 + .name = "isr_cmd_cmplt_read",
86714 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86715 + .param3 = 1,
86716 +};
86717 +
86718 +struct size_overflow_hash _000822_hash = {
86719 + .next = NULL,
86720 + .name = "isr_commands_read",
86721 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86722 + .param3 = 1,
86723 +};
86724 +
86725 +struct size_overflow_hash _000823_hash = {
86726 + .next = NULL,
86727 + .name = "isr_decrypt_done_read",
86728 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86729 + .param3 = 1,
86730 +};
86731 +
86732 +struct size_overflow_hash _000824_hash = {
86733 + .next = NULL,
86734 + .name = "isr_dma0_done_read",
86735 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86736 + .param3 = 1,
86737 +};
86738 +
86739 +struct size_overflow_hash _000825_hash = {
86740 + .next = NULL,
86741 + .name = "isr_dma1_done_read",
86742 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86743 + .param3 = 1,
86744 +};
86745 +
86746 +struct size_overflow_hash _000826_hash = {
86747 + .next = NULL,
86748 + .name = "isr_fiqs_read",
86749 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86750 + .param3 = 1,
86751 +};
86752 +
86753 +struct size_overflow_hash _000827_hash = {
86754 + .next = NULL,
86755 + .name = "isr_host_acknowledges_read",
86756 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86757 + .param3 = 1,
86758 +};
86759 +
86760 +struct size_overflow_hash _000828_hash = {
86761 + .next = &_000629_hash,
86762 + .name = "isr_hw_pm_mode_changes_read",
86763 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86764 + .param3 = 1,
86765 +};
86766 +
86767 +struct size_overflow_hash _000829_hash = {
86768 + .next = &_000329_hash,
86769 + .name = "isr_irqs_read",
86770 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86771 + .param3 = 1,
86772 +};
86773 +
86774 +struct size_overflow_hash _000830_hash = {
86775 + .next = NULL,
86776 + .name = "isr_low_rssi_read",
86777 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86778 + .param3 = 1,
86779 +};
86780 +
86781 +struct size_overflow_hash _000831_hash = {
86782 + .next = NULL,
86783 + .name = "isr_pci_pm_read",
86784 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86785 + .param3 = 1,
86786 +};
86787 +
86788 +struct size_overflow_hash _000832_hash = {
86789 + .next = NULL,
86790 + .name = "isr_rx_headers_read",
86791 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86792 + .param3 = 1,
86793 +};
86794 +
86795 +struct size_overflow_hash _000833_hash = {
86796 + .next = NULL,
86797 + .name = "isr_rx_mem_overflow_read",
86798 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86799 + .param3 = 1,
86800 +};
86801 +
86802 +struct size_overflow_hash _000834_hash = {
86803 + .next = NULL,
86804 + .name = "isr_rx_procs_read",
86805 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86806 + .param3 = 1,
86807 +};
86808 +
86809 +struct size_overflow_hash _000835_hash = {
86810 + .next = NULL,
86811 + .name = "isr_rx_rdys_read",
86812 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86813 + .param3 = 1,
86814 +};
86815 +
86816 +struct size_overflow_hash _000836_hash = {
86817 + .next = NULL,
86818 + .name = "isr_tx_exch_complete_read",
86819 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86820 + .param3 = 1,
86821 +};
86822 +
86823 +struct size_overflow_hash _000837_hash = {
86824 + .next = NULL,
86825 + .name = "isr_tx_procs_read",
86826 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86827 + .param3 = 1,
86828 +};
86829 +
86830 +struct size_overflow_hash _000838_hash = {
86831 + .next = NULL,
86832 + .name = "isr_wakeups_read",
86833 + .file = "drivers/net/wireless/wl1251/debugfs.c",
86834 + .param3 = 1,
86835 +};
86836 +
86837 +struct size_overflow_hash _000839_hash = {
86838 + .next = NULL,
86839 + .name = "ivtv_copy_buf_to_user",
86840 + .file = "drivers/media/video/ivtv/ivtv-fileops.c",
86841 + .param4 = 1,
86842 +};
86843 +
86844 +struct size_overflow_hash _000840_hash = {
86845 + .next = NULL,
86846 + .name = "iwl_dbgfs_bt_traffic_read",
86847 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86848 + .param3 = 1,
86849 +};
86850 +
86851 +struct size_overflow_hash _000841_hash = {
86852 + .next = NULL,
86853 + .name = "iwl_dbgfs_chain_noise_read",
86854 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86855 + .param3 = 1,
86856 +};
86857 +
86858 +struct size_overflow_hash _000842_hash = {
86859 + .next = NULL,
86860 + .name = "iwl_dbgfs_channels_read",
86861 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86862 + .param3 = 1,
86863 +};
86864 +
86865 +struct size_overflow_hash _000843_hash = {
86866 + .next = NULL,
86867 + .name = "iwl_dbgfs_current_sleep_command_read",
86868 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86869 + .param3 = 1,
86870 +};
86871 +
86872 +struct size_overflow_hash _000844_hash = {
86873 + .next = NULL,
86874 + .name = "iwl_dbgfs_debug_level_read",
86875 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86876 + .param3 = 1,
86877 +};
86878 +
86879 +struct size_overflow_hash _000845_hash = {
86880 + .next = NULL,
86881 + .name = "iwl_dbgfs_debug_level_write",
86882 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86883 + .param3 = 1,
86884 +};
86885 +
86886 +struct size_overflow_hash _000846_hash = {
86887 + .next = NULL,
86888 + .name = "iwl_dbgfs_disable_ht40_read",
86889 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86890 + .param3 = 1,
86891 +};
86892 +
86893 +struct size_overflow_hash _000847_hash = {
86894 + .next = NULL,
86895 + .name = "iwl_dbgfs_fh_reg_read",
86896 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
86897 + .param3 = 1,
86898 +};
86899 +
86900 +struct size_overflow_hash _000848_hash = {
86901 + .next = NULL,
86902 + .name = "iwl_dbgfs_force_reset_read",
86903 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86904 + .param3 = 1,
86905 +};
86906 +
86907 +struct size_overflow_hash _000849_hash = {
86908 + .next = NULL,
86909 + .name = "iwl_dbgfs_interrupt_read",
86910 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
86911 + .param3 = 1,
86912 +};
86913 +
86914 +struct size_overflow_hash _000850_hash = {
86915 + .next = NULL,
86916 + .name = "iwl_dbgfs_log_event_read",
86917 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
86918 + .param3 = 1,
86919 +};
86920 +
86921 +struct size_overflow_hash _000851_hash = {
86922 + .next = NULL,
86923 + .name = "iwl_dbgfs_missed_beacon_read",
86924 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86925 + .param3 = 1,
86926 +};
86927 +
86928 +struct size_overflow_hash _000852_hash = {
86929 + .next = NULL,
86930 + .name = "iwl_dbgfs_nvm_read",
86931 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86932 + .param3 = 1,
86933 +};
86934 +
86935 +struct size_overflow_hash _000853_hash = {
86936 + .next = NULL,
86937 + .name = "iwl_dbgfs_plcp_delta_read",
86938 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86939 + .param3 = 1,
86940 +};
86941 +
86942 +struct size_overflow_hash _000854_hash = {
86943 + .next = NULL,
86944 + .name = "iwl_dbgfs_power_save_status_read",
86945 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86946 + .param3 = 1,
86947 +};
86948 +
86949 +struct size_overflow_hash _000855_hash = {
86950 + .next = NULL,
86951 + .name = "iwl_dbgfs_protection_mode_read",
86952 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86953 + .param3 = 1,
86954 +};
86955 +
86956 +struct size_overflow_hash _000856_hash = {
86957 + .next = NULL,
86958 + .name = "iwl_dbgfs_qos_read",
86959 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86960 + .param3 = 1,
86961 +};
86962 +
86963 +struct size_overflow_hash _000857_hash = {
86964 + .next = NULL,
86965 + .name = "iwl_dbgfs_reply_tx_error_read",
86966 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86967 + .param3 = 1,
86968 +};
86969 +
86970 +struct size_overflow_hash _000858_hash = {
86971 + .next = NULL,
86972 + .name = "iwl_dbgfs_rx_handlers_read",
86973 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86974 + .param3 = 1,
86975 +};
86976 +
86977 +struct size_overflow_hash _000859_hash = {
86978 + .next = NULL,
86979 + .name = "iwl_dbgfs_rxon_filter_flags_read",
86980 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86981 + .param3 = 1,
86982 +};
86983 +
86984 +struct size_overflow_hash _000860_hash = {
86985 + .next = NULL,
86986 + .name = "iwl_dbgfs_rxon_flags_read",
86987 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
86988 + .param3 = 1,
86989 +};
86990 +
86991 +struct size_overflow_hash _000861_hash = {
86992 + .next = NULL,
86993 + .name = "iwl_dbgfs_rx_queue_read",
86994 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
86995 + .param3 = 1,
86996 +};
86997 +
86998 +struct size_overflow_hash _000862_hash = {
86999 + .next = NULL,
87000 + .name = "iwl_dbgfs_rx_statistics_read",
87001 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87002 + .param3 = 1,
87003 +};
87004 +
87005 +struct size_overflow_hash _000863_hash = {
87006 + .next = NULL,
87007 + .name = "iwl_dbgfs_sensitivity_read",
87008 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87009 + .param3 = 1,
87010 +};
87011 +
87012 +struct size_overflow_hash _000864_hash = {
87013 + .next = NULL,
87014 + .name = "iwl_dbgfs_sleep_level_override_read",
87015 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87016 + .param3 = 1,
87017 +};
87018 +
87019 +struct size_overflow_hash _000865_hash = {
87020 + .next = NULL,
87021 + .name = "iwl_dbgfs_sram_read",
87022 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87023 + .param3 = 1,
87024 +};
87025 +
87026 +struct size_overflow_hash _000866_hash = {
87027 + .next = NULL,
87028 + .name = "iwl_dbgfs_stations_read",
87029 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87030 + .param3 = 1,
87031 +};
87032 +
87033 +struct size_overflow_hash _000867_hash = {
87034 + .next = NULL,
87035 + .name = "iwl_dbgfs_status_read",
87036 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87037 + .param3 = 1,
87038 +};
87039 +
87040 +struct size_overflow_hash _000868_hash = {
87041 + .next = NULL,
87042 + .name = "iwl_dbgfs_temperature_read",
87043 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87044 + .param3 = 1,
87045 +};
87046 +
87047 +struct size_overflow_hash _000869_hash = {
87048 + .next = NULL,
87049 + .name = "iwl_dbgfs_thermal_throttling_read",
87050 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87051 + .param3 = 1,
87052 +};
87053 +
87054 +struct size_overflow_hash _000870_hash = {
87055 + .next = NULL,
87056 + .name = "iwl_dbgfs_traffic_log_read",
87057 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87058 + .param3 = 1,
87059 +};
87060 +
87061 +struct size_overflow_hash _000871_hash = {
87062 + .next = NULL,
87063 + .name = "iwl_dbgfs_tx_queue_read",
87064 + .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
87065 + .param3 = 1,
87066 +};
87067 +
87068 +struct size_overflow_hash _000872_hash = {
87069 + .next = NULL,
87070 + .name = "iwl_dbgfs_tx_statistics_read",
87071 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87072 + .param3 = 1,
87073 +};
87074 +
87075 +struct size_overflow_hash _000873_hash = {
87076 + .next = NULL,
87077 + .name = "iwl_dbgfs_ucode_bt_stats_read",
87078 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87079 + .param3 = 1,
87080 +};
87081 +
87082 +struct size_overflow_hash _000874_hash = {
87083 + .next = NULL,
87084 + .name = "iwl_dbgfs_ucode_general_stats_read",
87085 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87086 + .param3 = 1,
87087 +};
87088 +
87089 +struct size_overflow_hash _000875_hash = {
87090 + .next = NULL,
87091 + .name = "iwl_dbgfs_ucode_rx_stats_read",
87092 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87093 + .param3 = 1,
87094 +};
87095 +
87096 +struct size_overflow_hash _000876_hash = {
87097 + .next = NULL,
87098 + .name = "iwl_dbgfs_ucode_tracing_read",
87099 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87100 + .param3 = 1,
87101 +};
87102 +
87103 +struct size_overflow_hash _000877_hash = {
87104 + .next = NULL,
87105 + .name = "iwl_dbgfs_ucode_tx_stats_read",
87106 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87107 + .param3 = 1,
87108 +};
87109 +
87110 +struct size_overflow_hash _000878_hash = {
87111 + .next = NULL,
87112 + .name = "iwl_dbgfs_wowlan_sram_read",
87113 + .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
87114 + .param3 = 1,
87115 +};
87116 +
87117 +struct size_overflow_hash _000879_hash = {
87118 + .next = NULL,
87119 + .name = "iwmct_fw_parser_init",
87120 + .file = "drivers/misc/iwmc3200top/fw-download.c",
87121 + .param4 = 1,
87122 +};
87123 +
87124 +struct size_overflow_hash _000880_hash = {
87125 + .next = NULL,
87126 + .name = "iwm_notif_send",
87127 + .file = "drivers/net/wireless/iwmc3200wifi/main.c",
87128 + .param6 = 1,
87129 +};
87130 +
87131 +struct size_overflow_hash _000881_hash = {
87132 + .next = NULL,
87133 + .name = "iwm_ntf_calib_res",
87134 + .file = "drivers/net/wireless/iwmc3200wifi/rx.c",
87135 + .param3 = 1,
87136 +};
87137 +
87138 +struct size_overflow_hash _000882_hash = {
87139 + .next = NULL,
87140 + .name = "iwm_umac_set_config_var",
87141 + .file = "drivers/net/wireless/iwmc3200wifi/commands.c",
87142 + .param4 = 1,
87143 +};
87144 +
87145 +struct size_overflow_hash _000883_hash = {
87146 + .next = NULL,
87147 + .name = "jbd2_alloc",
87148 + .file = "include/linux/jbd2.h",
87149 + .param1 = 1,
87150 +};
87151 +
87152 +struct size_overflow_hash _000884_hash = {
87153 + .next = NULL,
87154 + .name = "key_algorithm_read",
87155 + .file = "net/mac80211/debugfs_key.c",
87156 + .param3 = 1,
87157 +};
87158 +
87159 +struct size_overflow_hash _000885_hash = {
87160 + .next = NULL,
87161 + .name = "key_icverrors_read",
87162 + .file = "net/mac80211/debugfs_key.c",
87163 + .param3 = 1,
87164 +};
87165 +
87166 +struct size_overflow_hash _000886_hash = {
87167 + .next = NULL,
87168 + .name = "key_key_read",
87169 + .file = "net/mac80211/debugfs_key.c",
87170 + .param3 = 1,
87171 +};
87172 +
87173 +struct size_overflow_hash _000887_hash = {
87174 + .next = NULL,
87175 + .name = "key_replays_read",
87176 + .file = "net/mac80211/debugfs_key.c",
87177 + .param3 = 1,
87178 +};
87179 +
87180 +struct size_overflow_hash _000888_hash = {
87181 + .next = NULL,
87182 + .name = "key_rx_spec_read",
87183 + .file = "net/mac80211/debugfs_key.c",
87184 + .param3 = 1,
87185 +};
87186 +
87187 +struct size_overflow_hash _000889_hash = {
87188 + .next = NULL,
87189 + .name = "key_tx_spec_read",
87190 + .file = "net/mac80211/debugfs_key.c",
87191 + .param3 = 1,
87192 +};
87193 +
87194 +struct size_overflow_hash _000890_hash = {
87195 + .next = NULL,
87196 + .name = "kmem_alloc",
87197 + .file = "fs/xfs/kmem.c",
87198 + .param1 = 1,
87199 +};
87200 +
87201 +struct size_overflow_hash _000891_hash = {
87202 + .next = NULL,
87203 + .name = "kmem_zalloc_large",
87204 + .file = "fs/xfs/kmem.h",
87205 + .param1 = 1,
87206 +};
87207 +
87208 +struct size_overflow_hash _000892_hash = {
87209 + .next = NULL,
87210 + .name = "kone_receive",
87211 + .file = "drivers/hid/hid-roccat-kone.c",
87212 + .param4 = 1,
87213 +};
87214 +
87215 +struct size_overflow_hash _000893_hash = {
87216 + .next = NULL,
87217 + .name = "kone_send",
87218 + .file = "drivers/hid/hid-roccat-kone.c",
87219 + .param4 = 1,
87220 +};
87221 +
87222 +struct size_overflow_hash _000894_hash = {
87223 + .next = NULL,
87224 + .name = "kvm_read_guest_atomic",
87225 + .file = "include/linux/kvm_host.h",
87226 + .param4 = 1,
87227 +};
87228 +
87229 +struct size_overflow_hash _000895_hash = {
87230 + .next = NULL,
87231 + .name = "kvm_read_guest_cached",
87232 + .file = "include/linux/kvm_host.h",
87233 + .param4 = 1,
87234 +};
87235 +
87236 +struct size_overflow_hash _000896_hash = {
87237 + .next = NULL,
87238 + .name = "kvm_set_irq_routing",
87239 + .file = "include/linux/kvm_host.h",
87240 + .param3 = 1,
87241 +};
87242 +
87243 +struct size_overflow_hash _000897_hash = {
87244 + .next = NULL,
87245 + .name = "kvm_write_guest_cached",
87246 + .file = "include/linux/kvm_host.h",
87247 + .param4 = 1,
87248 +};
87249 +
87250 +struct size_overflow_hash _000898_hash = {
87251 + .next = NULL,
87252 + .name = "l2cap_sock_setsockopt",
87253 + .file = "net/bluetooth/l2cap_sock.c",
87254 + .param5 = 1,
87255 +};
87256 +
87257 +struct size_overflow_hash _000899_hash = {
87258 + .next = NULL,
87259 + .name = "l2cap_sock_setsockopt_old",
87260 + .file = "net/bluetooth/l2cap_sock.c",
87261 + .param4 = 1,
87262 +};
87263 +
87264 +struct size_overflow_hash _000900_hash = {
87265 + .next = NULL,
87266 + .name = "lane2_associate_req",
87267 + .file = "net/atm/lec.c",
87268 + .param4 = 1,
87269 +};
87270 +
87271 +struct size_overflow_hash _000901_hash = {
87272 + .next = NULL,
87273 + .name = "lbs_debugfs_read",
87274 + .file = "drivers/net/wireless/libertas/debugfs.c",
87275 + .param3 = 1,
87276 +};
87277 +
87278 +struct size_overflow_hash _000902_hash = {
87279 + .next = NULL,
87280 + .name = "lbs_debugfs_write",
87281 + .file = "drivers/net/wireless/libertas/debugfs.c",
87282 + .param3 = 1,
87283 +};
87284 +
87285 +struct size_overflow_hash _000903_hash = {
87286 + .next = NULL,
87287 + .name = "lbs_dev_info",
87288 + .file = "drivers/net/wireless/libertas/debugfs.c",
87289 + .param3 = 1,
87290 +};
87291 +
87292 +struct size_overflow_hash _000904_hash = {
87293 + .next = NULL,
87294 + .name = "lbs_host_sleep_read",
87295 + .file = "drivers/net/wireless/libertas/debugfs.c",
87296 + .param3 = 1,
87297 +};
87298 +
87299 +struct size_overflow_hash _000905_hash = {
87300 + .next = NULL,
87301 + .name = "lbs_rdbbp_read",
87302 + .file = "drivers/net/wireless/libertas/debugfs.c",
87303 + .param3 = 1,
87304 +};
87305 +
87306 +struct size_overflow_hash _000906_hash = {
87307 + .next = NULL,
87308 + .name = "lbs_rdmac_read",
87309 + .file = "drivers/net/wireless/libertas/debugfs.c",
87310 + .param3 = 1,
87311 +};
87312 +
87313 +struct size_overflow_hash _000907_hash = {
87314 + .next = NULL,
87315 + .name = "lbs_rdrf_read",
87316 + .file = "drivers/net/wireless/libertas/debugfs.c",
87317 + .param3 = 1,
87318 +};
87319 +
87320 +struct size_overflow_hash _000908_hash = {
87321 + .next = NULL,
87322 + .name = "lbs_sleepparams_read",
87323 + .file = "drivers/net/wireless/libertas/debugfs.c",
87324 + .param3 = 1,
87325 +};
87326 +
87327 +struct size_overflow_hash _000909_hash = {
87328 + .next = NULL,
87329 + .name = "lbs_threshold_read",
87330 + .file = "drivers/net/wireless/libertas/debugfs.c",
87331 + .param5 = 1,
87332 +};
87333 +
87334 +struct size_overflow_hash _000910_hash = {
87335 + .next = NULL,
87336 + .name = "lc_create",
87337 + .file = "include/linux/lru_cache.h",
87338 + .param3 = 1,
87339 +};
87340 +
87341 +struct size_overflow_hash _000911_hash = {
87342 + .next = NULL,
87343 + .name = "lcd_write",
87344 + .file = "drivers/usb/misc/usblcd.c",
87345 + .param3 = 1,
87346 +};
87347 +
87348 +struct size_overflow_hash _000912_hash = {
87349 + .next = NULL,
87350 + .name = "leaf_dealloc",
87351 + .file = "fs/gfs2/dir.c",
87352 + .param3 = 1,
87353 +};
87354 +
87355 +struct size_overflow_hash _000913_hash = {
87356 + .next = NULL,
87357 + .name = "__lgread",
87358 + .file = "drivers/lguest/core.c",
87359 + .param4 = 1,
87360 +};
87361 +
87362 +struct size_overflow_hash _000914_hash = {
87363 + .next = NULL,
87364 + .name = "__lgwrite",
87365 + .file = "drivers/lguest/core.c",
87366 + .param4 = 1,
87367 +};
87368 +
87369 +struct size_overflow_hash _000915_hash = {
87370 + .next = NULL,
87371 + .name = "link_send_sections_long",
87372 + .file = "net/tipc/link.c",
87373 + .param4 = 1,
87374 +};
87375 +
87376 +struct size_overflow_hash _000916_hash = {
87377 + .next = NULL,
87378 + .name = "lirc_buffer_init",
87379 + .file = "include/media/lirc_dev.h",
87380 + .param2 = 1,
87381 + .param3 = 1,
87382 +};
87383 +
87384 +struct size_overflow_hash _000918_hash = {
87385 + .next = NULL,
87386 + .name = "lkdtm_debugfs_read",
87387 + .file = "drivers/misc/lkdtm.c",
87388 + .param3 = 1,
87389 +};
87390 +
87391 +struct size_overflow_hash _000919_hash = {
87392 + .next = NULL,
87393 + .name = "LoadBitmap",
87394 + .file = "drivers/media/dvb/ttpci/av7110_hw.c",
87395 + .param2 = 1,
87396 +};
87397 +
87398 +struct size_overflow_hash _000920_hash = {
87399 + .next = NULL,
87400 + .name = "long_retry_limit_read",
87401 + .file = "net/wireless/debugfs.c",
87402 + .param3 = 1,
87403 +};
87404 +
87405 +struct size_overflow_hash _000921_hash = {
87406 + .next = NULL,
87407 + .name = "lpfc_debugfs_dif_err_read",
87408 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
87409 + .param3 = 1,
87410 +};
87411 +
87412 +struct size_overflow_hash _000922_hash = {
87413 + .next = NULL,
87414 + .name = "lpfc_debugfs_dif_err_write",
87415 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
87416 + .param3 = 1,
87417 +};
87418 +
87419 +struct size_overflow_hash _000923_hash = {
87420 + .next = NULL,
87421 + .name = "lpfc_debugfs_read",
87422 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
87423 + .param3 = 1,
87424 +};
87425 +
87426 +struct size_overflow_hash _000924_hash = {
87427 + .next = NULL,
87428 + .name = "lpfc_idiag_baracc_read",
87429 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
87430 + .param3 = 1,
87431 +};
87432 +
87433 +struct size_overflow_hash _000925_hash = {
87434 + .next = NULL,
87435 + .name = "lpfc_idiag_ctlacc_read",
87436 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
87437 + .param3 = 1,
87438 +};
87439 +
87440 +struct size_overflow_hash _000926_hash = {
87441 + .next = NULL,
87442 + .name = "lpfc_idiag_drbacc_read",
87443 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
87444 + .param3 = 1,
87445 +};
87446 +
87447 +struct size_overflow_hash _000927_hash = {
87448 + .next = NULL,
87449 + .name = "lpfc_idiag_extacc_read",
87450 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
87451 + .param3 = 1,
87452 +};
87453 +
87454 +struct size_overflow_hash _000928_hash = {
87455 + .next = NULL,
87456 + .name = "lpfc_idiag_mbxacc_read",
87457 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
87458 + .param3 = 1,
87459 +};
87460 +
87461 +struct size_overflow_hash _000929_hash = {
87462 + .next = NULL,
87463 + .name = "lpfc_idiag_pcicfg_read",
87464 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
87465 + .param3 = 1,
87466 +};
87467 +
87468 +struct size_overflow_hash _000930_hash = {
87469 + .next = NULL,
87470 + .name = "lpfc_idiag_queacc_read",
87471 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
87472 + .param3 = 1,
87473 +};
87474 +
87475 +struct size_overflow_hash _000931_hash = {
87476 + .next = NULL,
87477 + .name = "lpfc_idiag_queinfo_read",
87478 + .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
87479 + .param3 = 1,
87480 +};
87481 +
87482 +struct size_overflow_hash _000932_hash = {
87483 + .next = NULL,
87484 + .name = "lpfc_sli4_queue_alloc",
87485 + .file = "drivers/scsi/lpfc/lpfc_sli.c",
87486 + .param3 = 1,
87487 +};
87488 +
87489 +struct size_overflow_hash _000933_hash = {
87490 + .next = NULL,
87491 + .name = "lp_write",
87492 + .file = "drivers/char/lp.c",
87493 + .param3 = 1,
87494 +};
87495 +
87496 +struct size_overflow_hash _000934_hash = {
87497 + .next = NULL,
87498 + .name = "mac80211_format_buffer",
87499 + .file = "net/mac80211/debugfs.c",
87500 + .param2 = 1,
87501 +};
87502 +
87503 +struct size_overflow_hash _000935_hash = {
87504 + .next = NULL,
87505 + .name = "mce_write",
87506 + .file = "arch/x86/kernel/cpu/mcheck/mce-inject.c",
87507 + .param3 = 1,
87508 +};
87509 +
87510 +struct size_overflow_hash _000936_hash = {
87511 + .next = NULL,
87512 + .name = "mcs7830_get_reg",
87513 + .file = "drivers/net/usb/mcs7830.c",
87514 + .param3 = 1,
87515 +};
87516 +
87517 +struct size_overflow_hash _000937_hash = {
87518 + .next = NULL,
87519 + .name = "mcs7830_set_reg",
87520 + .file = "drivers/net/usb/mcs7830.c",
87521 + .param3 = 1,
87522 +};
87523 +
87524 +struct size_overflow_hash _000938_hash = {
87525 + .next = NULL,
87526 + .name = "mdc800_device_read",
87527 + .file = "drivers/usb/image/mdc800.c",
87528 + .param3 = 1,
87529 +};
87530 +
87531 +struct size_overflow_hash _000939_hash = {
87532 + .next = NULL,
87533 + .name = "mdiobus_alloc_size",
87534 + .file = "include/linux/phy.h",
87535 + .param1 = 1,
87536 +};
87537 +
87538 +struct size_overflow_hash _000940_hash = {
87539 + .next = NULL,
87540 + .name = "media_entity_init",
87541 + .file = "include/media/media-entity.h",
87542 + .param2 = 1,
87543 + .param4 = 1,
87544 +};
87545 +
87546 +struct size_overflow_hash _000942_hash = {
87547 + .next = NULL,
87548 + .name = "memstick_alloc_host",
87549 + .file = "include/linux/memstick.h",
87550 + .param1 = 1,
87551 +};
87552 +
87553 +struct size_overflow_hash _000943_hash = {
87554 + .next = NULL,
87555 + .name = "mgmt_control",
87556 + .file = "include/net/bluetooth/hci_core.h",
87557 + .param3 = 1,
87558 +};
87559 +
87560 +struct size_overflow_hash _000944_hash = {
87561 + .next = NULL,
87562 + .name = "mgmt_pending_add",
87563 + .file = "net/bluetooth/mgmt.c",
87564 + .param5 = 1,
87565 +};
87566 +
87567 +struct size_overflow_hash _000945_hash = {
87568 + .next = &_000321_hash,
87569 + .name = "mic_calc_failure_read",
87570 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87571 + .param3 = 1,
87572 +};
87573 +
87574 +struct size_overflow_hash _000946_hash = {
87575 + .next = NULL,
87576 + .name = "mic_rx_pkts_read",
87577 + .file = "drivers/net/wireless/wl1251/debugfs.c",
87578 + .param3 = 1,
87579 +};
87580 +
87581 +struct size_overflow_hash _000947_hash = {
87582 + .next = NULL,
87583 + .name = "minstrel_stats_read",
87584 + .file = "net/mac80211/rc80211_minstrel_debugfs.c",
87585 + .param3 = 1,
87586 +};
87587 +
87588 +struct size_overflow_hash _000948_hash = {
87589 + .next = NULL,
87590 + .name = "mlx4_en_create_rx_ring",
87591 + .file = "drivers/net/ethernet/mellanox/mlx4/en_rx.c",
87592 + .param3 = 1,
87593 +};
87594 +
87595 +struct size_overflow_hash _000949_hash = {
87596 + .next = NULL,
87597 + .name = "mlx4_en_create_tx_ring",
87598 + .file = "drivers/net/ethernet/mellanox/mlx4/en_tx.c",
87599 + .param4 = 1,
87600 +};
87601 +
87602 +struct size_overflow_hash _000950_hash = {
87603 + .next = NULL,
87604 + .name = "mmc_ext_csd_read",
87605 + .file = "drivers/mmc/core/debugfs.c",
87606 + .param3 = 1,
87607 +};
87608 +
87609 +struct size_overflow_hash _000951_hash = {
87610 + .next = NULL,
87611 + .name = "mmc_send_bus_test",
87612 + .file = "drivers/mmc/core/mmc_ops.c",
87613 + .param4 = 1,
87614 +};
87615 +
87616 +struct size_overflow_hash _000952_hash = {
87617 + .next = NULL,
87618 + .name = "mmc_send_cxd_data",
87619 + .file = "drivers/mmc/core/mmc_ops.c",
87620 + .param5 = 1,
87621 +};
87622 +
87623 +struct size_overflow_hash _000953_hash = {
87624 + .next = NULL,
87625 + .name = "mmc_test_alloc_mem",
87626 + .file = "drivers/mmc/card/mmc_test.c",
87627 + .param3 = 1,
87628 +};
87629 +
87630 +struct size_overflow_hash _000954_hash = {
87631 + .next = NULL,
87632 + .name = "mon_bin_get_event",
87633 + .file = "drivers/usb/mon/mon_bin.c",
87634 + .param4 = 1,
87635 +};
87636 +
87637 +struct size_overflow_hash _000955_hash = {
87638 + .next = NULL,
87639 + .name = "mon_stat_read",
87640 + .file = "drivers/usb/mon/mon_stat.c",
87641 + .param3 = 1,
87642 +};
87643 +
87644 +struct size_overflow_hash _000956_hash = {
87645 + .next = NULL,
87646 + .name = "mptctl_getiocinfo",
87647 + .file = "drivers/message/fusion/mptctl.c",
87648 + .param2 = 1,
87649 +};
87650 +
87651 +struct size_overflow_hash _000957_hash = {
87652 + .next = NULL,
87653 + .name = "msnd_fifo_alloc",
87654 + .file = "sound/oss/msnd.c",
87655 + .param2 = 1,
87656 +};
87657 +
87658 +struct size_overflow_hash _000958_hash = {
87659 + .next = NULL,
87660 + .name = "mtdchar_readoob",
87661 + .file = "drivers/mtd/mtdchar.c",
87662 + .param4 = 1,
87663 +};
87664 +
87665 +struct size_overflow_hash _000959_hash = {
87666 + .next = NULL,
87667 + .name = "mtdchar_write",
87668 + .file = "drivers/mtd/mtdchar.c",
87669 + .param3 = 1,
87670 +};
87671 +
87672 +struct size_overflow_hash _000960_hash = {
87673 + .next = NULL,
87674 + .name = "mtdchar_writeoob",
87675 + .file = "drivers/mtd/mtdchar.c",
87676 + .param4 = 1,
87677 +};
87678 +
87679 +struct size_overflow_hash _000961_hash = {
87680 + .next = NULL,
87681 + .name = "mtdswap_init",
87682 + .file = "drivers/mtd/mtdswap.c",
87683 + .param2 = 1,
87684 +};
87685 +
87686 +struct size_overflow_hash _000962_hash = {
87687 + .next = NULL,
87688 + .name = "mtf_test_write",
87689 + .file = "drivers/mmc/card/mmc_test.c",
87690 + .param3 = 1,
87691 +};
87692 +
87693 +struct size_overflow_hash _000963_hash = {
87694 + .next = NULL,
87695 + .name = "musb_test_mode_write",
87696 + .file = "drivers/usb/musb/musb_debugfs.c",
87697 + .param3 = 1,
87698 +};
87699 +
87700 +struct size_overflow_hash _000964_hash = {
87701 + .next = NULL,
87702 + .name = "mvumi_alloc_mem_resource",
87703 + .file = "drivers/scsi/mvumi.c",
87704 + .param3 = 1,
87705 +};
87706 +
87707 +struct size_overflow_hash _000965_hash = {
87708 + .next = NULL,
87709 + .name = "mwifiex_alloc_sdio_mpa_buffers",
87710 + .file = "drivers/net/wireless/mwifiex/sdio.c",
87711 + .param2 = 1,
87712 + .param3 = 1,
87713 +};
87714 +
87715 +struct size_overflow_hash _000967_hash = {
87716 + .next = NULL,
87717 + .name = "mwifiex_debug_read",
87718 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
87719 + .param3 = 1,
87720 +};
87721 +
87722 +struct size_overflow_hash _000968_hash = {
87723 + .next = NULL,
87724 + .name = "mwifiex_get_common_rates",
87725 + .file = "drivers/net/wireless/mwifiex/join.c",
87726 + .param3 = 1,
87727 +};
87728 +
87729 +struct size_overflow_hash _000969_hash = {
87730 + .next = NULL,
87731 + .name = "mwifiex_getlog_read",
87732 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
87733 + .param3 = 1,
87734 +};
87735 +
87736 +struct size_overflow_hash _000970_hash = {
87737 + .next = NULL,
87738 + .name = "mwifiex_info_read",
87739 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
87740 + .param3 = 1,
87741 +};
87742 +
87743 +struct size_overflow_hash _000971_hash = {
87744 + .next = NULL,
87745 + .name = "mwifiex_rdeeprom_read",
87746 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
87747 + .param3 = 1,
87748 +};
87749 +
87750 +struct size_overflow_hash _000972_hash = {
87751 + .next = NULL,
87752 + .name = "mwifiex_regrdwr_read",
87753 + .file = "drivers/net/wireless/mwifiex/debugfs.c",
87754 + .param3 = 1,
87755 +};
87756 +
87757 +struct size_overflow_hash _000973_hash = {
87758 + .next = NULL,
87759 + .name = "mwifiex_update_curr_bss_params",
87760 + .file = "drivers/net/wireless/mwifiex/scan.c",
87761 + .param5 = 1,
87762 +};
87763 +
87764 +struct size_overflow_hash _000974_hash = {
87765 + .next = NULL,
87766 + .name = "nand_bch_init",
87767 + .file = "include/linux/mtd/nand_bch.h",
87768 + .param2 = 1,
87769 + .param3 = 1,
87770 +};
87771 +
87772 +struct size_overflow_hash _000976_hash = {
87773 + .next = NULL,
87774 + .name = "ncp_file_write",
87775 + .file = "fs/ncpfs/file.c",
87776 + .param3 = 1,
87777 +};
87778 +
87779 +struct size_overflow_hash _000977_hash = {
87780 + .next = NULL,
87781 + .name = "ncp__vol2io",
87782 + .file = "fs/ncpfs/ncplib_kernel.c",
87783 + .param5 = 1,
87784 +};
87785 +
87786 +struct size_overflow_hash _000978_hash = {
87787 + .next = NULL,
87788 + .name = "new_bind_ctl",
87789 + .file = "sound/pci/hda/patch_realtek.c",
87790 + .param2 = 1,
87791 +};
87792 +
87793 +struct size_overflow_hash _000979_hash = {
87794 + .next = NULL,
87795 + .name = "nfc_llcp_build_tlv",
87796 + .file = "net/nfc/llcp/commands.c",
87797 + .param3 = 1,
87798 +};
87799 +
87800 +struct size_overflow_hash _000980_hash = {
87801 + .next = NULL,
87802 + .name = "nfs4_alloc_slots",
87803 + .file = "fs/nfs/nfs4proc.c",
87804 + .param1 = 1,
87805 +};
87806 +
87807 +struct size_overflow_hash _000981_hash = {
87808 + .next = NULL,
87809 + .name = "nfs4_write_cached_acl",
87810 + .file = "fs/nfs/nfs4proc.c",
87811 + .param3 = 1,
87812 +};
87813 +
87814 +struct size_overflow_hash _000982_hash = {
87815 + .next = NULL,
87816 + .name = "nfsctl_transaction_read",
87817 + .file = "fs/nfsd/nfsctl.c",
87818 + .param3 = 1,
87819 +};
87820 +
87821 +struct size_overflow_hash _000983_hash = {
87822 + .next = NULL,
87823 + .name = "nfsctl_transaction_write",
87824 + .file = "fs/nfsd/nfsctl.c",
87825 + .param3 = 1,
87826 +};
87827 +
87828 +struct size_overflow_hash _000984_hash = {
87829 + .next = NULL,
87830 + .name = "nfsd_cache_update",
87831 + .file = "fs/nfsd/nfscache.c",
87832 + .param3 = 1,
87833 +};
87834 +
87835 +struct size_overflow_hash _000985_hash = {
87836 + .next = NULL,
87837 + .name = "nfs_idmap_get_desc",
87838 + .file = "fs/nfs/idmap.c",
87839 + .param2 = 1,
87840 + .param4 = 1,
87841 +};
87842 +
87843 +struct size_overflow_hash _000987_hash = {
87844 + .next = NULL,
87845 + .name = "nfs_readdata_alloc",
87846 + .file = "include/linux/nfs_fs.h",
87847 + .param1 = 1,
87848 +};
87849 +
87850 +struct size_overflow_hash _000988_hash = {
87851 + .next = NULL,
87852 + .name = "nfs_readdir_make_qstr",
87853 + .file = "fs/nfs/dir.c",
87854 + .param3 = 1,
87855 +};
87856 +
87857 +struct size_overflow_hash _000989_hash = {
87858 + .next = NULL,
87859 + .name = "nfs_writedata_alloc",
87860 + .file = "include/linux/nfs_fs.h",
87861 + .param1 = 1,
87862 +};
87863 +
87864 +struct size_overflow_hash _000990_hash = {
87865 + .next = NULL,
87866 + .name = "nsm_create_handle",
87867 + .file = "fs/lockd/mon.c",
87868 + .param4 = 1,
87869 +};
87870 +
87871 +struct size_overflow_hash _000991_hash = {
87872 + .next = NULL,
87873 + .name = "ntfs_copy_from_user",
87874 + .file = "fs/ntfs/file.c",
87875 + .param3 = 1,
87876 + .param5 = 1,
87877 +};
87878 +
87879 +struct size_overflow_hash _000993_hash = {
87880 + .next = NULL,
87881 + .name = "__ntfs_copy_from_user_iovec_inatomic",
87882 + .file = "fs/ntfs/file.c",
87883 + .param3 = 1,
87884 + .param4 = 1,
87885 +};
87886 +
87887 +struct size_overflow_hash _000995_hash = {
87888 + .next = NULL,
87889 + .name = "__ntfs_malloc",
87890 + .file = "fs/ntfs/malloc.h",
87891 + .param1 = 1,
87892 +};
87893 +
87894 +struct size_overflow_hash _000996_hash = {
87895 + .next = NULL,
87896 + .name = "nvme_alloc_iod",
87897 + .file = "drivers/block/nvme.c",
87898 + .param1 = 1,
87899 +};
87900 +
87901 +struct size_overflow_hash _000997_hash = {
87902 + .next = NULL,
87903 + .name = "nvram_write",
87904 + .file = "drivers/char/nvram.c",
87905 + .param3 = 1,
87906 +};
87907 +
87908 +struct size_overflow_hash _000998_hash = {
87909 + .next = NULL,
87910 + .name = "o2hb_debug_read",
87911 + .file = "fs/ocfs2/cluster/heartbeat.c",
87912 + .param3 = 1,
87913 +};
87914 +
87915 +struct size_overflow_hash _000999_hash = {
87916 + .next = NULL,
87917 + .name = "o2net_debug_read",
87918 + .file = "fs/ocfs2/cluster/netdebug.c",
87919 + .param3 = 1,
87920 +};
87921 +
87922 +struct size_overflow_hash _001000_hash = {
87923 + .next = NULL,
87924 + .name = "o2net_send_message_vec",
87925 + .file = "fs/ocfs2/cluster/tcp.c",
87926 + .param4 = 1,
87927 +};
87928 +
87929 +struct size_overflow_hash _001001_hash = {
87930 + .next = NULL,
87931 + .name = "ocfs2_control_cfu",
87932 + .file = "fs/ocfs2/stack_user.c",
87933 + .param2 = 1,
87934 +};
87935 +
87936 +struct size_overflow_hash _001002_hash = {
87937 + .next = NULL,
87938 + .name = "ocfs2_control_read",
87939 + .file = "fs/ocfs2/stack_user.c",
87940 + .param3 = 1,
87941 +};
87942 +
87943 +struct size_overflow_hash _001003_hash = {
87944 + .next = NULL,
87945 + .name = "ocfs2_debug_read",
87946 + .file = "fs/ocfs2/super.c",
87947 + .param3 = 1,
87948 +};
87949 +
87950 +struct size_overflow_hash _001004_hash = {
87951 + .next = NULL,
87952 + .name = "opera1_xilinx_rw",
87953 + .file = "drivers/media/dvb/dvb-usb/opera1.c",
87954 + .param5 = 1,
87955 +};
87956 +
87957 +struct size_overflow_hash _001005_hash = {
87958 + .next = NULL,
87959 + .name = "oprofilefs_str_to_user",
87960 + .file = "include/linux/oprofile.h",
87961 + .param3 = 1,
87962 +};
87963 +
87964 +struct size_overflow_hash _001006_hash = {
87965 + .next = NULL,
87966 + .name = "oprofilefs_ulong_from_user",
87967 + .file = "include/linux/oprofile.h",
87968 + .param3 = 1,
87969 +};
87970 +
87971 +struct size_overflow_hash _001007_hash = {
87972 + .next = &_000626_hash,
87973 + .name = "oprofilefs_ulong_to_user",
87974 + .file = "include/linux/oprofile.h",
87975 + .param3 = 1,
87976 +};
87977 +
87978 +struct size_overflow_hash _001008_hash = {
87979 + .next = NULL,
87980 + .name = "_ore_get_io_state",
87981 + .file = "fs/exofs/ore.c",
87982 + .param3 = 1,
87983 +};
87984 +
87985 +struct size_overflow_hash _001009_hash = {
87986 + .next = NULL,
87987 + .name = "_osd_realloc_seg",
87988 + .file = "drivers/scsi/osd/osd_initiator.c",
87989 + .param3 = 1,
87990 +};
87991 +
87992 +struct size_overflow_hash _001010_hash = {
87993 + .next = NULL,
87994 + .name = "_osd_req_list_objects",
87995 + .file = "drivers/scsi/osd/osd_initiator.c",
87996 + .param6 = 1,
87997 +};
87998 +
87999 +struct size_overflow_hash _001011_hash = {
88000 + .next = NULL,
88001 + .name = "osd_req_read_kern",
88002 + .file = "include/scsi/osd_initiator.h",
88003 + .param5 = 1,
88004 +};
88005 +
88006 +struct size_overflow_hash _001012_hash = {
88007 + .next = NULL,
88008 + .name = "osd_req_write_kern",
88009 + .file = "include/scsi/osd_initiator.h",
88010 + .param5 = 1,
88011 +};
88012 +
88013 +struct size_overflow_hash _001013_hash = {
88014 + .next = NULL,
88015 + .name = "osst_execute",
88016 + .file = "drivers/scsi/osst.c",
88017 + .param6 = 1,
88018 +};
88019 +
88020 +struct size_overflow_hash _001014_hash = {
88021 + .next = NULL,
88022 + .name = "otp_read",
88023 + .file = "drivers/mtd/devices/mtd_dataflash.c",
88024 + .param2 = 1,
88025 + .param5 = 1,
88026 +};
88027 +
88028 +struct size_overflow_hash _001016_hash = {
88029 + .next = NULL,
88030 + .name = "packet_buffer_init",
88031 + .file = "drivers/firewire/nosy.c",
88032 + .param2 = 1,
88033 +};
88034 +
88035 +struct size_overflow_hash _001017_hash = {
88036 + .next = NULL,
88037 + .name = "packet_setsockopt",
88038 + .file = "net/packet/af_packet.c",
88039 + .param5 = 1,
88040 +};
88041 +
88042 +struct size_overflow_hash _001018_hash = {
88043 + .next = NULL,
88044 + .name = "parse_arg",
88045 + .file = "drivers/platform/x86/asus_acpi.c",
88046 + .param2 = 1,
88047 +};
88048 +
88049 +struct size_overflow_hash _001019_hash = {
88050 + .next = NULL,
88051 + .name = "parse_command",
88052 + .file = "fs/binfmt_misc.c",
88053 + .param2 = 1,
88054 +};
88055 +
88056 +struct size_overflow_hash _001020_hash = {
88057 + .next = NULL,
88058 + .name = "pcmcia_replace_cis",
88059 + .file = "drivers/pcmcia/cistpl.c",
88060 + .param3 = 1,
88061 +};
88062 +
88063 +struct size_overflow_hash _001021_hash = {
88064 + .next = NULL,
88065 + .name = "pcnet32_realloc_rx_ring",
88066 + .file = "drivers/net/ethernet/amd/pcnet32.c",
88067 + .param3 = 1,
88068 +};
88069 +
88070 +struct size_overflow_hash _001022_hash = {
88071 + .next = NULL,
88072 + .name = "pcnet32_realloc_tx_ring",
88073 + .file = "drivers/net/ethernet/amd/pcnet32.c",
88074 + .param3 = 1,
88075 +};
88076 +
88077 +struct size_overflow_hash _001023_hash = {
88078 + .next = NULL,
88079 + .name = "pgctrl_write",
88080 + .file = "net/core/pktgen.c",
88081 + .param3 = 1,
88082 +};
88083 +
88084 +struct size_overflow_hash _001024_hash = {
88085 + .next = NULL,
88086 + .name = "pg_read",
88087 + .file = "drivers/block/paride/pg.c",
88088 + .param3 = 1,
88089 +};
88090 +
88091 +struct size_overflow_hash _001025_hash = {
88092 + .next = NULL,
88093 + .name = "pg_write",
88094 + .file = "drivers/block/paride/pg.c",
88095 + .param3 = 1,
88096 +};
88097 +
88098 +struct size_overflow_hash _001026_hash = {
88099 + .next = NULL,
88100 + .name = "picolcd_debug_eeprom_read",
88101 + .file = "drivers/hid/hid-picolcd.c",
88102 + .param3 = 1,
88103 +};
88104 +
88105 +struct size_overflow_hash _001027_hash = {
88106 + .next = NULL,
88107 + .name = "pkt_add",
88108 + .file = "drivers/usb/serial/garmin_gps.c",
88109 + .param3 = 1,
88110 +};
88111 +
88112 +struct size_overflow_hash _001028_hash = {
88113 + .next = NULL,
88114 + .name = "pktgen_if_write",
88115 + .file = "net/core/pktgen.c",
88116 + .param3 = 1,
88117 +};
88118 +
88119 +struct size_overflow_hash _001029_hash = {
88120 + .next = NULL,
88121 + .name = "platform_list_read_file",
88122 + .file = "sound/soc/soc-core.c",
88123 + .param3 = 1,
88124 +};
88125 +
88126 +struct size_overflow_hash _001030_hash = {
88127 + .next = NULL,
88128 + .name = "pm8001_store_update_fw",
88129 + .file = "drivers/scsi/pm8001/pm8001_ctl.c",
88130 + .param4 = 1,
88131 +};
88132 +
88133 +struct size_overflow_hash _001031_hash = {
88134 + .next = NULL,
88135 + .name = "port_show_regs",
88136 + .file = "drivers/tty/serial/mfd.c",
88137 + .param3 = 1,
88138 +};
88139 +
88140 +struct size_overflow_hash _001032_hash = {
88141 + .next = NULL,
88142 + .name = "ppp_cp_parse_cr",
88143 + .file = "drivers/net/wan/hdlc_ppp.c",
88144 + .param4 = 1,
88145 +};
88146 +
88147 +struct size_overflow_hash _001033_hash = {
88148 + .next = NULL,
88149 + .name = "ppp_write",
88150 + .file = "drivers/net/ppp/ppp_generic.c",
88151 + .param3 = 1,
88152 +};
88153 +
88154 +struct size_overflow_hash _001034_hash = {
88155 + .next = NULL,
88156 + .name = "pp_read",
88157 + .file = "drivers/char/ppdev.c",
88158 + .param3 = 1,
88159 +};
88160 +
88161 +struct size_overflow_hash _001035_hash = {
88162 + .next = NULL,
88163 + .name = "pp_write",
88164 + .file = "drivers/char/ppdev.c",
88165 + .param3 = 1,
88166 +};
88167 +
88168 +struct size_overflow_hash _001036_hash = {
88169 + .next = NULL,
88170 + .name = "printer_read",
88171 + .file = "drivers/usb/gadget/printer.c",
88172 + .param3 = 1,
88173 +};
88174 +
88175 +struct size_overflow_hash _001037_hash = {
88176 + .next = NULL,
88177 + .name = "printer_req_alloc",
88178 + .file = "drivers/usb/gadget/printer.c",
88179 + .param2 = 1,
88180 +};
88181 +
88182 +struct size_overflow_hash _001038_hash = {
88183 + .next = NULL,
88184 + .name = "printer_write",
88185 + .file = "drivers/usb/gadget/printer.c",
88186 + .param3 = 1,
88187 +};
88188 +
88189 +struct size_overflow_hash _001039_hash = {
88190 + .next = NULL,
88191 + .name = "prism2_set_genericelement",
88192 + .file = "drivers/net/wireless/hostap/hostap_ioctl.c",
88193 + .param3 = 1,
88194 +};
88195 +
88196 +struct size_overflow_hash _001040_hash = {
88197 + .next = NULL,
88198 + .name = "proc_read",
88199 + .file = "drivers/net/wireless/airo.c",
88200 + .param3 = 1,
88201 +};
88202 +
88203 +struct size_overflow_hash _001041_hash = {
88204 + .next = NULL,
88205 + .name = "proc_scsi_devinfo_write",
88206 + .file = "drivers/scsi/scsi_devinfo.c",
88207 + .param3 = 1,
88208 +};
88209 +
88210 +struct size_overflow_hash _001042_hash = {
88211 + .next = NULL,
88212 + .name = "proc_scsi_write",
88213 + .file = "drivers/scsi/scsi_proc.c",
88214 + .param3 = 1,
88215 +};
88216 +
88217 +struct size_overflow_hash _001043_hash = {
88218 + .next = NULL,
88219 + .name = "proc_scsi_write_proc",
88220 + .file = "drivers/scsi/scsi_proc.c",
88221 + .param3 = 1,
88222 +};
88223 +
88224 +struct size_overflow_hash _001044_hash = {
88225 + .next = NULL,
88226 + .name = "proc_write",
88227 + .file = "drivers/net/wireless/airo.c",
88228 + .param3 = 1,
88229 +};
88230 +
88231 +struct size_overflow_hash _001045_hash = {
88232 + .next = NULL,
88233 + .name = "provide_user_output",
88234 + .file = "fs/ubifs/debug.c",
88235 + .param3 = 1,
88236 +};
88237 +
88238 +struct size_overflow_hash _001046_hash = {
88239 + .next = NULL,
88240 + .name = "ps_pspoll_max_apturn_read",
88241 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88242 + .param3 = 1,
88243 +};
88244 +
88245 +struct size_overflow_hash _001047_hash = {
88246 + .next = NULL,
88247 + .name = "ps_pspoll_timeouts_read",
88248 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88249 + .param3 = 1,
88250 +};
88251 +
88252 +struct size_overflow_hash _001048_hash = {
88253 + .next = NULL,
88254 + .name = "ps_pspoll_utilization_read",
88255 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88256 + .param3 = 1,
88257 +};
88258 +
88259 +struct size_overflow_hash _001049_hash = {
88260 + .next = NULL,
88261 + .name = "ps_upsd_max_apturn_read",
88262 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88263 + .param3 = 1,
88264 +};
88265 +
88266 +struct size_overflow_hash _001050_hash = {
88267 + .next = NULL,
88268 + .name = "ps_upsd_max_sptime_read",
88269 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88270 + .param3 = 1,
88271 +};
88272 +
88273 +struct size_overflow_hash _001051_hash = {
88274 + .next = NULL,
88275 + .name = "ps_upsd_timeouts_read",
88276 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88277 + .param3 = 1,
88278 +};
88279 +
88280 +struct size_overflow_hash _001052_hash = {
88281 + .next = NULL,
88282 + .name = "ps_upsd_utilization_read",
88283 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88284 + .param3 = 1,
88285 +};
88286 +
88287 +struct size_overflow_hash _001053_hash = {
88288 + .next = NULL,
88289 + .name = "pti_char_write",
88290 + .file = "drivers/misc/pti.c",
88291 + .param3 = 1,
88292 +};
88293 +
88294 +struct size_overflow_hash _001054_hash = {
88295 + .next = NULL,
88296 + .name = "pt_read",
88297 + .file = "drivers/block/paride/pt.c",
88298 + .param3 = 1,
88299 +};
88300 +
88301 +struct size_overflow_hash _001055_hash = {
88302 + .next = NULL,
88303 + .name = "pt_write",
88304 + .file = "drivers/block/paride/pt.c",
88305 + .param3 = 1,
88306 +};
88307 +
88308 +struct size_overflow_hash _001056_hash = {
88309 + .next = NULL,
88310 + .name = "pvr2_ioread_read",
88311 + .file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
88312 + .param3 = 1,
88313 +};
88314 +
88315 +struct size_overflow_hash _001057_hash = {
88316 + .next = NULL,
88317 + .name = "pvr2_ioread_set_sync_key",
88318 + .file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
88319 + .param3 = 1,
88320 +};
88321 +
88322 +struct size_overflow_hash _001058_hash = {
88323 + .next = NULL,
88324 + .name = "pvr2_stream_buffer_count",
88325 + .file = "drivers/media/video/pvrusb2/pvrusb2-io.c",
88326 + .param2 = 1,
88327 +};
88328 +
88329 +struct size_overflow_hash _001059_hash = {
88330 + .next = NULL,
88331 + .name = "pwr_disable_ps_read",
88332 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88333 + .param3 = 1,
88334 +};
88335 +
88336 +struct size_overflow_hash _001060_hash = {
88337 + .next = NULL,
88338 + .name = "pwr_elp_enter_read",
88339 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88340 + .param3 = 1,
88341 +};
88342 +
88343 +struct size_overflow_hash _001061_hash = {
88344 + .next = NULL,
88345 + .name = "pwr_enable_ps_read",
88346 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88347 + .param3 = 1,
88348 +};
88349 +
88350 +struct size_overflow_hash _001062_hash = {
88351 + .next = NULL,
88352 + .name = "pwr_fix_tsf_ps_read",
88353 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88354 + .param3 = 1,
88355 +};
88356 +
88357 +struct size_overflow_hash _001063_hash = {
88358 + .next = NULL,
88359 + .name = "pwr_missing_bcns_read",
88360 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88361 + .param3 = 1,
88362 +};
88363 +
88364 +struct size_overflow_hash _001064_hash = {
88365 + .next = NULL,
88366 + .name = "pwr_power_save_off_read",
88367 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88368 + .param3 = 1,
88369 +};
88370 +
88371 +struct size_overflow_hash _001065_hash = {
88372 + .next = NULL,
88373 + .name = "pwr_ps_enter_read",
88374 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88375 + .param3 = 1,
88376 +};
88377 +
88378 +struct size_overflow_hash _001066_hash = {
88379 + .next = NULL,
88380 + .name = "pwr_rcvd_awake_beacons_read",
88381 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88382 + .param3 = 1,
88383 +};
88384 +
88385 +struct size_overflow_hash _001067_hash = {
88386 + .next = NULL,
88387 + .name = "pwr_rcvd_beacons_read",
88388 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88389 + .param3 = 1,
88390 +};
88391 +
88392 +struct size_overflow_hash _001068_hash = {
88393 + .next = NULL,
88394 + .name = "pwr_tx_without_ps_read",
88395 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88396 + .param3 = 1,
88397 +};
88398 +
88399 +struct size_overflow_hash _001069_hash = {
88400 + .next = NULL,
88401 + .name = "pwr_tx_with_ps_read",
88402 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88403 + .param3 = 1,
88404 +};
88405 +
88406 +struct size_overflow_hash _001070_hash = {
88407 + .next = NULL,
88408 + .name = "pwr_wake_on_host_read",
88409 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88410 + .param3 = 1,
88411 +};
88412 +
88413 +struct size_overflow_hash _001071_hash = {
88414 + .next = NULL,
88415 + .name = "pwr_wake_on_timer_exp_read",
88416 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88417 + .param3 = 1,
88418 +};
88419 +
88420 +struct size_overflow_hash _001072_hash = {
88421 + .next = NULL,
88422 + .name = "qc_capture",
88423 + .file = "drivers/media/video/c-qcam.c",
88424 + .param3 = 1,
88425 +};
88426 +
88427 +struct size_overflow_hash _001073_hash = {
88428 + .next = NULL,
88429 + .name = "qla2x00_get_ctx_bsg_sp",
88430 + .file = "drivers/scsi/qla2xxx/qla_bsg.c",
88431 + .param3 = 1,
88432 +};
88433 +
88434 +struct size_overflow_hash _001074_hash = {
88435 + .next = NULL,
88436 + .name = "qla2x00_get_ctx_sp",
88437 + .file = "drivers/scsi/qla2xxx/qla_init.c",
88438 + .param3 = 1,
88439 +};
88440 +
88441 +struct size_overflow_hash _001075_hash = {
88442 + .next = NULL,
88443 + .name = "qlcnic_alloc_msix_entries",
88444 + .file = "drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c",
88445 + .param2 = 1,
88446 +};
88447 +
88448 +struct size_overflow_hash _001076_hash = {
88449 + .next = NULL,
88450 + .name = "queues_read",
88451 + .file = "net/mac80211/debugfs.c",
88452 + .param3 = 1,
88453 +};
88454 +
88455 +struct size_overflow_hash _001077_hash = {
88456 + .next = NULL,
88457 + .name = "r3964_write",
88458 + .file = "drivers/tty/n_r3964.c",
88459 + .param4 = 1,
88460 +};
88461 +
88462 +struct size_overflow_hash _001078_hash = {
88463 + .next = NULL,
88464 + .name = "raw_setsockopt",
88465 + .file = "net/can/raw.c",
88466 + .param5 = 1,
88467 +};
88468 +
88469 +struct size_overflow_hash _001079_hash = {
88470 + .next = NULL,
88471 + .name = "ray_cs_essid_proc_write",
88472 + .file = "drivers/net/wireless/ray_cs.c",
88473 + .param3 = 1,
88474 +};
88475 +
88476 +struct size_overflow_hash _001080_hash = {
88477 + .next = NULL,
88478 + .name = "rbd_snap_add",
88479 + .file = "drivers/block/rbd.c",
88480 + .param4 = 1,
88481 +};
88482 +
88483 +struct size_overflow_hash _001081_hash = {
88484 + .next = NULL,
88485 + .name = "rcname_read",
88486 + .file = "net/mac80211/rate.c",
88487 + .param3 = 1,
88488 +};
88489 +
88490 +struct size_overflow_hash _001082_hash = {
88491 + .next = NULL,
88492 + .name = "rds_message_alloc",
88493 + .file = "net/rds/message.c",
88494 + .param1 = 1,
88495 +};
88496 +
88497 +struct size_overflow_hash _001083_hash = {
88498 + .next = NULL,
88499 + .name = "rds_page_copy_user",
88500 + .file = "net/rds/page.c",
88501 + .param4 = 1,
88502 +};
88503 +
88504 +struct size_overflow_hash _001084_hash = {
88505 + .next = NULL,
88506 + .name = "read",
88507 + .file = "drivers/pci/hotplug/cpqphp_sysfs.c",
88508 + .param3 = 1,
88509 +};
88510 +
88511 +struct size_overflow_hash _001085_hash = {
88512 + .next = NULL,
88513 + .name = "read_4k_modal_eeprom",
88514 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88515 + .param3 = 1,
88516 +};
88517 +
88518 +struct size_overflow_hash _001086_hash = {
88519 + .next = NULL,
88520 + .name = "read_9287_modal_eeprom",
88521 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88522 + .param3 = 1,
88523 +};
88524 +
88525 +struct size_overflow_hash _001087_hash = {
88526 + .next = NULL,
88527 + .name = "read_buf",
88528 + .file = "fs/nfsd/nfs4xdr.c",
88529 + .param2 = 1,
88530 +};
88531 +
88532 +struct size_overflow_hash _001088_hash = {
88533 + .next = NULL,
88534 + .name = "read_cis_cache",
88535 + .file = "drivers/pcmcia/cistpl.c",
88536 + .param4 = 1,
88537 +};
88538 +
88539 +struct size_overflow_hash _001089_hash = {
88540 + .next = NULL,
88541 + .name = "read_def_modal_eeprom",
88542 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88543 + .param3 = 1,
88544 +};
88545 +
88546 +struct size_overflow_hash _001090_hash = {
88547 + .next = NULL,
88548 + .name = "read_file_ani",
88549 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
88550 + .param3 = 1,
88551 +};
88552 +
88553 +struct size_overflow_hash _001091_hash = {
88554 + .next = NULL,
88555 + .name = "read_file_antenna",
88556 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
88557 + .param3 = 1,
88558 +};
88559 +
88560 +struct size_overflow_hash _001092_hash = {
88561 + .next = NULL,
88562 + .name = "read_file_base_eeprom",
88563 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88564 + .param3 = 1,
88565 +};
88566 +
88567 +struct size_overflow_hash _001093_hash = {
88568 + .next = NULL,
88569 + .name = "read_file_base_eeprom",
88570 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88571 + .param3 = 1,
88572 +};
88573 +
88574 +struct size_overflow_hash _001094_hash = {
88575 + .next = NULL,
88576 + .name = "read_file_beacon",
88577 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
88578 + .param3 = 1,
88579 +};
88580 +
88581 +struct size_overflow_hash _001095_hash = {
88582 + .next = NULL,
88583 + .name = "read_file_credit_dist_stats",
88584 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
88585 + .param3 = 1,
88586 +};
88587 +
88588 +struct size_overflow_hash _001096_hash = {
88589 + .next = NULL,
88590 + .name = "read_file_debug",
88591 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88592 + .param3 = 1,
88593 +};
88594 +
88595 +struct size_overflow_hash _001097_hash = {
88596 + .next = NULL,
88597 + .name = "read_file_debug",
88598 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88599 + .param3 = 1,
88600 +};
88601 +
88602 +struct size_overflow_hash _001098_hash = {
88603 + .next = NULL,
88604 + .name = "read_file_debug",
88605 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
88606 + .param3 = 1,
88607 +};
88608 +
88609 +struct size_overflow_hash _001099_hash = {
88610 + .next = NULL,
88611 + .name = "read_file_disable_ani",
88612 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88613 + .param3 = 1,
88614 +};
88615 +
88616 +struct size_overflow_hash _001100_hash = {
88617 + .next = NULL,
88618 + .name = "read_file_dma",
88619 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88620 + .param3 = 1,
88621 +};
88622 +
88623 +struct size_overflow_hash _001101_hash = {
88624 + .next = NULL,
88625 + .name = "read_file_dump_nfcal",
88626 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88627 + .param3 = 1,
88628 +};
88629 +
88630 +struct size_overflow_hash _001102_hash = {
88631 + .next = NULL,
88632 + .name = "read_file_frameerrors",
88633 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
88634 + .param3 = 1,
88635 +};
88636 +
88637 +struct size_overflow_hash _001103_hash = {
88638 + .next = NULL,
88639 + .name = "read_file_interrupt",
88640 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88641 + .param3 = 1,
88642 +};
88643 +
88644 +struct size_overflow_hash _001104_hash = {
88645 + .next = NULL,
88646 + .name = "read_file_misc",
88647 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
88648 + .param3 = 1,
88649 +};
88650 +
88651 +struct size_overflow_hash _001105_hash = {
88652 + .next = NULL,
88653 + .name = "read_file_misc",
88654 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88655 + .param3 = 1,
88656 +};
88657 +
88658 +struct size_overflow_hash _001106_hash = {
88659 + .next = NULL,
88660 + .name = "read_file_modal_eeprom",
88661 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88662 + .param3 = 1,
88663 +};
88664 +
88665 +struct size_overflow_hash _001107_hash = {
88666 + .next = NULL,
88667 + .name = "read_file_queue",
88668 + .file = "drivers/net/wireless/ath/ath5k/debug.c",
88669 + .param3 = 1,
88670 +};
88671 +
88672 +struct size_overflow_hash _001108_hash = {
88673 + .next = NULL,
88674 + .name = "read_file_queue",
88675 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88676 + .param3 = 1,
88677 +};
88678 +
88679 +struct size_overflow_hash _001109_hash = {
88680 + .next = NULL,
88681 + .name = "read_file_rcstat",
88682 + .file = "drivers/net/wireless/ath/ath9k/rc.c",
88683 + .param3 = 1,
88684 +};
88685 +
88686 +struct size_overflow_hash _001110_hash = {
88687 + .next = NULL,
88688 + .name = "read_file_recv",
88689 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88690 + .param3 = 1,
88691 +};
88692 +
88693 +struct size_overflow_hash _001111_hash = {
88694 + .next = NULL,
88695 + .name = "read_file_recv",
88696 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88697 + .param3 = 1,
88698 +};
88699 +
88700 +struct size_overflow_hash _001112_hash = {
88701 + .next = NULL,
88702 + .name = "read_file_regidx",
88703 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88704 + .param3 = 1,
88705 +};
88706 +
88707 +struct size_overflow_hash _001113_hash = {
88708 + .next = &_001103_hash,
88709 + .name = "read_file_regval",
88710 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88711 + .param3 = 1,
88712 +};
88713 +
88714 +struct size_overflow_hash _001114_hash = {
88715 + .next = NULL,
88716 + .name = "read_file_rx_chainmask",
88717 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88718 + .param3 = 1,
88719 +};
88720 +
88721 +struct size_overflow_hash _001115_hash = {
88722 + .next = NULL,
88723 + .name = "read_file_slot",
88724 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88725 + .param3 = 1,
88726 +};
88727 +
88728 +struct size_overflow_hash _001116_hash = {
88729 + .next = NULL,
88730 + .name = "read_file_stations",
88731 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88732 + .param3 = 1,
88733 +};
88734 +
88735 +struct size_overflow_hash _001117_hash = {
88736 + .next = NULL,
88737 + .name = "read_file_tgt_int_stats",
88738 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88739 + .param3 = 1,
88740 +};
88741 +
88742 +struct size_overflow_hash _001118_hash = {
88743 + .next = NULL,
88744 + .name = "read_file_tgt_rx_stats",
88745 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88746 + .param3 = 1,
88747 +};
88748 +
88749 +struct size_overflow_hash _001119_hash = {
88750 + .next = NULL,
88751 + .name = "read_file_tgt_stats",
88752 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
88753 + .param3 = 1,
88754 +};
88755 +
88756 +struct size_overflow_hash _001120_hash = {
88757 + .next = NULL,
88758 + .name = "read_file_tgt_tx_stats",
88759 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88760 + .param3 = 1,
88761 +};
88762 +
88763 +struct size_overflow_hash _001121_hash = {
88764 + .next = NULL,
88765 + .name = "read_file_tx_chainmask",
88766 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88767 + .param3 = 1,
88768 +};
88769 +
88770 +struct size_overflow_hash _001122_hash = {
88771 + .next = NULL,
88772 + .name = "read_file_war_stats",
88773 + .file = "drivers/net/wireless/ath/ath6kl/debug.c",
88774 + .param3 = 1,
88775 +};
88776 +
88777 +struct size_overflow_hash _001123_hash = {
88778 + .next = NULL,
88779 + .name = "read_file_wiphy",
88780 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88781 + .param3 = 1,
88782 +};
88783 +
88784 +struct size_overflow_hash _001124_hash = {
88785 + .next = NULL,
88786 + .name = "read_file_xmit",
88787 + .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
88788 + .param3 = 1,
88789 +};
88790 +
88791 +struct size_overflow_hash _001125_hash = {
88792 + .next = NULL,
88793 + .name = "read_file_xmit",
88794 + .file = "drivers/net/wireless/ath/ath9k/debug.c",
88795 + .param3 = 1,
88796 +};
88797 +
88798 +struct size_overflow_hash _001126_hash = {
88799 + .next = NULL,
88800 + .name = "read_flush",
88801 + .file = "net/sunrpc/cache.c",
88802 + .param3 = 1,
88803 +};
88804 +
88805 +struct size_overflow_hash _001127_hash = {
88806 + .next = NULL,
88807 + .name = "realloc_buffer",
88808 + .file = "drivers/scsi/device_handler/scsi_dh_alua.c",
88809 + .param2 = 1,
88810 +};
88811 +
88812 +struct size_overflow_hash _001128_hash = {
88813 + .next = NULL,
88814 + .name = "receive_DataRequest",
88815 + .file = "drivers/block/drbd/drbd_receiver.c",
88816 + .param3 = 1,
88817 +};
88818 +
88819 +struct size_overflow_hash _001129_hash = {
88820 + .next = NULL,
88821 + .name = "recent_mt_proc_write",
88822 + .file = "net/netfilter/xt_recent.c",
88823 + .param3 = 1,
88824 +};
88825 +
88826 +struct size_overflow_hash _001130_hash = {
88827 + .next = NULL,
88828 + .name = "redrat3_transmit_ir",
88829 + .file = "drivers/media/rc/redrat3.c",
88830 + .param3 = 1,
88831 +};
88832 +
88833 +struct size_overflow_hash _001131_hash = {
88834 + .next = NULL,
88835 + .name = "reg_w_buf",
88836 + .file = "drivers/media/video/gspca/t613.c",
88837 + .param3 = 1,
88838 +};
88839 +
88840 +struct size_overflow_hash _001132_hash = {
88841 + .next = NULL,
88842 + .name = "reg_w_ixbuf",
88843 + .file = "drivers/media/video/gspca/t613.c",
88844 + .param4 = 1,
88845 +};
88846 +
88847 +struct size_overflow_hash _001133_hash = {
88848 + .next = NULL,
88849 + .name = "reiserfs_allocate_list_bitmaps",
88850 + .file = "include/linux/reiserfs_fs.h",
88851 + .param3 = 1,
88852 +};
88853 +
88854 +struct size_overflow_hash _001134_hash = {
88855 + .next = NULL,
88856 + .name = "reiserfs_resize",
88857 + .file = "include/linux/reiserfs_fs_sb.h",
88858 + .param2 = 1,
88859 +};
88860 +
88861 +struct size_overflow_hash _001135_hash = {
88862 + .next = NULL,
88863 + .name = "remote_settings_file_write",
88864 + .file = "drivers/misc/ibmasm/ibmasmfs.c",
88865 + .param3 = 1,
88866 +};
88867 +
88868 +struct size_overflow_hash _001136_hash = {
88869 + .next = NULL,
88870 + .name = "_req_append_segment",
88871 + .file = "drivers/scsi/osd/osd_initiator.c",
88872 + .param2 = 1,
88873 +};
88874 +
88875 +struct size_overflow_hash _001137_hash = {
88876 + .next = NULL,
88877 + .name = "retry_count_read",
88878 + .file = "drivers/net/wireless/wl1251/debugfs.c",
88879 + .param3 = 1,
88880 +};
88881 +
88882 +struct size_overflow_hash _001138_hash = {
88883 + .next = NULL,
88884 + .name = "revalidate",
88885 + .file = "drivers/block/aoe/aoechr.c",
88886 + .param2 = 1,
88887 +};
88888 +
88889 +struct size_overflow_hash _001139_hash = {
88890 + .next = NULL,
88891 + .name = "rfcomm_sock_setsockopt",
88892 + .file = "net/bluetooth/rfcomm/sock.c",
88893 + .param5 = 1,
88894 +};
88895 +
88896 +struct size_overflow_hash _001140_hash = {
88897 + .next = NULL,
88898 + .name = "rfkill_fop_read",
88899 + .file = "net/rfkill/core.c",
88900 + .param3 = 1,
88901 +};
88902 +
88903 +struct size_overflow_hash _001141_hash = {
88904 + .next = NULL,
88905 + .name = "rndis_add_response",
88906 + .file = "drivers/usb/gadget/rndis.c",
88907 + .param2 = 1,
88908 +};
88909 +
88910 +struct size_overflow_hash _001142_hash = {
88911 + .next = NULL,
88912 + .name = "rng_dev_read",
88913 + .file = "drivers/char/hw_random/core.c",
88914 + .param3 = 1,
88915 +};
88916 +
88917 +struct size_overflow_hash _001143_hash = {
88918 + .next = NULL,
88919 + .name = "roccat_common_receive",
88920 + .file = "drivers/hid/hid-roccat-common.c",
88921 + .param4 = 1,
88922 +};
88923 +
88924 +struct size_overflow_hash _001144_hash = {
88925 + .next = NULL,
88926 + .name = "roccat_common_send",
88927 + .file = "drivers/hid/hid-roccat-common.c",
88928 + .param4 = 1,
88929 +};
88930 +
88931 +struct size_overflow_hash _001145_hash = {
88932 + .next = NULL,
88933 + .name = "roccat_read",
88934 + .file = "drivers/hid/hid-roccat.c",
88935 + .param3 = 1,
88936 +};
88937 +
88938 +struct size_overflow_hash _001146_hash = {
88939 + .next = NULL,
88940 + .name = "rpc_malloc",
88941 + .file = "include/linux/sunrpc/sched.h",
88942 + .param2 = 1,
88943 +};
88944 +
88945 +struct size_overflow_hash _001147_hash = {
88946 + .next = NULL,
88947 + .name = "rs_sta_dbgfs_rate_scale_data_read",
88948 + .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
88949 + .param3 = 1,
88950 +};
88951 +
88952 +struct size_overflow_hash _001148_hash = {
88953 + .next = NULL,
88954 + .name = "rs_sta_dbgfs_scale_table_read",
88955 + .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
88956 + .param3 = 1,
88957 +};
88958 +
88959 +struct size_overflow_hash _001149_hash = {
88960 + .next = NULL,
88961 + .name = "rs_sta_dbgfs_stats_table_read",
88962 + .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
88963 + .param3 = 1,
88964 +};
88965 +
88966 +struct size_overflow_hash _001150_hash = {
88967 + .next = NULL,
88968 + .name = "rt2x00debug_write_bbp",
88969 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
88970 + .param3 = 1,
88971 +};
88972 +
88973 +struct size_overflow_hash _001151_hash = {
88974 + .next = NULL,
88975 + .name = "rt2x00debug_write_csr",
88976 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
88977 + .param3 = 1,
88978 +};
88979 +
88980 +struct size_overflow_hash _001152_hash = {
88981 + .next = &_000808_hash,
88982 + .name = "rt2x00debug_write_eeprom",
88983 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
88984 + .param3 = 1,
88985 +};
88986 +
88987 +struct size_overflow_hash _001153_hash = {
88988 + .next = NULL,
88989 + .name = "rt2x00debug_write_rf",
88990 + .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
88991 + .param3 = 1,
88992 +};
88993 +
88994 +struct size_overflow_hash _001154_hash = {
88995 + .next = NULL,
88996 + .name = "rts51x_read_mem",
88997 + .file = "drivers/usb/storage/realtek_cr.c",
88998 + .param4 = 1,
88999 +};
89000 +
89001 +struct size_overflow_hash _001155_hash = {
89002 + .next = NULL,
89003 + .name = "rts51x_write_mem",
89004 + .file = "drivers/usb/storage/realtek_cr.c",
89005 + .param4 = 1,
89006 +};
89007 +
89008 +struct size_overflow_hash _001156_hash = {
89009 + .next = NULL,
89010 + .name = "rts_threshold_read",
89011 + .file = "net/wireless/debugfs.c",
89012 + .param3 = 1,
89013 +};
89014 +
89015 +struct size_overflow_hash _001157_hash = {
89016 + .next = NULL,
89017 + .name = "rx_dropped_read",
89018 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89019 + .param3 = 1,
89020 +};
89021 +
89022 +struct size_overflow_hash _001158_hash = {
89023 + .next = NULL,
89024 + .name = "rx_fcs_err_read",
89025 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89026 + .param3 = 1,
89027 +};
89028 +
89029 +struct size_overflow_hash _001159_hash = {
89030 + .next = NULL,
89031 + .name = "rx_hdr_overflow_read",
89032 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89033 + .param3 = 1,
89034 +};
89035 +
89036 +struct size_overflow_hash _001160_hash = {
89037 + .next = NULL,
89038 + .name = "rx_hw_stuck_read",
89039 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89040 + .param3 = 1,
89041 +};
89042 +
89043 +struct size_overflow_hash _001161_hash = {
89044 + .next = NULL,
89045 + .name = "rx_out_of_mem_read",
89046 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89047 + .param3 = 1,
89048 +};
89049 +
89050 +struct size_overflow_hash _001162_hash = {
89051 + .next = NULL,
89052 + .name = "rx_path_reset_read",
89053 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89054 + .param3 = 1,
89055 +};
89056 +
89057 +struct size_overflow_hash _001163_hash = {
89058 + .next = NULL,
89059 + .name = "rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read",
89060 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89061 + .param3 = 1,
89062 +};
89063 +
89064 +struct size_overflow_hash _001164_hash = {
89065 + .next = NULL,
89066 + .name = "rxpipe_descr_host_int_trig_rx_data_read",
89067 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89068 + .param3 = 1,
89069 +};
89070 +
89071 +struct size_overflow_hash _001165_hash = {
89072 + .next = NULL,
89073 + .name = "rxpipe_missed_beacon_host_int_trig_rx_data_read",
89074 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89075 + .param3 = 1,
89076 +};
89077 +
89078 +struct size_overflow_hash _001166_hash = {
89079 + .next = NULL,
89080 + .name = "rxpipe_rx_prep_beacon_drop_read",
89081 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89082 + .param3 = 1,
89083 +};
89084 +
89085 +struct size_overflow_hash _001167_hash = {
89086 + .next = NULL,
89087 + .name = "rxpipe_tx_xfr_host_int_trig_rx_data_read",
89088 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89089 + .param3 = 1,
89090 +};
89091 +
89092 +struct size_overflow_hash _001168_hash = {
89093 + .next = NULL,
89094 + .name = "rx_reset_counter_read",
89095 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89096 + .param3 = 1,
89097 +};
89098 +
89099 +struct size_overflow_hash _001169_hash = {
89100 + .next = NULL,
89101 + .name = "rx_xfr_hint_trig_read",
89102 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89103 + .param3 = 1,
89104 +};
89105 +
89106 +struct size_overflow_hash _001170_hash = {
89107 + .next = NULL,
89108 + .name = "saa7164_buffer_alloc_user",
89109 + .file = "drivers/media/video/saa7164/saa7164-buffer.c",
89110 + .param2 = 1,
89111 +};
89112 +
89113 +struct size_overflow_hash _001171_hash = {
89114 + .next = NULL,
89115 + .name = "scsi_execute",
89116 + .file = "include/scsi/scsi_device.h",
89117 + .param5 = 1,
89118 +};
89119 +
89120 +struct size_overflow_hash _001172_hash = {
89121 + .next = NULL,
89122 + .name = "scsi_tgt_copy_sense",
89123 + .file = "drivers/scsi/scsi_tgt_lib.c",
89124 + .param3 = 1,
89125 +};
89126 +
89127 +struct size_overflow_hash _001173_hash = {
89128 + .next = NULL,
89129 + .name = "sctp_auth_create_key",
89130 + .file = "net/sctp/auth.c",
89131 + .param1 = 1,
89132 +};
89133 +
89134 +struct size_overflow_hash _001174_hash = {
89135 + .next = NULL,
89136 + .name = "sctp_make_abort_user",
89137 + .file = "include/net/sctp/sm.h",
89138 + .param3 = 1,
89139 +};
89140 +
89141 +struct size_overflow_hash _001175_hash = {
89142 + .next = NULL,
89143 + .name = "sctpprobe_read",
89144 + .file = "net/sctp/probe.c",
89145 + .param3 = 1,
89146 +};
89147 +
89148 +struct size_overflow_hash _001176_hash = {
89149 + .next = NULL,
89150 + .name = "sctp_setsockopt_active_key",
89151 + .file = "net/sctp/socket.c",
89152 + .param3 = 1,
89153 +};
89154 +
89155 +struct size_overflow_hash _001177_hash = {
89156 + .next = NULL,
89157 + .name = "sctp_setsockopt_adaptation_layer",
89158 + .file = "net/sctp/socket.c",
89159 + .param3 = 1,
89160 +};
89161 +
89162 +struct size_overflow_hash _001178_hash = {
89163 + .next = NULL,
89164 + .name = "sctp_setsockopt_associnfo",
89165 + .file = "net/sctp/socket.c",
89166 + .param3 = 1,
89167 +};
89168 +
89169 +struct size_overflow_hash _001179_hash = {
89170 + .next = NULL,
89171 + .name = "sctp_setsockopt_auth_chunk",
89172 + .file = "net/sctp/socket.c",
89173 + .param3 = 1,
89174 +};
89175 +
89176 +struct size_overflow_hash _001180_hash = {
89177 + .next = NULL,
89178 + .name = "sctp_setsockopt_auth_key",
89179 + .file = "net/sctp/socket.c",
89180 + .param3 = 1,
89181 +};
89182 +
89183 +struct size_overflow_hash _001181_hash = {
89184 + .next = NULL,
89185 + .name = "sctp_setsockopt_autoclose",
89186 + .file = "net/sctp/socket.c",
89187 + .param3 = 1,
89188 +};
89189 +
89190 +struct size_overflow_hash _001182_hash = {
89191 + .next = NULL,
89192 + .name = "sctp_setsockopt_context",
89193 + .file = "net/sctp/socket.c",
89194 + .param3 = 1,
89195 +};
89196 +
89197 +struct size_overflow_hash _001183_hash = {
89198 + .next = NULL,
89199 + .name = "sctp_setsockopt_default_send_param",
89200 + .file = "net/sctp/socket.c",
89201 + .param3 = 1,
89202 +};
89203 +
89204 +struct size_overflow_hash _001184_hash = {
89205 + .next = NULL,
89206 + .name = "sctp_setsockopt_delayed_ack",
89207 + .file = "net/sctp/socket.c",
89208 + .param3 = 1,
89209 +};
89210 +
89211 +struct size_overflow_hash _001185_hash = {
89212 + .next = NULL,
89213 + .name = "sctp_setsockopt_del_key",
89214 + .file = "net/sctp/socket.c",
89215 + .param3 = 1,
89216 +};
89217 +
89218 +struct size_overflow_hash _001186_hash = {
89219 + .next = NULL,
89220 + .name = "sctp_setsockopt_events",
89221 + .file = "net/sctp/socket.c",
89222 + .param3 = 1,
89223 +};
89224 +
89225 +struct size_overflow_hash _001187_hash = {
89226 + .next = NULL,
89227 + .name = "sctp_setsockopt_hmac_ident",
89228 + .file = "net/sctp/socket.c",
89229 + .param3 = 1,
89230 +};
89231 +
89232 +struct size_overflow_hash _001188_hash = {
89233 + .next = NULL,
89234 + .name = "sctp_setsockopt_initmsg",
89235 + .file = "net/sctp/socket.c",
89236 + .param3 = 1,
89237 +};
89238 +
89239 +struct size_overflow_hash _001189_hash = {
89240 + .next = NULL,
89241 + .name = "sctp_setsockopt_maxburst",
89242 + .file = "net/sctp/socket.c",
89243 + .param3 = 1,
89244 +};
89245 +
89246 +struct size_overflow_hash _001190_hash = {
89247 + .next = NULL,
89248 + .name = "sctp_setsockopt_maxseg",
89249 + .file = "net/sctp/socket.c",
89250 + .param3 = 1,
89251 +};
89252 +
89253 +struct size_overflow_hash _001191_hash = {
89254 + .next = NULL,
89255 + .name = "sctp_setsockopt_peer_addr_params",
89256 + .file = "net/sctp/socket.c",
89257 + .param3 = 1,
89258 +};
89259 +
89260 +struct size_overflow_hash _001192_hash = {
89261 + .next = NULL,
89262 + .name = "sctp_setsockopt_peer_primary_addr",
89263 + .file = "net/sctp/socket.c",
89264 + .param3 = 1,
89265 +};
89266 +
89267 +struct size_overflow_hash _001193_hash = {
89268 + .next = NULL,
89269 + .name = "sctp_setsockopt_rtoinfo",
89270 + .file = "net/sctp/socket.c",
89271 + .param3 = 1,
89272 +};
89273 +
89274 +struct size_overflow_hash _001194_hash = {
89275 + .next = NULL,
89276 + .name = "sctp_tsnmap_init",
89277 + .file = "include/net/sctp/tsnmap.h",
89278 + .param2 = 1,
89279 +};
89280 +
89281 +struct size_overflow_hash _001195_hash = {
89282 + .next = NULL,
89283 + .name = "send_control_msg",
89284 + .file = "drivers/media/video/zr364xx.c",
89285 + .param6 = 1,
89286 +};
89287 +
89288 +struct size_overflow_hash _001196_hash = {
89289 + .next = NULL,
89290 + .name = "set_aoe_iflist",
89291 + .file = "drivers/block/aoe/aoenet.c",
89292 + .param2 = 1,
89293 +};
89294 +
89295 +struct size_overflow_hash _001197_hash = {
89296 + .next = NULL,
89297 + .name = "set_registers",
89298 + .file = "drivers/net/usb/pegasus.c",
89299 + .param3 = 1,
89300 +};
89301 +
89302 +struct size_overflow_hash _001198_hash = {
89303 + .next = NULL,
89304 + .name = "setsockopt",
89305 + .file = "net/caif/caif_socket.c",
89306 + .param5 = 1,
89307 +};
89308 +
89309 +struct size_overflow_hash _001199_hash = {
89310 + .next = NULL,
89311 + .name = "setup_req",
89312 + .file = "drivers/usb/gadget/inode.c",
89313 + .param3 = 1,
89314 +};
89315 +
89316 +struct size_overflow_hash _001200_hash = {
89317 + .next = NULL,
89318 + .name = "sfq_alloc",
89319 + .file = "net/sched/sch_sfq.c",
89320 + .param1 = 1,
89321 +};
89322 +
89323 +struct size_overflow_hash _001201_hash = {
89324 + .next = NULL,
89325 + .name = "sgl_map_user_pages",
89326 + .file = "drivers/scsi/st.c",
89327 + .param2 = 1,
89328 +};
89329 +
89330 +struct size_overflow_hash _001202_hash = {
89331 + .next = NULL,
89332 + .name = "short_retry_limit_read",
89333 + .file = "net/wireless/debugfs.c",
89334 + .param3 = 1,
89335 +};
89336 +
89337 +struct size_overflow_hash _001203_hash = {
89338 + .next = NULL,
89339 + .name = "sm501_create_subdev",
89340 + .file = "drivers/mfd/sm501.c",
89341 + .param3 = 1,
89342 + .param4 = 1,
89343 +};
89344 +
89345 +struct size_overflow_hash _001205_hash = {
89346 + .next = NULL,
89347 + .name = "sn9c102_read",
89348 + .file = "drivers/media/video/sn9c102/sn9c102_core.c",
89349 + .param3 = 1,
89350 +};
89351 +
89352 +struct size_overflow_hash _001206_hash = {
89353 + .next = NULL,
89354 + .name = "snd_ac97_pcm_assign",
89355 + .file = "include/sound/ac97_codec.h",
89356 + .param2 = 1,
89357 +};
89358 +
89359 +struct size_overflow_hash _001207_hash = {
89360 + .next = NULL,
89361 + .name = "snd_ctl_elem_user_tlv",
89362 + .file = "sound/core/control.c",
89363 + .param3 = 1,
89364 +};
89365 +
89366 +struct size_overflow_hash _001208_hash = {
89367 + .next = NULL,
89368 + .name = "snd_emu10k1_fx8010_read",
89369 + .file = "sound/pci/emu10k1/emuproc.c",
89370 + .param5 = 1,
89371 +};
89372 +
89373 +struct size_overflow_hash _001209_hash = {
89374 + .next = NULL,
89375 + .name = "snd_es1938_capture_copy",
89376 + .file = "sound/pci/es1938.c",
89377 + .param5 = 1,
89378 +};
89379 +
89380 +struct size_overflow_hash _001210_hash = {
89381 + .next = NULL,
89382 + .name = "snd_gus_dram_peek",
89383 + .file = "sound/isa/gus/gus_dram.c",
89384 + .param4 = 1,
89385 +};
89386 +
89387 +struct size_overflow_hash _001211_hash = {
89388 + .next = NULL,
89389 + .name = "snd_gus_dram_poke",
89390 + .file = "sound/isa/gus/gus_dram.c",
89391 + .param4 = 1,
89392 +};
89393 +
89394 +struct size_overflow_hash _001212_hash = {
89395 + .next = NULL,
89396 + .name = "snd_hdsp_capture_copy",
89397 + .file = "sound/pci/rme9652/hdsp.c",
89398 + .param5 = 1,
89399 +};
89400 +
89401 +struct size_overflow_hash _001213_hash = {
89402 + .next = NULL,
89403 + .name = "snd_hdsp_playback_copy",
89404 + .file = "sound/pci/rme9652/hdsp.c",
89405 + .param5 = 1,
89406 +};
89407 +
89408 +struct size_overflow_hash _001214_hash = {
89409 + .next = NULL,
89410 + .name = "snd_info_entry_write",
89411 + .file = "sound/core/info.c",
89412 + .param3 = 1,
89413 +};
89414 +
89415 +struct size_overflow_hash _001215_hash = {
89416 + .next = NULL,
89417 + .name = "snd_opl4_mem_proc_read",
89418 + .file = "sound/drivers/opl4/opl4_proc.c",
89419 + .param5 = 1,
89420 +};
89421 +
89422 +struct size_overflow_hash _001216_hash = {
89423 + .next = NULL,
89424 + .name = "snd_opl4_mem_proc_write",
89425 + .file = "sound/drivers/opl4/opl4_proc.c",
89426 + .param5 = 1,
89427 +};
89428 +
89429 +struct size_overflow_hash _001217_hash = {
89430 + .next = NULL,
89431 + .name = "snd_pcm_aio_read",
89432 + .file = "sound/core/pcm_native.c",
89433 + .param3 = 1,
89434 +};
89435 +
89436 +struct size_overflow_hash _001218_hash = {
89437 + .next = NULL,
89438 + .name = "snd_pcm_aio_write",
89439 + .file = "sound/core/pcm_native.c",
89440 + .param3 = 1,
89441 +};
89442 +
89443 +struct size_overflow_hash _001219_hash = {
89444 + .next = NULL,
89445 + .name = "snd_pcm_alloc_vmalloc_buffer",
89446 + .file = "drivers/media/video/cx231xx/cx231xx-audio.c",
89447 + .param2 = 1,
89448 +};
89449 +
89450 +struct size_overflow_hash _001220_hash = {
89451 + .next = NULL,
89452 + .name = "snd_pcm_alloc_vmalloc_buffer",
89453 + .file = "drivers/media/video/cx18/cx18-alsa-pcm.c",
89454 + .param2 = 1,
89455 +};
89456 +
89457 +struct size_overflow_hash _001221_hash = {
89458 + .next = NULL,
89459 + .name = "snd_pcm_alloc_vmalloc_buffer",
89460 + .file = "drivers/media/video/em28xx/em28xx-audio.c",
89461 + .param2 = 1,
89462 +};
89463 +
89464 +struct size_overflow_hash _001222_hash = {
89465 + .next = NULL,
89466 + .name = "_snd_pcm_lib_alloc_vmalloc_buffer",
89467 + .file = "include/sound/pcm.h",
89468 + .param2 = 1,
89469 +};
89470 +
89471 +struct size_overflow_hash _001223_hash = {
89472 + .next = NULL,
89473 + .name = "snd_pcm_oss_read1",
89474 + .file = "sound/core/oss/pcm_oss.c",
89475 + .param3 = 1,
89476 +};
89477 +
89478 +struct size_overflow_hash _001224_hash = {
89479 + .next = NULL,
89480 + .name = "snd_pcm_oss_write1",
89481 + .file = "sound/core/oss/pcm_oss.c",
89482 + .param3 = 1,
89483 +};
89484 +
89485 +struct size_overflow_hash _001225_hash = {
89486 + .next = NULL,
89487 + .name = "snd_pcm_oss_write2",
89488 + .file = "sound/core/oss/pcm_oss.c",
89489 + .param3 = 1,
89490 +};
89491 +
89492 +struct size_overflow_hash _001226_hash = {
89493 + .next = NULL,
89494 + .name = "snd_pcm_plugin_build",
89495 + .file = "sound/core/oss/pcm_plugin.c",
89496 + .param5 = 1,
89497 +};
89498 +
89499 +struct size_overflow_hash _001227_hash = {
89500 + .next = NULL,
89501 + .name = "snd_rme9652_capture_copy",
89502 + .file = "sound/pci/rme9652/rme9652.c",
89503 + .param5 = 1,
89504 +};
89505 +
89506 +struct size_overflow_hash _001228_hash = {
89507 + .next = NULL,
89508 + .name = "snd_rme9652_playback_copy",
89509 + .file = "sound/pci/rme9652/rme9652.c",
89510 + .param5 = 1,
89511 +};
89512 +
89513 +struct size_overflow_hash _001229_hash = {
89514 + .next = NULL,
89515 + .name = "snd_soc_hw_bulk_write_raw",
89516 + .file = "sound/soc/soc-io.c",
89517 + .param4 = 1,
89518 +};
89519 +
89520 +struct size_overflow_hash _001230_hash = {
89521 + .next = NULL,
89522 + .name = "snd_usb_ctl_msg",
89523 + .file = "sound/usb/helper.c",
89524 + .param8 = 1,
89525 +};
89526 +
89527 +struct size_overflow_hash _001231_hash = {
89528 + .next = NULL,
89529 + .name = "_sp2d_alloc",
89530 + .file = "fs/exofs/ore_raid.c",
89531 + .param1 = 1,
89532 +};
89533 +
89534 +struct size_overflow_hash _001232_hash = {
89535 + .next = NULL,
89536 + .name = "spidev_message",
89537 + .file = "drivers/spi/spidev.c",
89538 + .param3 = 1,
89539 +};
89540 +
89541 +struct size_overflow_hash _001233_hash = {
89542 + .next = NULL,
89543 + .name = "spidev_write",
89544 + .file = "drivers/spi/spidev.c",
89545 + .param3 = 1,
89546 +};
89547 +
89548 +struct size_overflow_hash _001234_hash = {
89549 + .next = NULL,
89550 + .name = "spi_show_regs",
89551 + .file = "drivers/spi/spi-dw.c",
89552 + .param3 = 1,
89553 +};
89554 +
89555 +struct size_overflow_hash _001235_hash = {
89556 + .next = NULL,
89557 + .name = "srp_alloc_iu",
89558 + .file = "drivers/infiniband/ulp/srp/ib_srp.c",
89559 + .param2 = 1,
89560 +};
89561 +
89562 +struct size_overflow_hash _001236_hash = {
89563 + .next = NULL,
89564 + .name = "srp_iu_pool_alloc",
89565 + .file = "drivers/scsi/libsrp.c",
89566 + .param2 = 1,
89567 +};
89568 +
89569 +struct size_overflow_hash _001237_hash = {
89570 + .next = NULL,
89571 + .name = "srp_ring_alloc",
89572 + .file = "drivers/scsi/libsrp.c",
89573 + .param2 = 1,
89574 +};
89575 +
89576 +struct size_overflow_hash _001238_hash = {
89577 + .next = NULL,
89578 + .name = "sta_agg_status_read",
89579 + .file = "net/mac80211/debugfs_sta.c",
89580 + .param3 = 1,
89581 +};
89582 +
89583 +struct size_overflow_hash _001239_hash = {
89584 + .next = NULL,
89585 + .name = "sta_agg_status_write",
89586 + .file = "net/mac80211/debugfs_sta.c",
89587 + .param3 = 1,
89588 +};
89589 +
89590 +struct size_overflow_hash _001240_hash = {
89591 + .next = NULL,
89592 + .name = "sta_connected_time_read",
89593 + .file = "net/mac80211/debugfs_sta.c",
89594 + .param3 = 1,
89595 +};
89596 +
89597 +struct size_overflow_hash _001241_hash = {
89598 + .next = NULL,
89599 + .name = "sta_flags_read",
89600 + .file = "net/mac80211/debugfs_sta.c",
89601 + .param3 = 1,
89602 +};
89603 +
89604 +struct size_overflow_hash _001242_hash = {
89605 + .next = NULL,
89606 + .name = "sta_ht_capa_read",
89607 + .file = "net/mac80211/debugfs_sta.c",
89608 + .param3 = 1,
89609 +};
89610 +
89611 +struct size_overflow_hash _001243_hash = {
89612 + .next = NULL,
89613 + .name = "sta_last_seq_ctrl_read",
89614 + .file = "net/mac80211/debugfs_sta.c",
89615 + .param3 = 1,
89616 +};
89617 +
89618 +struct size_overflow_hash _001244_hash = {
89619 + .next = NULL,
89620 + .name = "sta_num_ps_buf_frames_read",
89621 + .file = "net/mac80211/debugfs_sta.c",
89622 + .param3 = 1,
89623 +};
89624 +
89625 +struct size_overflow_hash _001245_hash = {
89626 + .next = NULL,
89627 + .name = "stk_prepare_sio_buffers",
89628 + .file = "drivers/media/video/stk-webcam.c",
89629 + .param2 = 1,
89630 +};
89631 +
89632 +struct size_overflow_hash _001246_hash = {
89633 + .next = NULL,
89634 + .name = "store_iwmct_log_level",
89635 + .file = "drivers/misc/iwmc3200top/log.c",
89636 + .param4 = 1,
89637 +};
89638 +
89639 +struct size_overflow_hash _001247_hash = {
89640 + .next = NULL,
89641 + .name = "store_iwmct_log_level_fw",
89642 + .file = "drivers/misc/iwmc3200top/log.c",
89643 + .param4 = 1,
89644 +};
89645 +
89646 +struct size_overflow_hash _001248_hash = {
89647 + .next = NULL,
89648 + .name = "str_to_user",
89649 + .file = "drivers/input/evdev.c",
89650 + .param2 = 1,
89651 +};
89652 +
89653 +struct size_overflow_hash _001249_hash = {
89654 + .next = NULL,
89655 + .name = "svc_pool_map_alloc_arrays",
89656 + .file = "net/sunrpc/svc.c",
89657 + .param2 = 1,
89658 +};
89659 +
89660 +struct size_overflow_hash _001250_hash = {
89661 + .next = NULL,
89662 + .name = "svc_setsockopt",
89663 + .file = "net/atm/svc.c",
89664 + .param5 = 1,
89665 +};
89666 +
89667 +struct size_overflow_hash _001251_hash = {
89668 + .next = NULL,
89669 + .name = "t4_alloc_mem",
89670 + .file = "drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c",
89671 + .param1 = 1,
89672 +};
89673 +
89674 +struct size_overflow_hash _001252_hash = {
89675 + .next = NULL,
89676 + .name = "tda10048_writeregbulk",
89677 + .file = "drivers/media/dvb/frontends/tda10048.c",
89678 + .param4 = 1,
89679 +};
89680 +
89681 +struct size_overflow_hash _001253_hash = {
89682 + .next = NULL,
89683 + .name = "__team_options_register",
89684 + .file = "drivers/net/team/team.c",
89685 + .param3 = 1,
89686 +};
89687 +
89688 +struct size_overflow_hash _001254_hash = {
89689 + .next = NULL,
89690 + .name = "tifm_alloc_adapter",
89691 + .file = "include/linux/tifm.h",
89692 + .param1 = 1,
89693 +};
89694 +
89695 +struct size_overflow_hash _001255_hash = {
89696 + .next = NULL,
89697 + .name = "tipc_subseq_alloc",
89698 + .file = "net/tipc/name_table.c",
89699 + .param1 = 1,
89700 +};
89701 +
89702 +struct size_overflow_hash _001256_hash = {
89703 + .next = NULL,
89704 + .name = "tm6000_read_write_usb",
89705 + .file = "drivers/media/video/tm6000/tm6000-core.c",
89706 + .param7 = 1,
89707 +};
89708 +
89709 +struct size_overflow_hash _001257_hash = {
89710 + .next = NULL,
89711 + .name = "tower_write",
89712 + .file = "drivers/usb/misc/legousbtower.c",
89713 + .param3 = 1,
89714 +};
89715 +
89716 +struct size_overflow_hash _001258_hash = {
89717 + .next = NULL,
89718 + .name = "trusted_instantiate",
89719 + .file = "security/keys/trusted.c",
89720 + .param3 = 1,
89721 +};
89722 +
89723 +struct size_overflow_hash _001259_hash = {
89724 + .next = NULL,
89725 + .name = "trusted_update",
89726 + .file = "security/keys/trusted.c",
89727 + .param3 = 1,
89728 +};
89729 +
89730 +struct size_overflow_hash _001260_hash = {
89731 + .next = NULL,
89732 + .name = "TSS_rawhmac",
89733 + .file = "security/keys/trusted.c",
89734 + .param3 = 1,
89735 +};
89736 +
89737 +struct size_overflow_hash _001261_hash = {
89738 + .next = NULL,
89739 + .name = "tx_internal_desc_overflow_read",
89740 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89741 + .param3 = 1,
89742 +};
89743 +
89744 +struct size_overflow_hash _001262_hash = {
89745 + .next = NULL,
89746 + .name = "tx_queue_len_read",
89747 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89748 + .param3 = 1,
89749 +};
89750 +
89751 +struct size_overflow_hash _001263_hash = {
89752 + .next = NULL,
89753 + .name = "tx_queue_len_read",
89754 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
89755 + .param3 = 1,
89756 +};
89757 +
89758 +struct size_overflow_hash _001264_hash = {
89759 + .next = NULL,
89760 + .name = "tx_queue_status_read",
89761 + .file = "drivers/net/wireless/wl1251/debugfs.c",
89762 + .param3 = 1,
89763 +};
89764 +
89765 +struct size_overflow_hash _001265_hash = {
89766 + .next = NULL,
89767 + .name = "udf_alloc_i_data",
89768 + .file = "fs/udf/inode.c",
89769 + .param2 = 1,
89770 +};
89771 +
89772 +struct size_overflow_hash _001266_hash = {
89773 + .next = NULL,
89774 + .name = "udf_sb_alloc_partition_maps",
89775 + .file = "fs/udf/super.c",
89776 + .param2 = 1,
89777 +};
89778 +
89779 +struct size_overflow_hash _001267_hash = {
89780 + .next = NULL,
89781 + .name = "uea_idma_write",
89782 + .file = "drivers/usb/atm/ueagle-atm.c",
89783 + .param3 = 1,
89784 +};
89785 +
89786 +struct size_overflow_hash _001268_hash = {
89787 + .next = NULL,
89788 + .name = "uea_request",
89789 + .file = "drivers/usb/atm/ueagle-atm.c",
89790 + .param4 = 1,
89791 +};
89792 +
89793 +struct size_overflow_hash _001269_hash = {
89794 + .next = NULL,
89795 + .name = "uea_send_modem_cmd",
89796 + .file = "drivers/usb/atm/ueagle-atm.c",
89797 + .param3 = 1,
89798 +};
89799 +
89800 +struct size_overflow_hash _001270_hash = {
89801 + .next = NULL,
89802 + .name = "uhci_debug_read",
89803 + .file = "drivers/usb/host/uhci-debug.c",
89804 + .param3 = 1,
89805 +};
89806 +
89807 +struct size_overflow_hash _001271_hash = {
89808 + .next = NULL,
89809 + .name = "uio_read",
89810 + .file = "drivers/uio/uio.c",
89811 + .param3 = 1,
89812 +};
89813 +
89814 +struct size_overflow_hash _001272_hash = {
89815 + .next = NULL,
89816 + .name = "uio_write",
89817 + .file = "drivers/uio/uio.c",
89818 + .param3 = 1,
89819 +};
89820 +
89821 +struct size_overflow_hash _001273_hash = {
89822 + .next = NULL,
89823 + .name = "um_idi_write",
89824 + .file = "drivers/isdn/hardware/eicon/divasi.c",
89825 + .param3 = 1,
89826 +};
89827 +
89828 +struct size_overflow_hash _001274_hash = {
89829 + .next = NULL,
89830 + .name = "unlink_queued",
89831 + .file = "drivers/usb/misc/usbtest.c",
89832 + .param3 = 1,
89833 + .param4 = 1,
89834 +};
89835 +
89836 +struct size_overflow_hash _001275_hash = {
89837 + .next = NULL,
89838 + .name = "us122l_ctl_msg",
89839 + .file = "sound/usb/usx2y/us122l.c",
89840 + .param8 = 1,
89841 +};
89842 +
89843 +struct size_overflow_hash _001276_hash = {
89844 + .next = NULL,
89845 + .name = "usbdev_read",
89846 + .file = "drivers/usb/core/devio.c",
89847 + .param3 = 1,
89848 +};
89849 +
89850 +struct size_overflow_hash _001277_hash = {
89851 + .next = NULL,
89852 + .name = "usblp_read",
89853 + .file = "drivers/usb/class/usblp.c",
89854 + .param3 = 1,
89855 +};
89856 +
89857 +struct size_overflow_hash _001278_hash = {
89858 + .next = NULL,
89859 + .name = "usblp_write",
89860 + .file = "drivers/usb/class/usblp.c",
89861 + .param3 = 1,
89862 +};
89863 +
89864 +struct size_overflow_hash _001279_hash = {
89865 + .next = NULL,
89866 + .name = "usbtest_alloc_urb",
89867 + .file = "drivers/usb/misc/usbtest.c",
89868 + .param3 = 1,
89869 + .param5 = 1,
89870 +};
89871 +
89872 +struct size_overflow_hash _001281_hash = {
89873 + .next = NULL,
89874 + .name = "usbtmc_read",
89875 + .file = "drivers/usb/class/usbtmc.c",
89876 + .param3 = 1,
89877 +};
89878 +
89879 +struct size_overflow_hash _001282_hash = {
89880 + .next = NULL,
89881 + .name = "usbtmc_write",
89882 + .file = "drivers/usb/class/usbtmc.c",
89883 + .param3 = 1,
89884 +};
89885 +
89886 +struct size_overflow_hash _001283_hash = {
89887 + .next = NULL,
89888 + .name = "usbvision_v4l2_read",
89889 + .file = "drivers/media/video/usbvision/usbvision-video.c",
89890 + .param3 = 1,
89891 +};
89892 +
89893 +struct size_overflow_hash _001284_hash = {
89894 + .next = NULL,
89895 + .name = "uvc_alloc_buffers",
89896 + .file = "drivers/usb/gadget/uvc_queue.c",
89897 + .param2 = 1,
89898 +};
89899 +
89900 +struct size_overflow_hash _001285_hash = {
89901 + .next = NULL,
89902 + .name = "uvc_alloc_entity",
89903 + .file = "drivers/media/video/uvc/uvc_driver.c",
89904 + .param3 = 1,
89905 +};
89906 +
89907 +struct size_overflow_hash _001286_hash = {
89908 + .next = NULL,
89909 + .name = "uvc_debugfs_stats_read",
89910 + .file = "drivers/media/video/uvc/uvc_debugfs.c",
89911 + .param3 = 1,
89912 +};
89913 +
89914 +struct size_overflow_hash _001287_hash = {
89915 + .next = NULL,
89916 + .name = "uvc_simplify_fraction",
89917 + .file = "drivers/media/video/uvc/uvc_driver.c",
89918 + .param3 = 1,
89919 +};
89920 +
89921 +struct size_overflow_hash _001288_hash = {
89922 + .next = NULL,
89923 + .name = "uwb_rc_neh_grok_event",
89924 + .file = "drivers/uwb/neh.c",
89925 + .param3 = 1,
89926 +};
89927 +
89928 +struct size_overflow_hash _001289_hash = {
89929 + .next = NULL,
89930 + .name = "v4l2_event_subscribe",
89931 + .file = "include/media/v4l2-event.h",
89932 + .param3 = 1,
89933 +};
89934 +
89935 +struct size_overflow_hash _001290_hash = {
89936 + .next = NULL,
89937 + .name = "v4l_stk_read",
89938 + .file = "drivers/media/video/stk-webcam.c",
89939 + .param3 = 1,
89940 +};
89941 +
89942 +struct size_overflow_hash _001291_hash = {
89943 + .next = NULL,
89944 + .name = "__vb2_perform_fileio",
89945 + .file = "drivers/media/video/videobuf2-core.c",
89946 + .param3 = 1,
89947 +};
89948 +
89949 +struct size_overflow_hash _001292_hash = {
89950 + .next = NULL,
89951 + .name = "vdma_mem_alloc",
89952 + .file = "arch/x86/include/asm/floppy.h",
89953 + .param1 = 1,
89954 +};
89955 +
89956 +struct size_overflow_hash _001293_hash = {
89957 + .next = NULL,
89958 + .name = "vfd_write",
89959 + .file = "drivers/media/rc/imon.c",
89960 + .param3 = 1,
89961 +};
89962 +
89963 +struct size_overflow_hash _001294_hash = {
89964 + .next = NULL,
89965 + .name = "vhci_get_user",
89966 + .file = "drivers/bluetooth/hci_vhci.c",
89967 + .param3 = 1,
89968 +};
89969 +
89970 +struct size_overflow_hash _001295_hash = {
89971 + .next = NULL,
89972 + .name = "__vhost_add_used_n",
89973 + .file = "drivers/vhost/vhost.c",
89974 + .param3 = 1,
89975 +};
89976 +
89977 +struct size_overflow_hash _001296_hash = {
89978 + .next = NULL,
89979 + .name = "__videobuf_alloc_vb",
89980 + .file = "drivers/media/video/videobuf-dma-sg.c",
89981 + .param1 = 1,
89982 +};
89983 +
89984 +struct size_overflow_hash _001297_hash = {
89985 + .next = NULL,
89986 + .name = "__videobuf_alloc_vb",
89987 + .file = "drivers/media/video/videobuf-dma-contig.c",
89988 + .param1 = 1,
89989 +};
89990 +
89991 +struct size_overflow_hash _001298_hash = {
89992 + .next = NULL,
89993 + .name = "__videobuf_alloc_vb",
89994 + .file = "drivers/media/video/videobuf-vmalloc.c",
89995 + .param1 = 1,
89996 +};
89997 +
89998 +struct size_overflow_hash _001299_hash = {
89999 + .next = NULL,
90000 + .name = "__videobuf_copy_to_user",
90001 + .file = "drivers/media/video/videobuf-core.c",
90002 + .param4 = 1,
90003 +};
90004 +
90005 +struct size_overflow_hash _001300_hash = {
90006 + .next = NULL,
90007 + .name = "video_proc_write",
90008 + .file = "drivers/platform/x86/toshiba_acpi.c",
90009 + .param3 = 1,
90010 +};
90011 +
90012 +struct size_overflow_hash _001301_hash = {
90013 + .next = NULL,
90014 + .name = "vifs_state_read",
90015 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90016 + .param3 = 1,
90017 +};
90018 +
90019 +struct size_overflow_hash _001302_hash = {
90020 + .next = NULL,
90021 + .name = "vlsi_alloc_ring",
90022 + .file = "drivers/net/irda/vlsi_ir.c",
90023 + .param3 = 1,
90024 + .param4 = 1,
90025 +};
90026 +
90027 +struct size_overflow_hash _001304_hash = {
90028 + .next = NULL,
90029 + .name = "vol_cdev_direct_write",
90030 + .file = "drivers/mtd/ubi/cdev.c",
90031 + .param3 = 1,
90032 +};
90033 +
90034 +struct size_overflow_hash _001305_hash = {
90035 + .next = NULL,
90036 + .name = "vol_cdev_read",
90037 + .file = "drivers/mtd/ubi/cdev.c",
90038 + .param3 = 1,
90039 +};
90040 +
90041 +struct size_overflow_hash _001306_hash = {
90042 + .next = NULL,
90043 + .name = "vring_add_indirect",
90044 + .file = "drivers/virtio/virtio_ring.c",
90045 + .param3 = 1,
90046 + .param4 = 1,
90047 +};
90048 +
90049 +struct size_overflow_hash _001308_hash = {
90050 + .next = NULL,
90051 + .name = "vring_new_virtqueue",
90052 + .file = "include/linux/virtio_ring.h",
90053 + .param1 = 1,
90054 +};
90055 +
90056 +struct size_overflow_hash _001309_hash = {
90057 + .next = NULL,
90058 + .name = "__vxge_hw_channel_allocate",
90059 + .file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
90060 + .param3 = 1,
90061 +};
90062 +
90063 +struct size_overflow_hash _001310_hash = {
90064 + .next = NULL,
90065 + .name = "vxge_os_dma_malloc",
90066 + .file = "drivers/net/ethernet/neterion/vxge/vxge-config.h",
90067 + .param2 = 1,
90068 +};
90069 +
90070 +struct size_overflow_hash _001311_hash = {
90071 + .next = NULL,
90072 + .name = "vxge_os_dma_malloc_async",
90073 + .file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
90074 + .param3 = 1,
90075 +};
90076 +
90077 +struct size_overflow_hash _001312_hash = {
90078 + .next = NULL,
90079 + .name = "w9966_v4l_read",
90080 + .file = "drivers/media/video/w9966.c",
90081 + .param3 = 1,
90082 +};
90083 +
90084 +struct size_overflow_hash _001313_hash = {
90085 + .next = NULL,
90086 + .name = "waiters_read",
90087 + .file = "fs/dlm/debug_fs.c",
90088 + .param3 = 1,
90089 +};
90090 +
90091 +struct size_overflow_hash _001314_hash = {
90092 + .next = NULL,
90093 + .name = "wa_nep_queue",
90094 + .file = "drivers/usb/wusbcore/wa-nep.c",
90095 + .param2 = 1,
90096 +};
90097 +
90098 +struct size_overflow_hash _001315_hash = {
90099 + .next = NULL,
90100 + .name = "__wa_xfer_setup_segs",
90101 + .file = "drivers/usb/wusbcore/wa-xfer.c",
90102 + .param2 = 1,
90103 +};
90104 +
90105 +struct size_overflow_hash _001316_hash = {
90106 + .next = NULL,
90107 + .name = "wdm_read",
90108 + .file = "drivers/usb/class/cdc-wdm.c",
90109 + .param3 = 1,
90110 +};
90111 +
90112 +struct size_overflow_hash _001317_hash = {
90113 + .next = NULL,
90114 + .name = "wdm_write",
90115 + .file = "drivers/usb/class/cdc-wdm.c",
90116 + .param3 = 1,
90117 +};
90118 +
90119 +struct size_overflow_hash _001318_hash = {
90120 + .next = NULL,
90121 + .name = "wep_addr_key_count_read",
90122 + .file = "drivers/net/wireless/wl1251/debugfs.c",
90123 + .param3 = 1,
90124 +};
90125 +
90126 +struct size_overflow_hash _001319_hash = {
90127 + .next = &_000480_hash,
90128 + .name = "wep_decrypt_fail_read",
90129 + .file = "drivers/net/wireless/wl1251/debugfs.c",
90130 + .param3 = 1,
90131 +};
90132 +
90133 +struct size_overflow_hash _001320_hash = {
90134 + .next = NULL,
90135 + .name = "wep_default_key_count_read",
90136 + .file = "drivers/net/wireless/wl1251/debugfs.c",
90137 + .param3 = 1,
90138 +};
90139 +
90140 +struct size_overflow_hash _001321_hash = {
90141 + .next = NULL,
90142 + .name = "wep_interrupt_read",
90143 + .file = "drivers/net/wireless/wl1251/debugfs.c",
90144 + .param3 = 1,
90145 +};
90146 +
90147 +struct size_overflow_hash _001322_hash = {
90148 + .next = NULL,
90149 + .name = "wep_key_not_found_read",
90150 + .file = "drivers/net/wireless/wl1251/debugfs.c",
90151 + .param3 = 1,
90152 +};
90153 +
90154 +struct size_overflow_hash _001323_hash = {
90155 + .next = NULL,
90156 + .name = "wep_packets_read",
90157 + .file = "drivers/net/wireless/wl1251/debugfs.c",
90158 + .param3 = 1,
90159 +};
90160 +
90161 +struct size_overflow_hash _001324_hash = {
90162 + .next = NULL,
90163 + .name = "wiimote_hid_send",
90164 + .file = "drivers/hid/hid-wiimote-core.c",
90165 + .param3 = 1,
90166 +};
90167 +
90168 +struct size_overflow_hash _001325_hash = {
90169 + .next = NULL,
90170 + .name = "wl1271_format_buffer",
90171 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90172 + .param2 = 1,
90173 +};
90174 +
90175 +struct size_overflow_hash _001326_hash = {
90176 + .next = NULL,
90177 + .name = "wl1273_fm_fops_write",
90178 + .file = "drivers/media/radio/radio-wl1273.c",
90179 + .param3 = 1,
90180 +};
90181 +
90182 +struct size_overflow_hash _001327_hash = {
90183 + .next = NULL,
90184 + .name = "wlc_phy_loadsampletable_nphy",
90185 + .file = "drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c",
90186 + .param3 = 1,
90187 +};
90188 +
90189 +struct size_overflow_hash _001328_hash = {
90190 + .next = NULL,
90191 + .name = "wpan_phy_alloc",
90192 + .file = "include/net/wpan-phy.h",
90193 + .param1 = 1,
90194 +};
90195 +
90196 +struct size_overflow_hash _001329_hash = {
90197 + .next = NULL,
90198 + .name = "write_flush",
90199 + .file = "net/sunrpc/cache.c",
90200 + .param3 = 1,
90201 +};
90202 +
90203 +struct size_overflow_hash _001330_hash = {
90204 + .next = NULL,
90205 + .name = "write_rio",
90206 + .file = "drivers/usb/misc/rio500.c",
90207 + .param3 = 1,
90208 +};
90209 +
90210 +struct size_overflow_hash _001331_hash = {
90211 + .next = NULL,
90212 + .name = "wusb_ccm_mac",
90213 + .file = "drivers/usb/wusbcore/crypto.c",
90214 + .param7 = 1,
90215 +};
90216 +
90217 +struct size_overflow_hash _001332_hash = {
90218 + .next = NULL,
90219 + .name = "xfs_attrmulti_attr_set",
90220 + .file = "fs/xfs/xfs_ioctl.c",
90221 + .param4 = 1,
90222 +};
90223 +
90224 +struct size_overflow_hash _001333_hash = {
90225 + .next = NULL,
90226 + .name = "xfs_handle_to_dentry",
90227 + .file = "fs/xfs/xfs_ioctl.c",
90228 + .param3 = 1,
90229 +};
90230 +
90231 +struct size_overflow_hash _001334_hash = {
90232 + .next = NULL,
90233 + .name = "xhci_alloc_stream_info",
90234 + .file = "drivers/usb/host/xhci-mem.c",
90235 + .param3 = 1,
90236 +};
90237 +
90238 +struct size_overflow_hash _001335_hash = {
90239 + .next = NULL,
90240 + .name = "xprt_alloc",
90241 + .file = "include/linux/sunrpc/xprt.h",
90242 + .param2 = 1,
90243 +};
90244 +
90245 +struct size_overflow_hash _001336_hash = {
90246 + .next = NULL,
90247 + .name = "xprt_rdma_allocate",
90248 + .file = "net/sunrpc/xprtrdma/transport.c",
90249 + .param2 = 1,
90250 +};
90251 +
90252 +struct size_overflow_hash _001337_hash = {
90253 + .next = NULL,
90254 + .name = "xt_alloc_table_info",
90255 + .file = "include/linux/netfilter/x_tables.h",
90256 + .param1 = 1,
90257 +};
90258 +
90259 +struct size_overflow_hash _001338_hash = {
90260 + .next = NULL,
90261 + .name = "zd_usb_iowrite16v_async",
90262 + .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
90263 + .param3 = 1,
90264 +};
90265 +
90266 +struct size_overflow_hash _001339_hash = {
90267 + .next = NULL,
90268 + .name = "zd_usb_read_fw",
90269 + .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
90270 + .param4 = 1,
90271 +};
90272 +
90273 +struct size_overflow_hash _001340_hash = {
90274 + .next = NULL,
90275 + .name = "zoran_write",
90276 + .file = "drivers/media/video/zoran/zoran_procfs.c",
90277 + .param3 = 1,
90278 +};
90279 +
90280 +struct size_overflow_hash _001341_hash = {
90281 + .next = NULL,
90282 + .name = "ad7879_spi_multi_read",
90283 + .file = "drivers/input/touchscreen/ad7879-spi.c",
90284 + .param3 = 1,
90285 +};
90286 +
90287 +struct size_overflow_hash _001342_hash = {
90288 + .next = NULL,
90289 + .name = "aes_decrypt_fail_read",
90290 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90291 + .param3 = 1,
90292 +};
90293 +
90294 +struct size_overflow_hash _001343_hash = {
90295 + .next = NULL,
90296 + .name = "aes_decrypt_interrupt_read",
90297 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90298 + .param3 = 1,
90299 +};
90300 +
90301 +struct size_overflow_hash _001344_hash = {
90302 + .next = NULL,
90303 + .name = "aes_decrypt_packets_read",
90304 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90305 + .param3 = 1,
90306 +};
90307 +
90308 +struct size_overflow_hash _001345_hash = {
90309 + .next = NULL,
90310 + .name = "aes_encrypt_fail_read",
90311 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90312 + .param3 = 1,
90313 +};
90314 +
90315 +struct size_overflow_hash _001346_hash = {
90316 + .next = NULL,
90317 + .name = "aes_encrypt_interrupt_read",
90318 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90319 + .param3 = 1,
90320 +};
90321 +
90322 +struct size_overflow_hash _001347_hash = {
90323 + .next = NULL,
90324 + .name = "aes_encrypt_packets_read",
90325 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90326 + .param3 = 1,
90327 +};
90328 +
90329 +struct size_overflow_hash _001348_hash = {
90330 + .next = NULL,
90331 + .name = "afs_cell_create",
90332 + .file = "fs/afs/cell.c",
90333 + .param2 = 1,
90334 +};
90335 +
90336 +struct size_overflow_hash _001349_hash = {
90337 + .next = NULL,
90338 + .name = "agp_create_user_memory",
90339 + .file = "drivers/char/agp/generic.c",
90340 + .param1 = 1,
90341 +};
90342 +
90343 +struct size_overflow_hash _001350_hash = {
90344 + .next = NULL,
90345 + .name = "alg_setsockopt",
90346 + .file = "crypto/af_alg.c",
90347 + .param5 = 1,
90348 +};
90349 +
90350 +struct size_overflow_hash _001351_hash = {
90351 + .next = NULL,
90352 + .name = "alloc_targets",
90353 + .file = "drivers/md/dm-table.c",
90354 + .param2 = 1,
90355 +};
90356 +
90357 +struct size_overflow_hash _001352_hash = {
90358 + .next = NULL,
90359 + .name = "aoechr_write",
90360 + .file = "drivers/block/aoe/aoechr.c",
90361 + .param3 = 1,
90362 +};
90363 +
90364 +struct size_overflow_hash _001353_hash = {
90365 + .next = NULL,
90366 + .name = "ath6kl_cfg80211_connect_event",
90367 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
90368 + .param7 = 1,
90369 + .param9 = 1,
90370 + .param8 = 1,
90371 +};
90372 +
90373 +struct size_overflow_hash _001356_hash = {
90374 + .next = NULL,
90375 + .name = "ath6kl_mgmt_tx",
90376 + .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
90377 + .param9 = 1,
90378 +};
90379 +
90380 +struct size_overflow_hash _001357_hash = {
90381 + .next = NULL,
90382 + .name = "atomic_read_file",
90383 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
90384 + .param3 = 1,
90385 +};
90386 +
90387 +struct size_overflow_hash _001358_hash = {
90388 + .next = NULL,
90389 + .name = "beacon_interval_read",
90390 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90391 + .param3 = 1,
90392 +};
90393 +
90394 +struct size_overflow_hash _001359_hash = {
90395 + .next = NULL,
90396 + .name = "bm_entry_write",
90397 + .file = "fs/binfmt_misc.c",
90398 + .param3 = 1,
90399 +};
90400 +
90401 +struct size_overflow_hash _001360_hash = {
90402 + .next = NULL,
90403 + .name = "bm_init",
90404 + .file = "lib/ts_bm.c",
90405 + .param2 = 1,
90406 +};
90407 +
90408 +struct size_overflow_hash _001361_hash = {
90409 + .next = NULL,
90410 + .name = "bm_register_write",
90411 + .file = "fs/binfmt_misc.c",
90412 + .param3 = 1,
90413 +};
90414 +
90415 +struct size_overflow_hash _001362_hash = {
90416 + .next = NULL,
90417 + .name = "bm_status_write",
90418 + .file = "fs/binfmt_misc.c",
90419 + .param3 = 1,
90420 +};
90421 +
90422 +struct size_overflow_hash _001363_hash = {
90423 + .next = NULL,
90424 + .name = "brn_proc_write",
90425 + .file = "drivers/platform/x86/asus_acpi.c",
90426 + .param3 = 1,
90427 +};
90428 +
90429 +struct size_overflow_hash _001364_hash = {
90430 + .next = NULL,
90431 + .name = "btrfs_map_block",
90432 + .file = "fs/btrfs/volumes.c",
90433 + .param3 = 1,
90434 +};
90435 +
90436 +struct size_overflow_hash _001365_hash = {
90437 + .next = NULL,
90438 + .name = "cache_downcall",
90439 + .file = "net/sunrpc/cache.c",
90440 + .param3 = 1,
90441 +};
90442 +
90443 +struct size_overflow_hash _001366_hash = {
90444 + .next = NULL,
90445 + .name = "cache_slow_downcall",
90446 + .file = "net/sunrpc/cache.c",
90447 + .param2 = 1,
90448 +};
90449 +
90450 +struct size_overflow_hash _001367_hash = {
90451 + .next = NULL,
90452 + .name = "ceph_dns_resolve_name",
90453 + .file = "net/ceph/messenger.c",
90454 + .param1 = 1,
90455 +};
90456 +
90457 +struct size_overflow_hash _001368_hash = {
90458 + .next = NULL,
90459 + .name = "cfg80211_roamed",
90460 + .file = "include/net/cfg80211.h",
90461 + .param5 = 1,
90462 + .param7 = 1,
90463 +};
90464 +
90465 +struct size_overflow_hash _001370_hash = {
90466 + .next = NULL,
90467 + .name = "cifs_readv_from_socket",
90468 + .file = "fs/cifs/connect.c",
90469 + .param3 = 1,
90470 +};
90471 +
90472 +struct size_overflow_hash _001371_hash = {
90473 + .next = NULL,
90474 + .name = "configfs_write_file",
90475 + .file = "fs/configfs/file.c",
90476 + .param3 = 1,
90477 +};
90478 +
90479 +struct size_overflow_hash _001372_hash = {
90480 + .next = &_001370_hash,
90481 + .name = "cpu_type_read",
90482 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
90483 + .param3 = 1,
90484 +};
90485 +
90486 +struct size_overflow_hash _001373_hash = {
90487 + .next = NULL,
90488 + .name = "cx18_copy_mdl_to_user",
90489 + .file = "drivers/media/video/cx18/cx18-fileops.c",
90490 + .param4 = 1,
90491 +};
90492 +
90493 +struct size_overflow_hash _001374_hash = {
90494 + .next = NULL,
90495 + .name = "cxgbi_ddp_reserve",
90496 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
90497 + .param4 = 1,
90498 +};
90499 +
90500 +struct size_overflow_hash _001375_hash = {
90501 + .next = NULL,
90502 + .name = "cxgbi_device_portmap_create",
90503 + .file = "drivers/scsi/cxgbi/libcxgbi.c",
90504 + .param3 = 1,
90505 +};
90506 +
90507 +struct size_overflow_hash _001376_hash = {
90508 + .next = NULL,
90509 + .name = "datablob_hmac_append",
90510 + .file = "security/keys/encrypted-keys/encrypted.c",
90511 + .param3 = 1,
90512 +};
90513 +
90514 +struct size_overflow_hash _001377_hash = {
90515 + .next = NULL,
90516 + .name = "datablob_hmac_verify",
90517 + .file = "security/keys/encrypted-keys/encrypted.c",
90518 + .param4 = 1,
90519 +};
90520 +
90521 +struct size_overflow_hash _001378_hash = {
90522 + .next = NULL,
90523 + .name = "dataflash_read_fact_otp",
90524 + .file = "drivers/mtd/devices/mtd_dataflash.c",
90525 + .param3 = 1,
90526 +};
90527 +
90528 +struct size_overflow_hash _001379_hash = {
90529 + .next = NULL,
90530 + .name = "dataflash_read_user_otp",
90531 + .file = "drivers/mtd/devices/mtd_dataflash.c",
90532 + .param3 = 1,
90533 +};
90534 +
90535 +struct size_overflow_hash _001380_hash = {
90536 + .next = NULL,
90537 + .name = "depth_read",
90538 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
90539 + .param3 = 1,
90540 +};
90541 +
90542 +struct size_overflow_hash _001381_hash = {
90543 + .next = NULL,
90544 + .name = "depth_write",
90545 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
90546 + .param3 = 1,
90547 +};
90548 +
90549 +struct size_overflow_hash _001382_hash = {
90550 + .next = NULL,
90551 + .name = "dev_irnet_write",
90552 + .file = "net/irda/irnet/irnet_ppp.c",
90553 + .param3 = 1,
90554 +};
90555 +
90556 +struct size_overflow_hash _001383_hash = {
90557 + .next = NULL,
90558 + .name = "dev_write",
90559 + .file = "sound/oss/msnd_pinnacle.c",
90560 + .param3 = 1,
90561 +};
90562 +
90563 +struct size_overflow_hash _001384_hash = {
90564 + .next = NULL,
90565 + .name = "dfs_file_read",
90566 + .file = "fs/ubifs/debug.c",
90567 + .param3 = 1,
90568 +};
90569 +
90570 +struct size_overflow_hash _001385_hash = {
90571 + .next = NULL,
90572 + .name = "dfs_file_write",
90573 + .file = "fs/ubifs/debug.c",
90574 + .param3 = 1,
90575 +};
90576 +
90577 +struct size_overflow_hash _001386_hash = {
90578 + .next = NULL,
90579 + .name = "dfs_global_file_read",
90580 + .file = "fs/ubifs/debug.c",
90581 + .param3 = 1,
90582 +};
90583 +
90584 +struct size_overflow_hash _001387_hash = {
90585 + .next = NULL,
90586 + .name = "dfs_global_file_write",
90587 + .file = "fs/ubifs/debug.c",
90588 + .param3 = 1,
90589 +};
90590 +
90591 +struct size_overflow_hash _001388_hash = {
90592 + .next = NULL,
90593 + .name = "disconnect",
90594 + .file = "net/bluetooth/mgmt.c",
90595 + .param4 = 1,
90596 +};
90597 +
90598 +struct size_overflow_hash _001389_hash = {
90599 + .next = NULL,
90600 + .name = "disp_proc_write",
90601 + .file = "drivers/platform/x86/asus_acpi.c",
90602 + .param3 = 1,
90603 +};
90604 +
90605 +struct size_overflow_hash _001390_hash = {
90606 + .next = NULL,
90607 + .name = "dma_rx_errors_read",
90608 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90609 + .param3 = 1,
90610 +};
90611 +
90612 +struct size_overflow_hash _001391_hash = {
90613 + .next = NULL,
90614 + .name = "dma_rx_requested_read",
90615 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90616 + .param3 = 1,
90617 +};
90618 +
90619 +struct size_overflow_hash _001392_hash = {
90620 + .next = NULL,
90621 + .name = "dma_tx_errors_read",
90622 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90623 + .param3 = 1,
90624 +};
90625 +
90626 +struct size_overflow_hash _001393_hash = {
90627 + .next = NULL,
90628 + .name = "dma_tx_requested_read",
90629 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90630 + .param3 = 1,
90631 +};
90632 +
90633 +struct size_overflow_hash _001394_hash = {
90634 + .next = NULL,
90635 + .name = "dm_exception_table_init",
90636 + .file = "drivers/md/dm-snap.c",
90637 + .param2 = 1,
90638 +};
90639 +
90640 +struct size_overflow_hash _001395_hash = {
90641 + .next = NULL,
90642 + .name = "do_dccp_setsockopt",
90643 + .file = "net/dccp/proto.c",
90644 + .param5 = 1,
90645 +};
90646 +
90647 +struct size_overflow_hash _001396_hash = {
90648 + .next = NULL,
90649 + .name = "dtim_interval_read",
90650 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90651 + .param3 = 1,
90652 +};
90653 +
90654 +struct size_overflow_hash _001397_hash = {
90655 + .next = NULL,
90656 + .name = "dvb_audio_write",
90657 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
90658 + .param3 = 1,
90659 +};
90660 +
90661 +struct size_overflow_hash _001398_hash = {
90662 + .next = NULL,
90663 + .name = "dvb_demux_do_ioctl",
90664 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
90665 + .param3 = 1,
90666 +};
90667 +
90668 +struct size_overflow_hash _001399_hash = {
90669 + .next = NULL,
90670 + .name = "dvb_dvr_do_ioctl",
90671 + .file = "drivers/media/dvb/dvb-core/dmxdev.c",
90672 + .param3 = 1,
90673 +};
90674 +
90675 +struct size_overflow_hash _001400_hash = {
90676 + .next = NULL,
90677 + .name = "dvb_video_write",
90678 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
90679 + .param3 = 1,
90680 +};
90681 +
90682 +struct size_overflow_hash _001401_hash = {
90683 + .next = NULL,
90684 + .name = "ecryptfs_decode_and_decrypt_filename",
90685 + .file = "fs/ecryptfs/crypto.c",
90686 + .param5 = 1,
90687 +};
90688 +
90689 +struct size_overflow_hash _001402_hash = {
90690 + .next = NULL,
90691 + .name = "ecryptfs_encrypt_and_encode_filename",
90692 + .file = "fs/ecryptfs/crypto.c",
90693 + .param6 = 1,
90694 +};
90695 +
90696 +struct size_overflow_hash _001403_hash = {
90697 + .next = NULL,
90698 + .name = "enable_read",
90699 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
90700 + .param3 = 1,
90701 +};
90702 +
90703 +struct size_overflow_hash _001404_hash = {
90704 + .next = NULL,
90705 + .name = "enable_write",
90706 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
90707 + .param3 = 1,
90708 +};
90709 +
90710 +struct size_overflow_hash _001405_hash = {
90711 + .next = NULL,
90712 + .name = "event_calibration_read",
90713 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90714 + .param3 = 1,
90715 +};
90716 +
90717 +struct size_overflow_hash _001406_hash = {
90718 + .next = NULL,
90719 + .name = "event_heart_beat_read",
90720 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90721 + .param3 = 1,
90722 +};
90723 +
90724 +struct size_overflow_hash _001407_hash = {
90725 + .next = NULL,
90726 + .name = "event_oom_late_read",
90727 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90728 + .param3 = 1,
90729 +};
90730 +
90731 +struct size_overflow_hash _001408_hash = {
90732 + .next = NULL,
90733 + .name = "event_phy_transmit_error_read",
90734 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90735 + .param3 = 1,
90736 +};
90737 +
90738 +struct size_overflow_hash _001409_hash = {
90739 + .next = NULL,
90740 + .name = "event_rx_mem_empty_read",
90741 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90742 + .param3 = 1,
90743 +};
90744 +
90745 +struct size_overflow_hash _001410_hash = {
90746 + .next = NULL,
90747 + .name = "event_rx_mismatch_read",
90748 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90749 + .param3 = 1,
90750 +};
90751 +
90752 +struct size_overflow_hash _001411_hash = {
90753 + .next = NULL,
90754 + .name = "event_rx_pool_read",
90755 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90756 + .param3 = 1,
90757 +};
90758 +
90759 +struct size_overflow_hash _001412_hash = {
90760 + .next = NULL,
90761 + .name = "event_tx_stuck_read",
90762 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90763 + .param3 = 1,
90764 +};
90765 +
90766 +struct size_overflow_hash _001413_hash = {
90767 + .next = NULL,
90768 + .name = "excessive_retries_read",
90769 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
90770 + .param3 = 1,
90771 +};
90772 +
90773 +struct size_overflow_hash _001414_hash = {
90774 + .next = NULL,
90775 + .name = "exofs_read_kern",
90776 + .file = "fs/exofs/super.c",
90777 + .param6 = 1,
90778 +};
90779 +
90780 +struct size_overflow_hash _001415_hash = {
90781 + .next = NULL,
90782 + .name = "fallback_on_nodma_alloc",
90783 + .file = "drivers/block/floppy.c",
90784 + .param2 = 1,
90785 +};
90786 +
90787 +struct size_overflow_hash _001416_hash = {
90788 + .next = NULL,
90789 + .name = "__feat_register_sp",
90790 + .file = "net/dccp/feat.c",
90791 + .param6 = 1,
90792 +};
90793 +
90794 +struct size_overflow_hash _001417_hash = {
90795 + .next = NULL,
90796 + .name = "ffs_ep0_write",
90797 + .file = "drivers/usb/gadget/f_fs.c",
90798 + .param3 = 1,
90799 +};
90800 +
90801 +struct size_overflow_hash _001418_hash = {
90802 + .next = NULL,
90803 + .name = "ffs_epfile_read",
90804 + .file = "drivers/usb/gadget/f_fs.c",
90805 + .param3 = 1,
90806 +};
90807 +
90808 +struct size_overflow_hash _001419_hash = {
90809 + .next = NULL,
90810 + .name = "ffs_epfile_write",
90811 + .file = "drivers/usb/gadget/f_fs.c",
90812 + .param3 = 1,
90813 +};
90814 +
90815 +struct size_overflow_hash _001420_hash = {
90816 + .next = NULL,
90817 + .name = "frequency_read",
90818 + .file = "net/mac80211/debugfs.c",
90819 + .param3 = 1,
90820 +};
90821 +
90822 +struct size_overflow_hash _001421_hash = {
90823 + .next = NULL,
90824 + .name = "fsm_init",
90825 + .file = "lib/ts_fsm.c",
90826 + .param2 = 1,
90827 +};
90828 +
90829 +struct size_overflow_hash _001422_hash = {
90830 + .next = NULL,
90831 + .name = "garmin_read_process",
90832 + .file = "drivers/usb/serial/garmin_gps.c",
90833 + .param3 = 1,
90834 +};
90835 +
90836 +struct size_overflow_hash _001423_hash = {
90837 + .next = NULL,
90838 + .name = "garp_request_join",
90839 + .file = "include/net/garp.h",
90840 + .param4 = 1,
90841 +};
90842 +
90843 +struct size_overflow_hash _001424_hash = {
90844 + .next = NULL,
90845 + .name = "hcd_alloc_coherent",
90846 + .file = "drivers/usb/core/hcd.c",
90847 + .param5 = 1,
90848 +};
90849 +
90850 +struct size_overflow_hash _001425_hash = {
90851 + .next = NULL,
90852 + .name = "hci_sock_sendmsg",
90853 + .file = "net/bluetooth/hci_sock.c",
90854 + .param4 = 1,
90855 +};
90856 +
90857 +struct size_overflow_hash _001426_hash = {
90858 + .next = NULL,
90859 + .name = "__hwahc_op_set_gtk",
90860 + .file = "drivers/usb/host/hwa-hc.c",
90861 + .param4 = 1,
90862 +};
90863 +
90864 +struct size_overflow_hash _001427_hash = {
90865 + .next = NULL,
90866 + .name = "__hwahc_op_set_ptk",
90867 + .file = "drivers/usb/host/hwa-hc.c",
90868 + .param5 = 1,
90869 +};
90870 +
90871 +struct size_overflow_hash _001428_hash = {
90872 + .next = NULL,
90873 + .name = "ib_send_cm_drep",
90874 + .file = "include/rdma/ib_cm.h",
90875 + .param3 = 1,
90876 +};
90877 +
90878 +struct size_overflow_hash _001429_hash = {
90879 + .next = NULL,
90880 + .name = "ib_send_cm_mra",
90881 + .file = "include/rdma/ib_cm.h",
90882 + .param4 = 1,
90883 +};
90884 +
90885 +struct size_overflow_hash _001430_hash = {
90886 + .next = NULL,
90887 + .name = "ib_send_cm_rtu",
90888 + .file = "include/rdma/ib_cm.h",
90889 + .param3 = 1,
90890 +};
90891 +
90892 +struct size_overflow_hash _001431_hash = {
90893 + .next = NULL,
90894 + .name = "ieee80211_bss_info_update",
90895 + .file = "net/mac80211/scan.c",
90896 + .param4 = 1,
90897 +};
90898 +
90899 +struct size_overflow_hash _001432_hash = {
90900 + .next = NULL,
90901 + .name = "ieee80211_if_read_aid",
90902 + .file = "net/mac80211/debugfs_netdev.c",
90903 + .param3 = 1,
90904 +};
90905 +
90906 +struct size_overflow_hash _001433_hash = {
90907 + .next = NULL,
90908 + .name = "ieee80211_if_read_auto_open_plinks",
90909 + .file = "net/mac80211/debugfs_netdev.c",
90910 + .param3 = 1,
90911 +};
90912 +
90913 +struct size_overflow_hash _001434_hash = {
90914 + .next = NULL,
90915 + .name = "ieee80211_if_read_ave_beacon",
90916 + .file = "net/mac80211/debugfs_netdev.c",
90917 + .param3 = 1,
90918 +};
90919 +
90920 +struct size_overflow_hash _001435_hash = {
90921 + .next = NULL,
90922 + .name = "ieee80211_if_read_bssid",
90923 + .file = "net/mac80211/debugfs_netdev.c",
90924 + .param3 = 1,
90925 +};
90926 +
90927 +struct size_overflow_hash _001436_hash = {
90928 + .next = NULL,
90929 + .name = "ieee80211_if_read_channel_type",
90930 + .file = "net/mac80211/debugfs_netdev.c",
90931 + .param3 = 1,
90932 +};
90933 +
90934 +struct size_overflow_hash _001437_hash = {
90935 + .next = NULL,
90936 + .name = "ieee80211_if_read_dot11MeshConfirmTimeout",
90937 + .file = "net/mac80211/debugfs_netdev.c",
90938 + .param3 = 1,
90939 +};
90940 +
90941 +struct size_overflow_hash _001438_hash = {
90942 + .next = NULL,
90943 + .name = "ieee80211_if_read_dot11MeshGateAnnouncementProtocol",
90944 + .file = "net/mac80211/debugfs_netdev.c",
90945 + .param3 = 1,
90946 +};
90947 +
90948 +struct size_overflow_hash _001439_hash = {
90949 + .next = NULL,
90950 + .name = "ieee80211_if_read_dot11MeshHoldingTimeout",
90951 + .file = "net/mac80211/debugfs_netdev.c",
90952 + .param3 = 1,
90953 +};
90954 +
90955 +struct size_overflow_hash _001440_hash = {
90956 + .next = NULL,
90957 + .name = "ieee80211_if_read_dot11MeshHWMPactivePathTimeout",
90958 + .file = "net/mac80211/debugfs_netdev.c",
90959 + .param3 = 1,
90960 +};
90961 +
90962 +struct size_overflow_hash _001441_hash = {
90963 + .next = NULL,
90964 + .name = "ieee80211_if_read_dot11MeshHWMPmaxPREQretries",
90965 + .file = "net/mac80211/debugfs_netdev.c",
90966 + .param3 = 1,
90967 +};
90968 +
90969 +struct size_overflow_hash _001442_hash = {
90970 + .next = NULL,
90971 + .name = "ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime",
90972 + .file = "net/mac80211/debugfs_netdev.c",
90973 + .param3 = 1,
90974 +};
90975 +
90976 +struct size_overflow_hash _001443_hash = {
90977 + .next = NULL,
90978 + .name = "ieee80211_if_read_dot11MeshHWMPperrMinInterval",
90979 + .file = "net/mac80211/debugfs_netdev.c",
90980 + .param3 = 1,
90981 +};
90982 +
90983 +struct size_overflow_hash _001444_hash = {
90984 + .next = NULL,
90985 + .name = "ieee80211_if_read_dot11MeshHWMPpreqMinInterval",
90986 + .file = "net/mac80211/debugfs_netdev.c",
90987 + .param3 = 1,
90988 +};
90989 +
90990 +struct size_overflow_hash _001445_hash = {
90991 + .next = NULL,
90992 + .name = "ieee80211_if_read_dot11MeshHWMPRannInterval",
90993 + .file = "net/mac80211/debugfs_netdev.c",
90994 + .param3 = 1,
90995 +};
90996 +
90997 +struct size_overflow_hash _001446_hash = {
90998 + .next = NULL,
90999 + .name = "ieee80211_if_read_dot11MeshHWMPRootMode",
91000 + .file = "net/mac80211/debugfs_netdev.c",
91001 + .param3 = 1,
91002 +};
91003 +
91004 +struct size_overflow_hash _001447_hash = {
91005 + .next = NULL,
91006 + .name = "ieee80211_if_read_dot11MeshMaxPeerLinks",
91007 + .file = "net/mac80211/debugfs_netdev.c",
91008 + .param3 = 1,
91009 +};
91010 +
91011 +struct size_overflow_hash _001448_hash = {
91012 + .next = NULL,
91013 + .name = "ieee80211_if_read_dot11MeshMaxRetries",
91014 + .file = "net/mac80211/debugfs_netdev.c",
91015 + .param3 = 1,
91016 +};
91017 +
91018 +struct size_overflow_hash _001449_hash = {
91019 + .next = NULL,
91020 + .name = "ieee80211_if_read_dot11MeshRetryTimeout",
91021 + .file = "net/mac80211/debugfs_netdev.c",
91022 + .param3 = 1,
91023 +};
91024 +
91025 +struct size_overflow_hash _001450_hash = {
91026 + .next = NULL,
91027 + .name = "ieee80211_if_read_dot11MeshTTL",
91028 + .file = "net/mac80211/debugfs_netdev.c",
91029 + .param3 = 1,
91030 +};
91031 +
91032 +struct size_overflow_hash _001451_hash = {
91033 + .next = NULL,
91034 + .name = "ieee80211_if_read_dropped_frames_congestion",
91035 + .file = "net/mac80211/debugfs_netdev.c",
91036 + .param3 = 1,
91037 +};
91038 +
91039 +struct size_overflow_hash _001452_hash = {
91040 + .next = NULL,
91041 + .name = "ieee80211_if_read_dropped_frames_no_route",
91042 + .file = "net/mac80211/debugfs_netdev.c",
91043 + .param3 = 1,
91044 +};
91045 +
91046 +struct size_overflow_hash _001453_hash = {
91047 + .next = NULL,
91048 + .name = "ieee80211_if_read_dropped_frames_ttl",
91049 + .file = "net/mac80211/debugfs_netdev.c",
91050 + .param3 = 1,
91051 +};
91052 +
91053 +struct size_overflow_hash _001454_hash = {
91054 + .next = NULL,
91055 + .name = "ieee80211_if_read_drop_unencrypted",
91056 + .file = "net/mac80211/debugfs_netdev.c",
91057 + .param3 = 1,
91058 +};
91059 +
91060 +struct size_overflow_hash _001455_hash = {
91061 + .next = NULL,
91062 + .name = "ieee80211_if_read_dtim_count",
91063 + .file = "net/mac80211/debugfs_netdev.c",
91064 + .param3 = 1,
91065 +};
91066 +
91067 +struct size_overflow_hash _001456_hash = {
91068 + .next = NULL,
91069 + .name = "ieee80211_if_read_element_ttl",
91070 + .file = "net/mac80211/debugfs_netdev.c",
91071 + .param3 = 1,
91072 +};
91073 +
91074 +struct size_overflow_hash _001457_hash = {
91075 + .next = NULL,
91076 + .name = "ieee80211_if_read_estab_plinks",
91077 + .file = "net/mac80211/debugfs_netdev.c",
91078 + .param3 = 1,
91079 +};
91080 +
91081 +struct size_overflow_hash _001458_hash = {
91082 + .next = NULL,
91083 + .name = "ieee80211_if_read_flags",
91084 + .file = "net/mac80211/debugfs_netdev.c",
91085 + .param3 = 1,
91086 +};
91087 +
91088 +struct size_overflow_hash _001459_hash = {
91089 + .next = NULL,
91090 + .name = "ieee80211_if_read_fwded_frames",
91091 + .file = "net/mac80211/debugfs_netdev.c",
91092 + .param3 = 1,
91093 +};
91094 +
91095 +struct size_overflow_hash _001460_hash = {
91096 + .next = NULL,
91097 + .name = "ieee80211_if_read_fwded_mcast",
91098 + .file = "net/mac80211/debugfs_netdev.c",
91099 + .param3 = 1,
91100 +};
91101 +
91102 +struct size_overflow_hash _001461_hash = {
91103 + .next = NULL,
91104 + .name = "ieee80211_if_read_fwded_unicast",
91105 + .file = "net/mac80211/debugfs_netdev.c",
91106 + .param3 = 1,
91107 +};
91108 +
91109 +struct size_overflow_hash _001462_hash = {
91110 + .next = NULL,
91111 + .name = "ieee80211_if_read_last_beacon",
91112 + .file = "net/mac80211/debugfs_netdev.c",
91113 + .param3 = 1,
91114 +};
91115 +
91116 +struct size_overflow_hash _001463_hash = {
91117 + .next = NULL,
91118 + .name = "ieee80211_if_read_min_discovery_timeout",
91119 + .file = "net/mac80211/debugfs_netdev.c",
91120 + .param3 = 1,
91121 +};
91122 +
91123 +struct size_overflow_hash _001464_hash = {
91124 + .next = NULL,
91125 + .name = "ieee80211_if_read_num_buffered_multicast",
91126 + .file = "net/mac80211/debugfs_netdev.c",
91127 + .param3 = 1,
91128 +};
91129 +
91130 +struct size_overflow_hash _001465_hash = {
91131 + .next = NULL,
91132 + .name = "ieee80211_if_read_num_sta_authorized",
91133 + .file = "net/mac80211/debugfs_netdev.c",
91134 + .param3 = 1,
91135 +};
91136 +
91137 +struct size_overflow_hash _001466_hash = {
91138 + .next = NULL,
91139 + .name = "ieee80211_if_read_num_sta_ps",
91140 + .file = "net/mac80211/debugfs_netdev.c",
91141 + .param3 = 1,
91142 +};
91143 +
91144 +struct size_overflow_hash _001467_hash = {
91145 + .next = NULL,
91146 + .name = "ieee80211_if_read_path_refresh_time",
91147 + .file = "net/mac80211/debugfs_netdev.c",
91148 + .param3 = 1,
91149 +};
91150 +
91151 +struct size_overflow_hash _001468_hash = {
91152 + .next = NULL,
91153 + .name = "ieee80211_if_read_peer",
91154 + .file = "net/mac80211/debugfs_netdev.c",
91155 + .param3 = 1,
91156 +};
91157 +
91158 +struct size_overflow_hash _001469_hash = {
91159 + .next = NULL,
91160 + .name = "ieee80211_if_read_rc_rateidx_mask_2ghz",
91161 + .file = "net/mac80211/debugfs_netdev.c",
91162 + .param3 = 1,
91163 +};
91164 +
91165 +struct size_overflow_hash _001470_hash = {
91166 + .next = NULL,
91167 + .name = "ieee80211_if_read_rc_rateidx_mask_5ghz",
91168 + .file = "net/mac80211/debugfs_netdev.c",
91169 + .param3 = 1,
91170 +};
91171 +
91172 +struct size_overflow_hash _001471_hash = {
91173 + .next = NULL,
91174 + .name = "ieee80211_if_read_smps",
91175 + .file = "net/mac80211/debugfs_netdev.c",
91176 + .param3 = 1,
91177 +};
91178 +
91179 +struct size_overflow_hash _001472_hash = {
91180 + .next = NULL,
91181 + .name = "ieee80211_if_read_state",
91182 + .file = "net/mac80211/debugfs_netdev.c",
91183 + .param3 = 1,
91184 +};
91185 +
91186 +struct size_overflow_hash _001473_hash = {
91187 + .next = NULL,
91188 + .name = "ieee80211_if_read_tkip_mic_test",
91189 + .file = "net/mac80211/debugfs_netdev.c",
91190 + .param3 = 1,
91191 +};
91192 +
91193 +struct size_overflow_hash _001474_hash = {
91194 + .next = NULL,
91195 + .name = "ieee80211_if_read_tsf",
91196 + .file = "net/mac80211/debugfs_netdev.c",
91197 + .param3 = 1,
91198 +};
91199 +
91200 +struct size_overflow_hash _001475_hash = {
91201 + .next = NULL,
91202 + .name = "ieee80211_send_probe_req",
91203 + .file = "net/mac80211/util.c",
91204 + .param6 = 1,
91205 +};
91206 +
91207 +struct size_overflow_hash _001476_hash = {
91208 + .next = NULL,
91209 + .name = "init_map_ipmac",
91210 + .file = "net/netfilter/ipset/ip_set_bitmap_ipmac.c",
91211 + .param3 = 1,
91212 + .param4 = 1,
91213 +};
91214 +
91215 +struct size_overflow_hash _001478_hash = {
91216 + .next = NULL,
91217 + .name = "init_tid_tabs",
91218 + .file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
91219 + .param2 = 1,
91220 + .param4 = 1,
91221 + .param3 = 1,
91222 +};
91223 +
91224 +struct size_overflow_hash _001481_hash = {
91225 + .next = NULL,
91226 + .name = "isr_cmd_cmplt_read",
91227 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91228 + .param3 = 1,
91229 +};
91230 +
91231 +struct size_overflow_hash _001482_hash = {
91232 + .next = NULL,
91233 + .name = "isr_commands_read",
91234 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91235 + .param3 = 1,
91236 +};
91237 +
91238 +struct size_overflow_hash _001483_hash = {
91239 + .next = NULL,
91240 + .name = "isr_decrypt_done_read",
91241 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91242 + .param3 = 1,
91243 +};
91244 +
91245 +struct size_overflow_hash _001484_hash = {
91246 + .next = NULL,
91247 + .name = "isr_dma0_done_read",
91248 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91249 + .param3 = 1,
91250 +};
91251 +
91252 +struct size_overflow_hash _001485_hash = {
91253 + .next = NULL,
91254 + .name = "isr_dma1_done_read",
91255 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91256 + .param3 = 1,
91257 +};
91258 +
91259 +struct size_overflow_hash _001486_hash = {
91260 + .next = NULL,
91261 + .name = "isr_fiqs_read",
91262 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91263 + .param3 = 1,
91264 +};
91265 +
91266 +struct size_overflow_hash _001487_hash = {
91267 + .next = NULL,
91268 + .name = "isr_host_acknowledges_read",
91269 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91270 + .param3 = 1,
91271 +};
91272 +
91273 +struct size_overflow_hash _001488_hash = {
91274 + .next = &_001393_hash,
91275 + .name = "isr_hw_pm_mode_changes_read",
91276 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91277 + .param3 = 1,
91278 +};
91279 +
91280 +struct size_overflow_hash _001489_hash = {
91281 + .next = &_001205_hash,
91282 + .name = "isr_irqs_read",
91283 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91284 + .param3 = 1,
91285 +};
91286 +
91287 +struct size_overflow_hash _001490_hash = {
91288 + .next = NULL,
91289 + .name = "isr_low_rssi_read",
91290 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91291 + .param3 = 1,
91292 +};
91293 +
91294 +struct size_overflow_hash _001491_hash = {
91295 + .next = NULL,
91296 + .name = "isr_pci_pm_read",
91297 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91298 + .param3 = 1,
91299 +};
91300 +
91301 +struct size_overflow_hash _001492_hash = {
91302 + .next = NULL,
91303 + .name = "isr_rx_headers_read",
91304 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91305 + .param3 = 1,
91306 +};
91307 +
91308 +struct size_overflow_hash _001493_hash = {
91309 + .next = NULL,
91310 + .name = "isr_rx_mem_overflow_read",
91311 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91312 + .param3 = 1,
91313 +};
91314 +
91315 +struct size_overflow_hash _001494_hash = {
91316 + .next = NULL,
91317 + .name = "isr_rx_procs_read",
91318 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91319 + .param3 = 1,
91320 +};
91321 +
91322 +struct size_overflow_hash _001495_hash = {
91323 + .next = NULL,
91324 + .name = "isr_rx_rdys_read",
91325 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91326 + .param3 = 1,
91327 +};
91328 +
91329 +struct size_overflow_hash _001496_hash = {
91330 + .next = NULL,
91331 + .name = "isr_tx_exch_complete_read",
91332 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91333 + .param3 = 1,
91334 +};
91335 +
91336 +struct size_overflow_hash _001497_hash = {
91337 + .next = NULL,
91338 + .name = "isr_tx_procs_read",
91339 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91340 + .param3 = 1,
91341 +};
91342 +
91343 +struct size_overflow_hash _001498_hash = {
91344 + .next = NULL,
91345 + .name = "isr_wakeups_read",
91346 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91347 + .param3 = 1,
91348 +};
91349 +
91350 +struct size_overflow_hash _001499_hash = {
91351 + .next = NULL,
91352 + .name = "ivtv_read",
91353 + .file = "drivers/media/video/ivtv/ivtv-fileops.c",
91354 + .param3 = 1,
91355 +};
91356 +
91357 +struct size_overflow_hash _001500_hash = {
91358 + .next = NULL,
91359 + .name = "kmem_realloc",
91360 + .file = "fs/xfs/kmem.c",
91361 + .param2 = 1,
91362 +};
91363 +
91364 +struct size_overflow_hash _001501_hash = {
91365 + .next = NULL,
91366 + .name = "kmem_zalloc",
91367 + .file = "fs/xfs/kmem.c",
91368 + .param1 = 1,
91369 +};
91370 +
91371 +struct size_overflow_hash _001502_hash = {
91372 + .next = NULL,
91373 + .name = "kmem_zalloc_greedy",
91374 + .file = "fs/xfs/kmem.c",
91375 + .param2 = 1,
91376 + .param3 = 1,
91377 +};
91378 +
91379 +struct size_overflow_hash _001504_hash = {
91380 + .next = NULL,
91381 + .name = "kmp_init",
91382 + .file = "lib/ts_kmp.c",
91383 + .param2 = 1,
91384 +};
91385 +
91386 +struct size_overflow_hash _001505_hash = {
91387 + .next = NULL,
91388 + .name = "lcd_proc_write",
91389 + .file = "drivers/platform/x86/asus_acpi.c",
91390 + .param3 = 1,
91391 +};
91392 +
91393 +struct size_overflow_hash _001506_hash = {
91394 + .next = NULL,
91395 + .name = "ledd_proc_write",
91396 + .file = "drivers/platform/x86/asus_acpi.c",
91397 + .param3 = 1,
91398 +};
91399 +
91400 +struct size_overflow_hash _001507_hash = {
91401 + .next = NULL,
91402 + .name = "mic_calc_failure_read",
91403 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91404 + .param3 = 1,
91405 +};
91406 +
91407 +struct size_overflow_hash _001508_hash = {
91408 + .next = NULL,
91409 + .name = "mic_rx_pkts_read",
91410 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91411 + .param3 = 1,
91412 +};
91413 +
91414 +struct size_overflow_hash _001509_hash = {
91415 + .next = NULL,
91416 + .name = "nfs4_realloc_slot_table",
91417 + .file = "fs/nfs/nfs4proc.c",
91418 + .param2 = 1,
91419 +};
91420 +
91421 +struct size_overflow_hash _001510_hash = {
91422 + .next = NULL,
91423 + .name = "nfs_idmap_request_key",
91424 + .file = "fs/nfs/idmap.c",
91425 + .param2 = 1,
91426 +};
91427 +
91428 +struct size_overflow_hash _001511_hash = {
91429 + .next = NULL,
91430 + .name = "nsm_get_handle",
91431 + .file = "include/linux/lockd/lockd.h",
91432 + .param4 = 1,
91433 +};
91434 +
91435 +struct size_overflow_hash _001512_hash = {
91436 + .next = NULL,
91437 + .name = "ntfs_copy_from_user_iovec",
91438 + .file = "fs/ntfs/file.c",
91439 + .param3 = 1,
91440 + .param6 = 1,
91441 +};
91442 +
91443 +struct size_overflow_hash _001514_hash = {
91444 + .next = NULL,
91445 + .name = "ntfs_file_buffered_write",
91446 + .file = "fs/ntfs/file.c",
91447 + .param6 = 1,
91448 +};
91449 +
91450 +struct size_overflow_hash _001515_hash = {
91451 + .next = NULL,
91452 + .name = "ntfs_malloc_nofs",
91453 + .file = "fs/ntfs/malloc.h",
91454 + .param1 = 1,
91455 +};
91456 +
91457 +struct size_overflow_hash _001516_hash = {
91458 + .next = NULL,
91459 + .name = "ntfs_malloc_nofs_nofail",
91460 + .file = "fs/ntfs/malloc.h",
91461 + .param1 = 1,
91462 +};
91463 +
91464 +struct size_overflow_hash _001517_hash = {
91465 + .next = NULL,
91466 + .name = "ocfs2_control_message",
91467 + .file = "fs/ocfs2/stack_user.c",
91468 + .param3 = 1,
91469 +};
91470 +
91471 +struct size_overflow_hash _001518_hash = {
91472 + .next = NULL,
91473 + .name = "opera1_usb_i2c_msgxfer",
91474 + .file = "drivers/media/dvb/dvb-usb/opera1.c",
91475 + .param4 = 1,
91476 +};
91477 +
91478 +struct size_overflow_hash _001519_hash = {
91479 + .next = NULL,
91480 + .name = "orinoco_add_extscan_result",
91481 + .file = "drivers/net/wireless/orinoco/scan.c",
91482 + .param3 = 1,
91483 +};
91484 +
91485 +struct size_overflow_hash _001520_hash = {
91486 + .next = NULL,
91487 + .name = "osd_req_list_collection_objects",
91488 + .file = "include/scsi/osd_initiator.h",
91489 + .param5 = 1,
91490 +};
91491 +
91492 +struct size_overflow_hash _001521_hash = {
91493 + .next = NULL,
91494 + .name = "osd_req_list_partition_objects",
91495 + .file = "include/scsi/osd_initiator.h",
91496 + .param5 = 1,
91497 +};
91498 +
91499 +struct size_overflow_hash _001522_hash = {
91500 + .next = NULL,
91501 + .name = "pair_device",
91502 + .file = "net/bluetooth/mgmt.c",
91503 + .param4 = 1,
91504 +};
91505 +
91506 +struct size_overflow_hash _001523_hash = {
91507 + .next = NULL,
91508 + .name = "pccard_store_cis",
91509 + .file = "drivers/pcmcia/cistpl.c",
91510 + .param6 = 1,
91511 +};
91512 +
91513 +struct size_overflow_hash _001524_hash = {
91514 + .next = NULL,
91515 + .name = "pin_code_reply",
91516 + .file = "net/bluetooth/mgmt.c",
91517 + .param4 = 1,
91518 +};
91519 +
91520 +struct size_overflow_hash _001525_hash = {
91521 + .next = NULL,
91522 + .name = "play_iframe",
91523 + .file = "drivers/media/dvb/ttpci/av7110_av.c",
91524 + .param3 = 1,
91525 +};
91526 +
91527 +struct size_overflow_hash _001526_hash = {
91528 + .next = NULL,
91529 + .name = "pointer_size_read",
91530 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
91531 + .param3 = 1,
91532 +};
91533 +
91534 +struct size_overflow_hash _001527_hash = {
91535 + .next = NULL,
91536 + .name = "power_read",
91537 + .file = "net/mac80211/debugfs.c",
91538 + .param3 = 1,
91539 +};
91540 +
91541 +struct size_overflow_hash _001528_hash = {
91542 + .next = NULL,
91543 + .name = "ps_pspoll_max_apturn_read",
91544 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91545 + .param3 = 1,
91546 +};
91547 +
91548 +struct size_overflow_hash _001529_hash = {
91549 + .next = NULL,
91550 + .name = "ps_pspoll_timeouts_read",
91551 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91552 + .param3 = 1,
91553 +};
91554 +
91555 +struct size_overflow_hash _001530_hash = {
91556 + .next = NULL,
91557 + .name = "ps_pspoll_utilization_read",
91558 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91559 + .param3 = 1,
91560 +};
91561 +
91562 +struct size_overflow_hash _001531_hash = {
91563 + .next = NULL,
91564 + .name = "ps_upsd_max_apturn_read",
91565 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91566 + .param3 = 1,
91567 +};
91568 +
91569 +struct size_overflow_hash _001532_hash = {
91570 + .next = NULL,
91571 + .name = "ps_upsd_max_sptime_read",
91572 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91573 + .param3 = 1,
91574 +};
91575 +
91576 +struct size_overflow_hash _001533_hash = {
91577 + .next = NULL,
91578 + .name = "ps_upsd_timeouts_read",
91579 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91580 + .param3 = 1,
91581 +};
91582 +
91583 +struct size_overflow_hash _001534_hash = {
91584 + .next = NULL,
91585 + .name = "ps_upsd_utilization_read",
91586 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91587 + .param3 = 1,
91588 +};
91589 +
91590 +struct size_overflow_hash _001535_hash = {
91591 + .next = NULL,
91592 + .name = "pwr_disable_ps_read",
91593 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91594 + .param3 = 1,
91595 +};
91596 +
91597 +struct size_overflow_hash _001536_hash = {
91598 + .next = NULL,
91599 + .name = "pwr_elp_enter_read",
91600 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91601 + .param3 = 1,
91602 +};
91603 +
91604 +struct size_overflow_hash _001537_hash = {
91605 + .next = NULL,
91606 + .name = "pwr_enable_ps_read",
91607 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91608 + .param3 = 1,
91609 +};
91610 +
91611 +struct size_overflow_hash _001538_hash = {
91612 + .next = NULL,
91613 + .name = "pwr_fix_tsf_ps_read",
91614 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91615 + .param3 = 1,
91616 +};
91617 +
91618 +struct size_overflow_hash _001539_hash = {
91619 + .next = NULL,
91620 + .name = "pwr_missing_bcns_read",
91621 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91622 + .param3 = 1,
91623 +};
91624 +
91625 +struct size_overflow_hash _001540_hash = {
91626 + .next = NULL,
91627 + .name = "pwr_power_save_off_read",
91628 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91629 + .param3 = 1,
91630 +};
91631 +
91632 +struct size_overflow_hash _001541_hash = {
91633 + .next = NULL,
91634 + .name = "pwr_ps_enter_read",
91635 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91636 + .param3 = 1,
91637 +};
91638 +
91639 +struct size_overflow_hash _001542_hash = {
91640 + .next = NULL,
91641 + .name = "pwr_rcvd_awake_beacons_read",
91642 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91643 + .param3 = 1,
91644 +};
91645 +
91646 +struct size_overflow_hash _001543_hash = {
91647 + .next = NULL,
91648 + .name = "pwr_rcvd_beacons_read",
91649 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91650 + .param3 = 1,
91651 +};
91652 +
91653 +struct size_overflow_hash _001544_hash = {
91654 + .next = NULL,
91655 + .name = "pwr_tx_without_ps_read",
91656 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91657 + .param3 = 1,
91658 +};
91659 +
91660 +struct size_overflow_hash _001545_hash = {
91661 + .next = NULL,
91662 + .name = "pwr_tx_with_ps_read",
91663 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91664 + .param3 = 1,
91665 +};
91666 +
91667 +struct size_overflow_hash _001546_hash = {
91668 + .next = NULL,
91669 + .name = "pwr_wake_on_host_read",
91670 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91671 + .param3 = 1,
91672 +};
91673 +
91674 +struct size_overflow_hash _001547_hash = {
91675 + .next = NULL,
91676 + .name = "pwr_wake_on_timer_exp_read",
91677 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91678 + .param3 = 1,
91679 +};
91680 +
91681 +struct size_overflow_hash _001548_hash = {
91682 + .next = NULL,
91683 + .name = "qcam_read",
91684 + .file = "drivers/media/video/c-qcam.c",
91685 + .param3 = 1,
91686 +};
91687 +
91688 +struct size_overflow_hash _001549_hash = {
91689 + .next = NULL,
91690 + .name = "retry_count_read",
91691 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91692 + .param3 = 1,
91693 +};
91694 +
91695 +struct size_overflow_hash _001550_hash = {
91696 + .next = NULL,
91697 + .name = "rx_dropped_read",
91698 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91699 + .param3 = 1,
91700 +};
91701 +
91702 +struct size_overflow_hash _001551_hash = {
91703 + .next = NULL,
91704 + .name = "rx_fcs_err_read",
91705 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91706 + .param3 = 1,
91707 +};
91708 +
91709 +struct size_overflow_hash _001552_hash = {
91710 + .next = NULL,
91711 + .name = "rx_hdr_overflow_read",
91712 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91713 + .param3 = 1,
91714 +};
91715 +
91716 +struct size_overflow_hash _001553_hash = {
91717 + .next = NULL,
91718 + .name = "rx_hw_stuck_read",
91719 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91720 + .param3 = 1,
91721 +};
91722 +
91723 +struct size_overflow_hash _001554_hash = {
91724 + .next = NULL,
91725 + .name = "rx_out_of_mem_read",
91726 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91727 + .param3 = 1,
91728 +};
91729 +
91730 +struct size_overflow_hash _001555_hash = {
91731 + .next = NULL,
91732 + .name = "rx_path_reset_read",
91733 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91734 + .param3 = 1,
91735 +};
91736 +
91737 +struct size_overflow_hash _001556_hash = {
91738 + .next = NULL,
91739 + .name = "rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read",
91740 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91741 + .param3 = 1,
91742 +};
91743 +
91744 +struct size_overflow_hash _001557_hash = {
91745 + .next = NULL,
91746 + .name = "rxpipe_descr_host_int_trig_rx_data_read",
91747 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91748 + .param3 = 1,
91749 +};
91750 +
91751 +struct size_overflow_hash _001558_hash = {
91752 + .next = NULL,
91753 + .name = "rxpipe_missed_beacon_host_int_trig_rx_data_read",
91754 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91755 + .param3 = 1,
91756 +};
91757 +
91758 +struct size_overflow_hash _001559_hash = {
91759 + .next = NULL,
91760 + .name = "rxpipe_rx_prep_beacon_drop_read",
91761 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91762 + .param3 = 1,
91763 +};
91764 +
91765 +struct size_overflow_hash _001560_hash = {
91766 + .next = NULL,
91767 + .name = "rxpipe_tx_xfr_host_int_trig_rx_data_read",
91768 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91769 + .param3 = 1,
91770 +};
91771 +
91772 +struct size_overflow_hash _001561_hash = {
91773 + .next = NULL,
91774 + .name = "rx_reset_counter_read",
91775 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91776 + .param3 = 1,
91777 +};
91778 +
91779 +struct size_overflow_hash _001562_hash = {
91780 + .next = NULL,
91781 + .name = "rx_streaming_always_read",
91782 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91783 + .param3 = 1,
91784 +};
91785 +
91786 +struct size_overflow_hash _001563_hash = {
91787 + .next = NULL,
91788 + .name = "rx_streaming_interval_read",
91789 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91790 + .param3 = 1,
91791 +};
91792 +
91793 +struct size_overflow_hash _001564_hash = {
91794 + .next = NULL,
91795 + .name = "rx_xfr_hint_trig_read",
91796 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
91797 + .param3 = 1,
91798 +};
91799 +
91800 +struct size_overflow_hash _001565_hash = {
91801 + .next = NULL,
91802 + .name = "scsi_execute_req",
91803 + .file = "include/scsi/scsi_device.h",
91804 + .param5 = 1,
91805 +};
91806 +
91807 +struct size_overflow_hash _001566_hash = {
91808 + .next = NULL,
91809 + .name = "scsi_tgt_kspace_exec",
91810 + .file = "drivers/scsi/scsi_tgt_lib.c",
91811 + .param8 = 1,
91812 +};
91813 +
91814 +struct size_overflow_hash _001567_hash = {
91815 + .next = NULL,
91816 + .name = "sctp_sendmsg",
91817 + .file = "net/sctp/socket.c",
91818 + .param4 = 1,
91819 +};
91820 +
91821 +struct size_overflow_hash _001568_hash = {
91822 + .next = NULL,
91823 + .name = "sctp_setsockopt",
91824 + .file = "net/sctp/socket.c",
91825 + .param5 = 1,
91826 +};
91827 +
91828 +struct size_overflow_hash _001569_hash = {
91829 + .next = NULL,
91830 + .name = "set_connectable",
91831 + .file = "net/bluetooth/mgmt.c",
91832 + .param4 = 1,
91833 +};
91834 +
91835 +struct size_overflow_hash _001570_hash = {
91836 + .next = NULL,
91837 + .name = "set_discoverable",
91838 + .file = "net/bluetooth/mgmt.c",
91839 + .param4 = 1,
91840 +};
91841 +
91842 +struct size_overflow_hash _001571_hash = {
91843 + .next = NULL,
91844 + .name = "set_local_name",
91845 + .file = "net/bluetooth/mgmt.c",
91846 + .param4 = 1,
91847 +};
91848 +
91849 +struct size_overflow_hash _001572_hash = {
91850 + .next = NULL,
91851 + .name = "set_powered",
91852 + .file = "net/bluetooth/mgmt.c",
91853 + .param4 = 1,
91854 +};
91855 +
91856 +struct size_overflow_hash _001573_hash = {
91857 + .next = NULL,
91858 + .name = "simple_alloc_urb",
91859 + .file = "drivers/usb/misc/usbtest.c",
91860 + .param3 = 1,
91861 +};
91862 +
91863 +struct size_overflow_hash _001574_hash = {
91864 + .next = NULL,
91865 + .name = "sm_checker_extend",
91866 + .file = "drivers/md/persistent-data/dm-space-map-checker.c",
91867 + .param2 = 1,
91868 +};
91869 +
91870 +struct size_overflow_hash _001575_hash = {
91871 + .next = NULL,
91872 + .name = "snd_cs4281_BA0_read",
91873 + .file = "sound/pci/cs4281.c",
91874 + .param5 = 1,
91875 +};
91876 +
91877 +struct size_overflow_hash _001576_hash = {
91878 + .next = NULL,
91879 + .name = "snd_cs4281_BA1_read",
91880 + .file = "sound/pci/cs4281.c",
91881 + .param5 = 1,
91882 +};
91883 +
91884 +struct size_overflow_hash _001577_hash = {
91885 + .next = NULL,
91886 + .name = "snd_cs46xx_io_read",
91887 + .file = "sound/pci/cs46xx/cs46xx_lib.c",
91888 + .param5 = 1,
91889 +};
91890 +
91891 +struct size_overflow_hash _001578_hash = {
91892 + .next = NULL,
91893 + .name = "snd_gus_dram_read",
91894 + .file = "include/sound/gus.h",
91895 + .param4 = 1,
91896 +};
91897 +
91898 +struct size_overflow_hash _001579_hash = {
91899 + .next = NULL,
91900 + .name = "snd_gus_dram_write",
91901 + .file = "include/sound/gus.h",
91902 + .param4 = 1,
91903 +};
91904 +
91905 +struct size_overflow_hash _001580_hash = {
91906 + .next = NULL,
91907 + .name = "snd_mem_proc_write",
91908 + .file = "sound/core/memalloc.c",
91909 + .param3 = 1,
91910 +};
91911 +
91912 +struct size_overflow_hash _001581_hash = {
91913 + .next = NULL,
91914 + .name = "snd_pcm_oss_read",
91915 + .file = "sound/core/oss/pcm_oss.c",
91916 + .param3 = 1,
91917 +};
91918 +
91919 +struct size_overflow_hash _001582_hash = {
91920 + .next = NULL,
91921 + .name = "snd_pcm_oss_sync1",
91922 + .file = "sound/core/oss/pcm_oss.c",
91923 + .param2 = 1,
91924 +};
91925 +
91926 +struct size_overflow_hash _001583_hash = {
91927 + .next = NULL,
91928 + .name = "snd_pcm_oss_write",
91929 + .file = "sound/core/oss/pcm_oss.c",
91930 + .param3 = 1,
91931 +};
91932 +
91933 +struct size_overflow_hash _001584_hash = {
91934 + .next = NULL,
91935 + .name = "snd_rme32_capture_copy",
91936 + .file = "sound/pci/rme32.c",
91937 + .param5 = 1,
91938 +};
91939 +
91940 +struct size_overflow_hash _001585_hash = {
91941 + .next = NULL,
91942 + .name = "snd_rme32_playback_copy",
91943 + .file = "sound/pci/rme32.c",
91944 + .param5 = 1,
91945 +};
91946 +
91947 +struct size_overflow_hash _001586_hash = {
91948 + .next = NULL,
91949 + .name = "snd_rme96_capture_copy",
91950 + .file = "sound/pci/rme96.c",
91951 + .param5 = 1,
91952 +};
91953 +
91954 +struct size_overflow_hash _001587_hash = {
91955 + .next = NULL,
91956 + .name = "snd_rme96_playback_copy",
91957 + .file = "sound/pci/rme96.c",
91958 + .param5 = 1,
91959 +};
91960 +
91961 +struct size_overflow_hash _001588_hash = {
91962 + .next = NULL,
91963 + .name = "spi_execute",
91964 + .file = "drivers/scsi/scsi_transport_spi.c",
91965 + .param5 = 1,
91966 +};
91967 +
91968 +struct size_overflow_hash _001589_hash = {
91969 + .next = NULL,
91970 + .name = "srp_target_alloc",
91971 + .file = "include/scsi/libsrp.h",
91972 + .param3 = 1,
91973 +};
91974 +
91975 +struct size_overflow_hash _001590_hash = {
91976 + .next = NULL,
91977 + .name = "stats_dot11ACKFailureCount_read",
91978 + .file = "net/mac80211/debugfs.c",
91979 + .param3 = 1,
91980 +};
91981 +
91982 +struct size_overflow_hash _001591_hash = {
91983 + .next = NULL,
91984 + .name = "stats_dot11FCSErrorCount_read",
91985 + .file = "net/mac80211/debugfs.c",
91986 + .param3 = 1,
91987 +};
91988 +
91989 +struct size_overflow_hash _001592_hash = {
91990 + .next = NULL,
91991 + .name = "stats_dot11RTSFailureCount_read",
91992 + .file = "net/mac80211/debugfs.c",
91993 + .param3 = 1,
91994 +};
91995 +
91996 +struct size_overflow_hash _001593_hash = {
91997 + .next = NULL,
91998 + .name = "stats_dot11RTSSuccessCount_read",
91999 + .file = "net/mac80211/debugfs.c",
92000 + .param3 = 1,
92001 +};
92002 +
92003 +struct size_overflow_hash _001594_hash = {
92004 + .next = NULL,
92005 + .name = "stk_allocate_buffers",
92006 + .file = "drivers/media/video/stk-webcam.c",
92007 + .param2 = 1,
92008 +};
92009 +
92010 +struct size_overflow_hash _001595_hash = {
92011 + .next = NULL,
92012 + .name = "submit_inquiry",
92013 + .file = "drivers/scsi/device_handler/scsi_dh_rdac.c",
92014 + .param3 = 1,
92015 +};
92016 +
92017 +struct size_overflow_hash _001596_hash = {
92018 + .next = NULL,
92019 + .name = "team_options_register",
92020 + .file = "include/linux/if_team.h",
92021 + .param3 = 1,
92022 +};
92023 +
92024 +struct size_overflow_hash _001597_hash = {
92025 + .next = NULL,
92026 + .name = "test_unaligned_bulk",
92027 + .file = "drivers/usb/misc/usbtest.c",
92028 + .param3 = 1,
92029 +};
92030 +
92031 +struct size_overflow_hash _001598_hash = {
92032 + .next = NULL,
92033 + .name = "timeout_read",
92034 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
92035 + .param3 = 1,
92036 +};
92037 +
92038 +struct size_overflow_hash _001599_hash = {
92039 + .next = NULL,
92040 + .name = "timeout_write",
92041 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
92042 + .param3 = 1,
92043 +};
92044 +
92045 +struct size_overflow_hash _001600_hash = {
92046 + .next = NULL,
92047 + .name = "tipc_link_send_sections_fast",
92048 + .file = "net/tipc/link.c",
92049 + .param4 = 1,
92050 +};
92051 +
92052 +struct size_overflow_hash _001601_hash = {
92053 + .next = NULL,
92054 + .name = "total_ps_buffered_read",
92055 + .file = "net/mac80211/debugfs.c",
92056 + .param3 = 1,
92057 +};
92058 +
92059 +struct size_overflow_hash _001602_hash = {
92060 + .next = NULL,
92061 + .name = "ts_read",
92062 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
92063 + .param3 = 1,
92064 +};
92065 +
92066 +struct size_overflow_hash _001603_hash = {
92067 + .next = NULL,
92068 + .name = "TSS_authhmac",
92069 + .file = "security/keys/trusted.c",
92070 + .param3 = 1,
92071 +};
92072 +
92073 +struct size_overflow_hash _001604_hash = {
92074 + .next = NULL,
92075 + .name = "TSS_checkhmac1",
92076 + .file = "security/keys/trusted.c",
92077 + .param5 = 1,
92078 +};
92079 +
92080 +struct size_overflow_hash _001605_hash = {
92081 + .next = NULL,
92082 + .name = "TSS_checkhmac2",
92083 + .file = "security/keys/trusted.c",
92084 + .param5 = 1,
92085 + .param7 = 1,
92086 +};
92087 +
92088 +struct size_overflow_hash _001607_hash = {
92089 + .next = NULL,
92090 + .name = "ts_write",
92091 + .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
92092 + .param3 = 1,
92093 +};
92094 +
92095 +struct size_overflow_hash _001608_hash = {
92096 + .next = NULL,
92097 + .name = "tx_internal_desc_overflow_read",
92098 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
92099 + .param3 = 1,
92100 +};
92101 +
92102 +struct size_overflow_hash _001609_hash = {
92103 + .next = NULL,
92104 + .name = "uapsd_max_sp_len_read",
92105 + .file = "net/mac80211/debugfs.c",
92106 + .param3 = 1,
92107 +};
92108 +
92109 +struct size_overflow_hash _001610_hash = {
92110 + .next = NULL,
92111 + .name = "uapsd_queues_read",
92112 + .file = "net/mac80211/debugfs.c",
92113 + .param3 = 1,
92114 +};
92115 +
92116 +struct size_overflow_hash _001611_hash = {
92117 + .next = NULL,
92118 + .name = "ulong_read_file",
92119 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
92120 + .param3 = 1,
92121 +};
92122 +
92123 +struct size_overflow_hash _001612_hash = {
92124 + .next = NULL,
92125 + .name = "ulong_write_file",
92126 + .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
92127 + .param3 = 1,
92128 +};
92129 +
92130 +struct size_overflow_hash _001613_hash = {
92131 + .next = NULL,
92132 + .name = "usb_alloc_coherent",
92133 + .file = "include/linux/usb.h",
92134 + .param2 = 1,
92135 +};
92136 +
92137 +struct size_overflow_hash _001614_hash = {
92138 + .next = NULL,
92139 + .name = "user_power_read",
92140 + .file = "net/mac80211/debugfs.c",
92141 + .param3 = 1,
92142 +};
92143 +
92144 +struct size_overflow_hash _001615_hash = {
92145 + .next = NULL,
92146 + .name = "vb2_read",
92147 + .file = "include/media/videobuf2-core.h",
92148 + .param3 = 1,
92149 +};
92150 +
92151 +struct size_overflow_hash _001616_hash = {
92152 + .next = NULL,
92153 + .name = "vb2_write",
92154 + .file = "include/media/videobuf2-core.h",
92155 + .param3 = 1,
92156 +};
92157 +
92158 +struct size_overflow_hash _001617_hash = {
92159 + .next = NULL,
92160 + .name = "vhost_add_used_n",
92161 + .file = "drivers/vhost/vhost.c",
92162 + .param3 = 1,
92163 +};
92164 +
92165 +struct size_overflow_hash _001618_hash = {
92166 + .next = NULL,
92167 + .name = "virtqueue_add_buf",
92168 + .file = "include/linux/virtio.h",
92169 + .param3 = 1,
92170 + .param4 = 1,
92171 +};
92172 +
92173 +struct size_overflow_hash _001620_hash = {
92174 + .next = NULL,
92175 + .name = "vmbus_establish_gpadl",
92176 + .file = "include/linux/hyperv.h",
92177 + .param3 = 1,
92178 +};
92179 +
92180 +struct size_overflow_hash _001621_hash = {
92181 + .next = NULL,
92182 + .name = "wep_addr_key_count_read",
92183 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
92184 + .param3 = 1,
92185 +};
92186 +
92187 +struct size_overflow_hash _001622_hash = {
92188 + .next = NULL,
92189 + .name = "wep_decrypt_fail_read",
92190 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
92191 + .param3 = 1,
92192 +};
92193 +
92194 +struct size_overflow_hash _001623_hash = {
92195 + .next = NULL,
92196 + .name = "wep_default_key_count_read",
92197 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
92198 + .param3 = 1,
92199 +};
92200 +
92201 +struct size_overflow_hash _001624_hash = {
92202 + .next = NULL,
92203 + .name = "wep_interrupt_read",
92204 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
92205 + .param3 = 1,
92206 +};
92207 +
92208 +struct size_overflow_hash _001625_hash = {
92209 + .next = NULL,
92210 + .name = "wep_iv_read",
92211 + .file = "net/mac80211/debugfs.c",
92212 + .param3 = 1,
92213 +};
92214 +
92215 +struct size_overflow_hash _001626_hash = {
92216 + .next = NULL,
92217 + .name = "wep_key_not_found_read",
92218 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
92219 + .param3 = 1,
92220 +};
92221 +
92222 +struct size_overflow_hash _001627_hash = {
92223 + .next = NULL,
92224 + .name = "wep_packets_read",
92225 + .file = "drivers/net/wireless/wl12xx/debugfs.c",
92226 + .param3 = 1,
92227 +};
92228 +
92229 +struct size_overflow_hash _001628_hash = {
92230 + .next = NULL,
92231 + .name = "write_led",
92232 + .file = "drivers/platform/x86/asus_acpi.c",
92233 + .param2 = 1,
92234 +};
92235 +
92236 +struct size_overflow_hash _001629_hash = {
92237 + .next = NULL,
92238 + .name = "wusb_prf",
92239 + .file = "include/linux/usb/wusb.h",
92240 + .param7 = 1,
92241 +};
92242 +
92243 +struct size_overflow_hash _001630_hash = {
92244 + .next = NULL,
92245 + .name = "zd_usb_iowrite16v",
92246 + .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
92247 + .param3 = 1,
92248 +};
92249 +
92250 +struct size_overflow_hash _001631_hash = {
92251 + .next = NULL,
92252 + .name = "afs_cell_lookup",
92253 + .file = "fs/afs/cell.c",
92254 + .param2 = 1,
92255 +};
92256 +
92257 +struct size_overflow_hash _001632_hash = {
92258 + .next = NULL,
92259 + .name = "agp_generic_alloc_user",
92260 + .file = "drivers/char/agp/generic.c",
92261 + .param1 = 1,
92262 +};
92263 +
92264 +struct size_overflow_hash _001634_hash = {
92265 + .next = NULL,
92266 + .name = "bluetooth_proc_write",
92267 + .file = "drivers/platform/x86/asus_acpi.c",
92268 + .param3 = 1,
92269 +};
92270 +
92271 +struct size_overflow_hash _001635_hash = {
92272 + .next = NULL,
92273 + .name = "cache_write",
92274 + .file = "net/sunrpc/cache.c",
92275 + .param3 = 1,
92276 +};
92277 +
92278 +struct size_overflow_hash _001636_hash = {
92279 + .next = NULL,
92280 + .name = "ch_do_scsi",
92281 + .file = "drivers/scsi/ch.c",
92282 + .param4 = 1,
92283 +};
92284 +
92285 +struct size_overflow_hash _001637_hash = {
92286 + .next = NULL,
92287 + .name = "cx18_read",
92288 + .file = "drivers/media/video/cx18/cx18-fileops.c",
92289 + .param3 = 1,
92290 +};
92291 +
92292 +struct size_overflow_hash _001638_hash = {
92293 + .next = NULL,
92294 + .name = "dccp_feat_register_sp",
92295 + .file = "net/dccp/feat.c",
92296 + .param5 = 1,
92297 +};
92298 +
92299 +struct size_overflow_hash _001640_hash = {
92300 + .next = NULL,
92301 + .name = "iso_alloc_urb",
92302 + .file = "drivers/usb/misc/usbtest.c",
92303 + .param5 = 1,
92304 +};
92305 +
92306 +struct size_overflow_hash _001641_hash = {
92307 + .next = NULL,
92308 + .name = "ivtv_read_pos",
92309 + .file = "drivers/media/video/ivtv/ivtv-fileops.c",
92310 + .param3 = 1,
92311 +};
92312 +
92313 +struct size_overflow_hash _001642_hash = {
92314 + .next = NULL,
92315 + .name = "mcam_v4l_read",
92316 + .file = "drivers/media/video/marvell-ccic/mcam-core.c",
92317 + .param3 = 1,
92318 +};
92319 +
92320 +struct size_overflow_hash _001643_hash = {
92321 + .next = NULL,
92322 + .name = "mled_proc_write",
92323 + .file = "drivers/platform/x86/asus_acpi.c",
92324 + .param3 = 1,
92325 +};
92326 +
92327 +struct size_overflow_hash _001644_hash = {
92328 + .next = NULL,
92329 + .name = "nfs_idmap_lookup_id",
92330 + .file = "fs/nfs/idmap.c",
92331 + .param2 = 1,
92332 +};
92333 +
92334 +struct size_overflow_hash _001645_hash = {
92335 + .next = NULL,
92336 + .name = "ocfs2_control_write",
92337 + .file = "fs/ocfs2/stack_user.c",
92338 + .param3 = 1,
92339 +};
92340 +
92341 +struct size_overflow_hash _001646_hash = {
92342 + .next = NULL,
92343 + .name = "osd_req_list_dev_partitions",
92344 + .file = "include/scsi/osd_initiator.h",
92345 + .param4 = 1,
92346 +};
92347 +
92348 +struct size_overflow_hash _001647_hash = {
92349 + .next = NULL,
92350 + .name = "osd_req_list_partition_collections",
92351 + .file = "include/scsi/osd_initiator.h",
92352 + .param5 = 1,
92353 +};
92354 +
92355 +struct size_overflow_hash _001648_hash = {
92356 + .next = NULL,
92357 + .name = "pwc_video_read",
92358 + .file = "drivers/media/video/pwc/pwc-if.c",
92359 + .param3 = 1,
92360 +};
92361 +
92362 +struct size_overflow_hash _001649_hash = {
92363 + .next = NULL,
92364 + .name = "scsi_vpd_inquiry",
92365 + .file = "drivers/scsi/scsi.c",
92366 + .param4 = 1,
92367 +};
92368 +
92369 +struct size_overflow_hash _001650_hash = {
92370 + .next = NULL,
92371 + .name = "snd_gf1_mem_proc_dump",
92372 + .file = "sound/isa/gus/gus_mem_proc.c",
92373 + .param5 = 1,
92374 +};
92375 +
92376 +struct size_overflow_hash _001651_hash = {
92377 + .next = NULL,
92378 + .name = "spi_dv_device_echo_buffer",
92379 + .file = "drivers/scsi/scsi_transport_spi.c",
92380 + .param2 = 1,
92381 + .param3 = 1,
92382 +};
92383 +
92384 +struct size_overflow_hash _001653_hash = {
92385 + .next = NULL,
92386 + .name = "tled_proc_write",
92387 + .file = "drivers/platform/x86/asus_acpi.c",
92388 + .param3 = 1,
92389 +};
92390 +
92391 +struct size_overflow_hash _001655_hash = {
92392 + .next = NULL,
92393 + .name = "usb_allocate_stream_buffers",
92394 + .file = "drivers/media/dvb/dvb-usb/usb-urb.c",
92395 + .param3 = 1,
92396 +};
92397 +
92398 +struct size_overflow_hash _001656_hash = {
92399 + .next = NULL,
92400 + .name = "_usb_writeN_sync",
92401 + .file = "drivers/net/wireless/rtlwifi/usb.c",
92402 + .param4 = 1,
92403 +};
92404 +
92405 +struct size_overflow_hash _001657_hash = {
92406 + .next = NULL,
92407 + .name = "vhost_add_used_and_signal_n",
92408 + .file = "drivers/vhost/vhost.c",
92409 + .param4 = 1,
92410 +};
92411 +
92412 +struct size_overflow_hash _001658_hash = {
92413 + .next = NULL,
92414 + .name = "vmbus_open",
92415 + .file = "include/linux/hyperv.h",
92416 + .param2 = 1,
92417 + .param3 = 1,
92418 +};
92419 +
92420 +struct size_overflow_hash _001660_hash = {
92421 + .next = NULL,
92422 + .name = "wled_proc_write",
92423 + .file = "drivers/platform/x86/asus_acpi.c",
92424 + .param3 = 1,
92425 +};
92426 +
92427 +struct size_overflow_hash _001661_hash = {
92428 + .next = NULL,
92429 + .name = "wusb_prf_256",
92430 + .file = "include/linux/usb/wusb.h",
92431 + .param7 = 1,
92432 +};
92433 +
92434 +struct size_overflow_hash _001662_hash = {
92435 + .next = NULL,
92436 + .name = "wusb_prf_64",
92437 + .file = "include/linux/usb/wusb.h",
92438 + .param7 = 1,
92439 +};
92440 +
92441 +struct size_overflow_hash _001663_hash = {
92442 + .next = NULL,
92443 + .name = "agp_allocate_memory",
92444 + .file = "include/linux/agp_backend.h",
92445 + .param2 = 1,
92446 +};
92447 +
92448 +struct size_overflow_hash _001664_hash = {
92449 + .next = NULL,
92450 + .name = "cx18_read_pos",
92451 + .file = "drivers/media/video/cx18/cx18-fileops.c",
92452 + .param3 = 1,
92453 +};
92454 +
92455 +struct size_overflow_hash _001665_hash = {
92456 + .next = NULL,
92457 + .name = "nfs_map_group_to_gid",
92458 + .file = "include/linux/nfs_idmap.h",
92459 + .param3 = 1,
92460 +};
92461 +
92462 +struct size_overflow_hash _001666_hash = {
92463 + .next = NULL,
92464 + .name = "nfs_map_name_to_uid",
92465 + .file = "include/linux/nfs_idmap.h",
92466 + .param3 = 1,
92467 +};
92468 +
92469 +struct size_overflow_hash _001667_hash = {
92470 + .next = NULL,
92471 + .name = "test_iso_queue",
92472 + .file = "drivers/usb/misc/usbtest.c",
92473 + .param5 = 1,
92474 +};
92475 +
92476 +struct size_overflow_hash _001668_hash = {
92477 + .next = NULL,
92478 + .name = "agp_allocate_memory_wrap",
92479 + .file = "drivers/char/agp/frontend.c",
92480 + .param1 = 1,
92481 +};
92482 +
92483 +struct size_overflow_hash _001669_hash = {
92484 + .next = NULL,
92485 + .name = "alloc_irq_cpu_rmap",
92486 + .file = "include/linux/cpu_rmap.h",
92487 + .param1 = 1,
92488 +};
92489 +
92490 +struct size_overflow_hash _001670_hash = {
92491 + .next = NULL,
92492 + .name = "alloc_ring",
92493 + .file = "drivers/net/ethernet/chelsio/cxgb4/sge.c",
92494 + .param2 = 1,
92495 + .param4 = 1,
92496 +};
92497 +
92498 +struct size_overflow_hash _001672_hash = {
92499 + .next = &_001124_hash,
92500 + .name = "atomic_counters_read",
92501 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
92502 + .param3 = 1,
92503 +};
92504 +
92505 +struct size_overflow_hash _001673_hash = {
92506 + .next = NULL,
92507 + .name = "atomic_stats_read",
92508 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
92509 + .param3 = 1,
92510 +};
92511 +
92512 +struct size_overflow_hash _001674_hash = {
92513 + .next = NULL,
92514 + .name = "c4iw_init_resource_fifo",
92515 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
92516 + .param3 = 1,
92517 +};
92518 +
92519 +struct size_overflow_hash _001675_hash = {
92520 + .next = NULL,
92521 + .name = "c4iw_init_resource_fifo_random",
92522 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
92523 + .param3 = 1,
92524 +};
92525 +
92526 +struct size_overflow_hash _001676_hash = {
92527 + .next = NULL,
92528 + .name = "compat_do_arpt_set_ctl",
92529 + .file = "net/ipv4/netfilter/arp_tables.c",
92530 + .param4 = 1,
92531 +};
92532 +
92533 +struct size_overflow_hash _001677_hash = {
92534 + .next = NULL,
92535 + .name = "compat_do_ip6t_set_ctl",
92536 + .file = "net/ipv6/netfilter/ip6_tables.c",
92537 + .param4 = 1,
92538 +};
92539 +
92540 +struct size_overflow_hash _001678_hash = {
92541 + .next = NULL,
92542 + .name = "compat_do_ipt_set_ctl",
92543 + .file = "net/ipv4/netfilter/ip_tables.c",
92544 + .param4 = 1,
92545 +};
92546 +
92547 +struct size_overflow_hash _001679_hash = {
92548 + .next = NULL,
92549 + .name = "cxio_init_resource_fifo",
92550 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
92551 + .param3 = 1,
92552 +};
92553 +
92554 +struct size_overflow_hash _001680_hash = {
92555 + .next = NULL,
92556 + .name = "cxio_init_resource_fifo_random",
92557 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
92558 + .param3 = 1,
92559 +};
92560 +
92561 +struct size_overflow_hash _001681_hash = {
92562 + .next = NULL,
92563 + .name = "dev_counters_read",
92564 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
92565 + .param3 = 1,
92566 +};
92567 +
92568 +struct size_overflow_hash _001682_hash = {
92569 + .next = NULL,
92570 + .name = "dev_names_read",
92571 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
92572 + .param3 = 1,
92573 +};
92574 +
92575 +struct size_overflow_hash _001683_hash = {
92576 + .next = &_001468_hash,
92577 + .name = "do_arpt_set_ctl",
92578 + .file = "net/ipv4/netfilter/arp_tables.c",
92579 + .param4 = 1,
92580 +};
92581 +
92582 +struct size_overflow_hash _001684_hash = {
92583 + .next = NULL,
92584 + .name = "do_ip6t_set_ctl",
92585 + .file = "net/ipv6/netfilter/ip6_tables.c",
92586 + .param4 = 1,
92587 +};
92588 +
92589 +struct size_overflow_hash _001685_hash = {
92590 + .next = NULL,
92591 + .name = "do_ipt_set_ctl",
92592 + .file = "net/ipv4/netfilter/ip_tables.c",
92593 + .param4 = 1,
92594 +};
92595 +
92596 +struct size_overflow_hash _001686_hash = {
92597 + .next = NULL,
92598 + .name = "drbd_bm_resize",
92599 + .file = "drivers/block/drbd/drbd_bitmap.c",
92600 + .param2 = 1,
92601 +};
92602 +
92603 +struct size_overflow_hash _001687_hash = {
92604 + .next = NULL,
92605 + .name = "driver_names_read",
92606 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
92607 + .param3 = 1,
92608 +};
92609 +
92610 +struct size_overflow_hash _001688_hash = {
92611 + .next = NULL,
92612 + .name = "driver_stats_read",
92613 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
92614 + .param3 = 1,
92615 +};
92616 +
92617 +struct size_overflow_hash _001689_hash = {
92618 + .next = NULL,
92619 + .name = "flash_read",
92620 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
92621 + .param3 = 1,
92622 +};
92623 +
92624 +struct size_overflow_hash _001690_hash = {
92625 + .next = NULL,
92626 + .name = "flash_read",
92627 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
92628 + .param3 = 1,
92629 +};
92630 +
92631 +struct size_overflow_hash _001691_hash = {
92632 + .next = NULL,
92633 + .name = "flash_write",
92634 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
92635 + .param3 = 1,
92636 +};
92637 +
92638 +struct size_overflow_hash _001692_hash = {
92639 + .next = NULL,
92640 + .name = "flash_write",
92641 + .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
92642 + .param3 = 1,
92643 +};
92644 +
92645 +struct size_overflow_hash _001693_hash = {
92646 + .next = NULL,
92647 + .name = "ghash_async_setkey",
92648 + .file = "arch/x86/crypto/ghash-clmulni-intel_glue.c",
92649 + .param3 = 1,
92650 +};
92651 +
92652 +struct size_overflow_hash _001694_hash = {
92653 + .next = NULL,
92654 + .name = "handle_eviocgbit",
92655 + .file = "drivers/input/evdev.c",
92656 + .param3 = 1,
92657 +};
92658 +
92659 +struct size_overflow_hash _001695_hash = {
92660 + .next = NULL,
92661 + .name = "hid_parse_report",
92662 + .file = "include/linux/hid.h",
92663 + .param3 = 1,
92664 +};
92665 +
92666 +struct size_overflow_hash _001696_hash = {
92667 + .next = NULL,
92668 + .name = "ipath_get_base_info",
92669 + .file = "drivers/infiniband/hw/ipath/ipath_file_ops.c",
92670 + .param3 = 1,
92671 +};
92672 +
92673 +struct size_overflow_hash _001697_hash = {
92674 + .next = NULL,
92675 + .name = "options_write",
92676 + .file = "drivers/misc/sgi-gru/gruprocfs.c",
92677 + .param3 = 1,
92678 +};
92679 +
92680 +struct size_overflow_hash _001698_hash = {
92681 + .next = NULL,
92682 + .name = "portcntrs_1_read",
92683 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
92684 + .param3 = 1,
92685 +};
92686 +
92687 +struct size_overflow_hash _001699_hash = {
92688 + .next = NULL,
92689 + .name = "portcntrs_2_read",
92690 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
92691 + .param3 = 1,
92692 +};
92693 +
92694 +struct size_overflow_hash _001700_hash = {
92695 + .next = NULL,
92696 + .name = "portnames_read",
92697 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
92698 + .param3 = 1,
92699 +};
92700 +
92701 +struct size_overflow_hash _001701_hash = {
92702 + .next = NULL,
92703 + .name = "qib_alloc_devdata",
92704 + .file = "drivers/infiniband/hw/qib/qib_init.c",
92705 + .param2 = 1,
92706 +};
92707 +
92708 +struct size_overflow_hash _001702_hash = {
92709 + .next = NULL,
92710 + .name = "qib_diag_write",
92711 + .file = "drivers/infiniband/hw/qib/qib_diag.c",
92712 + .param3 = 1,
92713 +};
92714 +
92715 +struct size_overflow_hash _001703_hash = {
92716 + .next = NULL,
92717 + .name = "qib_get_base_info",
92718 + .file = "drivers/infiniband/hw/qib/qib_file_ops.c",
92719 + .param3 = 1,
92720 +};
92721 +
92722 +struct size_overflow_hash _001704_hash = {
92723 + .next = NULL,
92724 + .name = "qsfp_1_read",
92725 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
92726 + .param3 = 1,
92727 +};
92728 +
92729 +struct size_overflow_hash _001705_hash = {
92730 + .next = NULL,
92731 + .name = "qsfp_2_read",
92732 + .file = "drivers/infiniband/hw/qib/qib_fs.c",
92733 + .param3 = 1,
92734 +};
92735 +
92736 +struct size_overflow_hash _001706_hash = {
92737 + .next = NULL,
92738 + .name = "rfc4106_set_key",
92739 + .file = "arch/x86/crypto/aesni-intel_glue.c",
92740 + .param3 = 1,
92741 +};
92742 +
92743 +struct size_overflow_hash _001707_hash = {
92744 + .next = &_000258_hash,
92745 + .name = "stats_read_ul",
92746 + .file = "drivers/idle/i7300_idle.c",
92747 + .param3 = 1,
92748 +};
92749 +
92750 +struct size_overflow_hash _001708_hash = {
92751 + .next = NULL,
92752 + .name = "xpc_kmalloc_cacheline_aligned",
92753 + .file = "drivers/misc/sgi-xp/xpc_partition.c",
92754 + .param1 = 1,
92755 +};
92756 +
92757 +struct size_overflow_hash _001709_hash = {
92758 + .next = NULL,
92759 + .name = "xpc_kzalloc_cacheline_aligned",
92760 + .file = "drivers/misc/sgi-xp/xpc_main.c",
92761 + .param1 = 1,
92762 +};
92763 +
92764 +struct size_overflow_hash _001710_hash = {
92765 + .next = NULL,
92766 + .name = "c4iw_init_resource",
92767 + .file = "drivers/infiniband/hw/cxgb4/resource.c",
92768 + .param2 = 1,
92769 + .param3 = 1,
92770 +};
92771 +
92772 +struct size_overflow_hash _001712_hash = {
92773 + .next = NULL,
92774 + .name = "cxio_hal_init_resource",
92775 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
92776 + .param2 = 1,
92777 + .param7 = 1,
92778 + .param6 = 1,
92779 +};
92780 +
92781 +struct size_overflow_hash _001715_hash = {
92782 + .next = &_000734_hash,
92783 + .name = "cxio_hal_init_rhdl_resource",
92784 + .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
92785 + .param1 = 1,
92786 +};
92787 +
92788 +struct size_overflow_hash _001716_hash = {
92789 + .next = NULL,
92790 + .name = "amthi_read",
92791 + .file = "drivers/staging/mei/iorw.c",
92792 + .param4 = 1,
92793 +};
92794 +
92795 +struct size_overflow_hash _001717_hash = {
92796 + .next = NULL,
92797 + .name = "bcm_char_read",
92798 + .file = "drivers/staging/bcm/Bcmchar.c",
92799 + .param3 = 1,
92800 +};
92801 +
92802 +struct size_overflow_hash _001718_hash = {
92803 + .next = NULL,
92804 + .name = "BcmCopySection",
92805 + .file = "drivers/staging/bcm/nvm.c",
92806 + .param5 = 1,
92807 +};
92808 +
92809 +struct size_overflow_hash _001719_hash = {
92810 + .next = NULL,
92811 + .name = "buffer_from_user",
92812 + .file = "drivers/staging/vme/devices/vme_user.c",
92813 + .param3 = 1,
92814 +};
92815 +
92816 +struct size_overflow_hash _001720_hash = {
92817 + .next = NULL,
92818 + .name = "buffer_to_user",
92819 + .file = "drivers/staging/vme/devices/vme_user.c",
92820 + .param3 = 1,
92821 +};
92822 +
92823 +struct size_overflow_hash _001721_hash = {
92824 + .next = NULL,
92825 + .name = "capabilities_read",
92826 + .file = "drivers/xen/xenfs/super.c",
92827 + .param3 = 1,
92828 +};
92829 +
92830 +struct size_overflow_hash _001722_hash = {
92831 + .next = NULL,
92832 + .name = "chd_dec_fetch_cdata",
92833 + .file = "drivers/staging/crystalhd/crystalhd_lnx.c",
92834 + .param3 = 1,
92835 +};
92836 +
92837 +struct size_overflow_hash _001723_hash = {
92838 + .next = NULL,
92839 + .name = "create_bounce_buffer",
92840 + .file = "drivers/staging/hv/storvsc_drv.c",
92841 + .param3 = 1,
92842 +};
92843 +
92844 +struct size_overflow_hash _001724_hash = {
92845 + .next = NULL,
92846 + .name = "crystalhd_create_dio_pool",
92847 + .file = "drivers/staging/crystalhd/crystalhd_misc.c",
92848 + .param2 = 1,
92849 +};
92850 +
92851 +struct size_overflow_hash _001725_hash = {
92852 + .next = NULL,
92853 + .name = "do_read_log_to_user",
92854 + .file = "drivers/staging/android/logger.c",
92855 + .param4 = 1,
92856 +};
92857 +
92858 +struct size_overflow_hash _001726_hash = {
92859 + .next = NULL,
92860 + .name = "do_write_log_from_user",
92861 + .file = "drivers/staging/android/logger.c",
92862 + .param3 = 1,
92863 +};
92864 +
92865 +struct size_overflow_hash _001727_hash = {
92866 + .next = NULL,
92867 + .name = "dt3155_read",
92868 + .file = "drivers/staging/media/dt3155v4l/dt3155v4l.c",
92869 + .param3 = 1,
92870 +};
92871 +
92872 +struct size_overflow_hash _001728_hash = {
92873 + .next = NULL,
92874 + .name = "easycap_alsa_vmalloc",
92875 + .file = "drivers/staging/media/easycap/easycap_sound.c",
92876 + .param2 = 1,
92877 +};
92878 +
92879 +struct size_overflow_hash _001729_hash = {
92880 + .next = NULL,
92881 + .name = "evm_read_key",
92882 + .file = "security/integrity/evm/evm_secfs.c",
92883 + .param3 = 1,
92884 +};
92885 +
92886 +struct size_overflow_hash _001730_hash = {
92887 + .next = NULL,
92888 + .name = "evm_write_key",
92889 + .file = "security/integrity/evm/evm_secfs.c",
92890 + .param3 = 1,
92891 +};
92892 +
92893 +struct size_overflow_hash _001731_hash = {
92894 + .next = NULL,
92895 + .name = "evtchn_read",
92896 + .file = "drivers/xen/evtchn.c",
92897 + .param3 = 1,
92898 +};
92899 +
92900 +struct size_overflow_hash _001732_hash = {
92901 + .next = NULL,
92902 + .name = "gather_array",
92903 + .file = "drivers/xen/privcmd.c",
92904 + .param3 = 1,
92905 +};
92906 +
92907 +struct size_overflow_hash _001733_hash = {
92908 + .next = NULL,
92909 + .name = "gnttab_map",
92910 + .file = "drivers/xen/grant-table.c",
92911 + .param2 = 1,
92912 +};
92913 +
92914 +struct size_overflow_hash _001734_hash = {
92915 + .next = NULL,
92916 + .name = "iio_read_first_n_kfifo",
92917 + .file = "drivers/staging/iio/kfifo_buf.c",
92918 + .param2 = 1,
92919 +};
92920 +
92921 +struct size_overflow_hash _001735_hash = {
92922 + .next = NULL,
92923 + .name = "iio_read_first_n_sw_rb",
92924 + .file = "drivers/staging/iio/ring_sw.c",
92925 + .param2 = 1,
92926 +};
92927 +
92928 +struct size_overflow_hash _001736_hash = {
92929 + .next = NULL,
92930 + .name = "keymap_store",
92931 + .file = "drivers/staging/speakup/kobjects.c",
92932 + .param4 = 1,
92933 +};
92934 +
92935 +struct size_overflow_hash _001737_hash = {
92936 + .next = NULL,
92937 + .name = "line6_dumpreq_initbuf",
92938 + .file = "drivers/staging/line6/dumprequest.c",
92939 + .param3 = 1,
92940 +};
92941 +
92942 +struct size_overflow_hash _001738_hash = {
92943 + .next = NULL,
92944 + .name = "lirc_write",
92945 + .file = "drivers/staging/media/lirc/lirc_parallel.c",
92946 + .param3 = 1,
92947 +};
92948 +
92949 +struct size_overflow_hash _001739_hash = {
92950 + .next = NULL,
92951 + .name = "lirc_write",
92952 + .file = "drivers/staging/media/lirc/lirc_sir.c",
92953 + .param3 = 1,
92954 +};
92955 +
92956 +struct size_overflow_hash _001740_hash = {
92957 + .next = &_000815_hash,
92958 + .name = "lirc_write",
92959 + .file = "drivers/staging/media/lirc/lirc_serial.c",
92960 + .param3 = 1,
92961 +};
92962 +
92963 +struct size_overflow_hash _001741_hash = {
92964 + .next = &_001021_hash,
92965 + .name = "_malloc",
92966 + .file = "drivers/staging/rtl8712/osdep_service.h",
92967 + .param1 = 1,
92968 +};
92969 +
92970 +struct size_overflow_hash _001742_hash = {
92971 + .next = NULL,
92972 + .name = "mei_read",
92973 + .file = "drivers/staging/mei/main.c",
92974 + .param3 = 1,
92975 +};
92976 +
92977 +struct size_overflow_hash _001743_hash = {
92978 + .next = NULL,
92979 + .name = "mei_write",
92980 + .file = "drivers/staging/mei/main.c",
92981 + .param3 = 1,
92982 +};
92983 +
92984 +struct size_overflow_hash _001744_hash = {
92985 + .next = NULL,
92986 + .name = "msg_set",
92987 + .file = "drivers/staging/speakup/i18n.c",
92988 + .param3 = 1,
92989 +};
92990 +
92991 +struct size_overflow_hash _001745_hash = {
92992 + .next = NULL,
92993 + .name = "OS_kmalloc",
92994 + .file = "drivers/staging/cxt1e1/sbecom_inline_linux.h",
92995 + .param1 = 1,
92996 +};
92997 +
92998 +struct size_overflow_hash _001746_hash = {
92999 + .next = NULL,
93000 + .name = "queue_reply",
93001 + .file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
93002 + .param3 = 1,
93003 +};
93004 +
93005 +struct size_overflow_hash _001747_hash = {
93006 + .next = &_000841_hash,
93007 + .name = "resource_from_user",
93008 + .file = "drivers/staging/vme/devices/vme_user.c",
93009 + .param3 = 1,
93010 +};
93011 +
93012 +struct size_overflow_hash _001748_hash = {
93013 + .next = NULL,
93014 + .name = "sca3000_read_first_n_hw_rb",
93015 + .file = "drivers/staging/iio/accel/sca3000_ring.c",
93016 + .param2 = 1,
93017 +};
93018 +
93019 +struct size_overflow_hash _001749_hash = {
93020 + .next = NULL,
93021 + .name = "sep_lock_user_pages",
93022 + .file = "drivers/staging/sep/sep_driver.c",
93023 + .param2 = 1,
93024 + .param3 = 1,
93025 +};
93026 +
93027 +struct size_overflow_hash _001751_hash = {
93028 + .next = NULL,
93029 + .name = "sep_prepare_input_output_dma_table_in_dcb",
93030 + .file = "drivers/staging/sep/sep_driver.c",
93031 + .param4 = 1,
93032 + .param5 = 1,
93033 + .param2 = 1,
93034 + .param3 = 1,
93035 +};
93036 +
93037 +struct size_overflow_hash _001753_hash = {
93038 + .next = NULL,
93039 + .name = "split",
93040 + .file = "drivers/xen/xenbus/xenbus_xs.c",
93041 + .param2 = 1,
93042 +};
93043 +
93044 +struct size_overflow_hash _001754_hash = {
93045 + .next = NULL,
93046 + .name = "storvsc_connect_to_vsp",
93047 + .file = "drivers/staging/hv/storvsc_drv.c",
93048 + .param2 = 1,
93049 +};
93050 +
93051 +struct size_overflow_hash _001755_hash = {
93052 + .next = NULL,
93053 + .name = "u32_array_read",
93054 + .file = "arch/x86/xen/debugfs.c",
93055 + .param3 = 1,
93056 +};
93057 +
93058 +struct size_overflow_hash _001756_hash = {
93059 + .next = NULL,
93060 + .name = "ValidateDSDParamsChecksum",
93061 + .file = "drivers/staging/bcm/led_control.c",
93062 + .param3 = 1,
93063 +};
93064 +
93065 +struct size_overflow_hash _001757_hash = {
93066 + .next = NULL,
93067 + .name = "vfd_write",
93068 + .file = "drivers/staging/media/lirc/lirc_sasem.c",
93069 + .param3 = 1,
93070 +};
93071 +
93072 +struct size_overflow_hash _001758_hash = {
93073 + .next = NULL,
93074 + .name = "vfd_write",
93075 + .file = "drivers/staging/media/lirc/lirc_imon.c",
93076 + .param3 = 1,
93077 +};
93078 +
93079 +struct size_overflow_hash _001759_hash = {
93080 + .next = NULL,
93081 + .name = "Wb35Reg_BurstWrite",
93082 + .file = "drivers/staging/winbond/wb35reg.c",
93083 + .param4 = 1,
93084 +};
93085 +
93086 +struct size_overflow_hash _001760_hash = {
93087 + .next = NULL,
93088 + .name = "xenbus_file_write",
93089 + .file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
93090 + .param3 = 1,
93091 +};
93092 +
93093 +struct size_overflow_hash _001761_hash = {
93094 + .next = NULL,
93095 + .name = "xsd_read",
93096 + .file = "drivers/xen/xenfs/xenstored.c",
93097 + .param3 = 1,
93098 +};
93099 +
93100 +struct size_overflow_hash _001762_hash = {
93101 + .next = NULL,
93102 + .name = "line6_dumpreq_init",
93103 + .file = "drivers/staging/line6/dumprequest.c",
93104 + .param3 = 1,
93105 +};
93106 +
93107 +struct size_overflow_hash _001763_hash = {
93108 + .next = NULL,
93109 + .name = "r8712_usbctrl_vendorreq",
93110 + .file = "drivers/staging/rtl8712/usb_ops_linux.c",
93111 + .param6 = 1,
93112 +};
93113 +
93114 +struct size_overflow_hash _001764_hash = {
93115 + .next = NULL,
93116 + .name = "r871x_set_wpa_ie",
93117 + .file = "drivers/staging/rtl8712/rtl871x_ioctl_linux.c",
93118 + .param3 = 1,
93119 +};
93120 +
93121 +struct size_overflow_hash _001765_hash = {
93122 + .next = NULL,
93123 + .name = "sep_prepare_input_dma_table",
93124 + .file = "drivers/staging/sep/sep_driver.c",
93125 + .param2 = 1,
93126 + .param3 = 1,
93127 +};
93128 +
93129 +struct size_overflow_hash _001767_hash = {
93130 + .next = NULL,
93131 + .name = "sep_prepare_input_output_dma_table",
93132 + .file = "drivers/staging/sep/sep_driver.c",
93133 + .param2 = 1,
93134 + .param4 = 1,
93135 + .param3 = 1,
93136 +};
93137 +
93138 +struct size_overflow_hash _001770_hash = {
93139 + .next = NULL,
93140 + .name = "vme_user_write",
93141 + .file = "drivers/staging/vme/devices/vme_user.c",
93142 + .param3 = 1,
93143 +};
93144 +
93145 +struct size_overflow_hash _001771_hash = {
93146 + .next = NULL,
93147 + .name = "alloc_ebda_hpc",
93148 + .file = "drivers/pci/hotplug/ibmphp_ebda.c",
93149 + .param1 = 1,
93150 + .param2 = 1,
93151 +};
93152 +
93153 +struct size_overflow_hash _001772_hash = {
93154 + .next = NULL,
93155 + .name = "alloc_apertures",
93156 + .file = "include/linux/fb.h",
93157 + .param1 = 1,
93158 +};
93159 +
93160 +struct size_overflow_hash _001773_hash = {
93161 + .next = NULL,
93162 + .name = "bin_uuid",
93163 + .file = "kernel/sysctl_binary.c",
93164 + .param3 = 1,
93165 +};
93166 +
93167 +struct size_overflow_hash _001774_hash = {
93168 + .next = &_000640_hash,
93169 + .name = "__copy_from_user_inatomic_nocache",
93170 + .file = "arch/x86/include/asm/uaccess_64.h",
93171 + .param3 = 1,
93172 +};
93173 +
93174 +struct size_overflow_hash _001775_hash = {
93175 + .next = NULL,
93176 + .name = "do_dmabuf_dirty_sou",
93177 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
93178 + .param7 = 1,
93179 +};
93180 +
93181 +struct size_overflow_hash _001776_hash = {
93182 + .next = NULL,
93183 + .name = "do_surface_dirty_sou",
93184 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
93185 + .param7 = 1,
93186 +};
93187 +
93188 +struct size_overflow_hash _001777_hash = {
93189 + .next = NULL,
93190 + .name = "drm_agp_bind_pages",
93191 + .file = "drivers/gpu/drm/drm_agpsupport.c",
93192 + .param3 = 1,
93193 +};
93194 +
93195 +struct size_overflow_hash _001778_hash = {
93196 + .next = NULL,
93197 + .name = "drm_calloc_large",
93198 + .file = "include/drm/drm_mem_util.h",
93199 + .param1 = 1,
93200 + .param2 = 1,
93201 +};
93202 +
93203 +struct size_overflow_hash _001780_hash = {
93204 + .next = NULL,
93205 + .name = "drm_ht_create",
93206 + .file = "drivers/gpu/drm/drm_hashtab.c",
93207 + .param2 = 1,
93208 +};
93209 +
93210 +struct size_overflow_hash _001781_hash = {
93211 + .next = NULL,
93212 + .name = "drm_malloc_ab",
93213 + .file = "include/drm/drm_mem_util.h",
93214 + .param1 = 1,
93215 + .param2 = 1,
93216 +};
93217 +
93218 +struct size_overflow_hash _001783_hash = {
93219 + .next = NULL,
93220 + .name = "drm_plane_init",
93221 + .file = "drivers/gpu/drm/drm_crtc.c",
93222 + .param6 = 1,
93223 +};
93224 +
93225 +struct size_overflow_hash _001784_hash = {
93226 + .next = NULL,
93227 + .name = "drm_vmalloc_dma",
93228 + .file = "drivers/gpu/drm/drm_scatter.c",
93229 + .param1 = 1,
93230 +};
93231 +
93232 +struct size_overflow_hash _001785_hash = {
93233 + .next = NULL,
93234 + .name = "fb_read",
93235 + .file = "drivers/video/fbmem.c",
93236 + .param3 = 1,
93237 +};
93238 +
93239 +struct size_overflow_hash _001786_hash = {
93240 + .next = NULL,
93241 + .name = "fb_write",
93242 + .file = "drivers/video/fbmem.c",
93243 + .param3 = 1,
93244 +};
93245 +
93246 +struct size_overflow_hash _001787_hash = {
93247 + .next = NULL,
93248 + .name = "framebuffer_alloc",
93249 + .file = "include/linux/fb.h",
93250 + .param1 = 1,
93251 +};
93252 +
93253 +struct size_overflow_hash _001788_hash = {
93254 + .next = NULL,
93255 + .name = "i915_cache_sharing_read",
93256 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
93257 + .param3 = 1,
93258 +};
93259 +
93260 +struct size_overflow_hash _001789_hash = {
93261 + .next = NULL,
93262 + .name = "i915_cache_sharing_write",
93263 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
93264 + .param3 = 1,
93265 +};
93266 +
93267 +struct size_overflow_hash _001790_hash = {
93268 + .next = NULL,
93269 + .name = "i915_max_freq_read",
93270 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
93271 + .param3 = 1,
93272 +};
93273 +
93274 +struct size_overflow_hash _001791_hash = {
93275 + .next = NULL,
93276 + .name = "i915_max_freq_write",
93277 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
93278 + .param3 = 1,
93279 +};
93280 +
93281 +struct size_overflow_hash _001792_hash = {
93282 + .next = NULL,
93283 + .name = "i915_wedged_read",
93284 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
93285 + .param3 = 1,
93286 +};
93287 +
93288 +struct size_overflow_hash _001793_hash = {
93289 + .next = NULL,
93290 + .name = "i915_wedged_write",
93291 + .file = "drivers/gpu/drm/i915/i915_debugfs.c",
93292 + .param3 = 1,
93293 +};
93294 +
93295 +struct size_overflow_hash _001794_hash = {
93296 + .next = NULL,
93297 + .name = "__module_alloc",
93298 + .file = "arch/x86/kernel/module.c",
93299 + .param1 = 1,
93300 +};
93301 +
93302 +struct size_overflow_hash _001795_hash = {
93303 + .next = NULL,
93304 + .name = "module_alloc_update_bounds_rw",
93305 + .file = "kernel/module.c",
93306 + .param1 = 1,
93307 +};
93308 +
93309 +struct size_overflow_hash _001796_hash = {
93310 + .next = NULL,
93311 + .name = "module_alloc_update_bounds_rx",
93312 + .file = "kernel/module.c",
93313 + .param1 = 1,
93314 +};
93315 +
93316 +struct size_overflow_hash _001797_hash = {
93317 + .next = NULL,
93318 + .name = "p9_client_read",
93319 + .file = "include/net/9p/client.h",
93320 + .param5 = 1,
93321 +};
93322 +
93323 +struct size_overflow_hash _001798_hash = {
93324 + .next = NULL,
93325 + .name = "probe_kernel_write",
93326 + .file = "include/linux/uaccess.h",
93327 + .param3 = 1,
93328 +};
93329 +
93330 +struct size_overflow_hash _001799_hash = {
93331 + .next = NULL,
93332 + .name = "sched_feat_write",
93333 + .file = "kernel/sched/core.c",
93334 + .param3 = 1,
93335 +};
93336 +
93337 +struct size_overflow_hash _001800_hash = {
93338 + .next = NULL,
93339 + .name = "tstats_write",
93340 + .file = "kernel/time/timer_stats.c",
93341 + .param3 = 1,
93342 +};
93343 +
93344 +struct size_overflow_hash _001801_hash = {
93345 + .next = NULL,
93346 + .name = "ttm_bo_fbdev_io",
93347 + .file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
93348 + .param4 = 1,
93349 +};
93350 +
93351 +struct size_overflow_hash _001802_hash = {
93352 + .next = NULL,
93353 + .name = "ttm_bo_io",
93354 + .file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
93355 + .param5 = 1,
93356 +};
93357 +
93358 +struct size_overflow_hash _001803_hash = {
93359 + .next = NULL,
93360 + .name = "ttm_dma_page_pool_free",
93361 + .file = "drivers/gpu/drm/ttm/ttm_page_alloc_dma.c",
93362 + .param2 = 1,
93363 +};
93364 +
93365 +struct size_overflow_hash _001804_hash = {
93366 + .next = NULL,
93367 + .name = "ttm_page_pool_free",
93368 + .file = "drivers/gpu/drm/ttm/ttm_page_alloc.c",
93369 + .param2 = 1,
93370 +};
93371 +
93372 +struct size_overflow_hash _001805_hash = {
93373 + .next = NULL,
93374 + .name = "vmw_execbuf_process",
93375 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c",
93376 + .param5 = 1,
93377 +};
93378 +
93379 +struct size_overflow_hash _001806_hash = {
93380 + .next = NULL,
93381 + .name = "vmw_fifo_reserve",
93382 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c",
93383 + .param2 = 1,
93384 +};
93385 +
93386 +struct size_overflow_hash _001807_hash = {
93387 + .next = NULL,
93388 + .name = "vmw_kms_present",
93389 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
93390 + .param9 = 1,
93391 +};
93392 +
93393 +struct size_overflow_hash _001808_hash = {
93394 + .next = NULL,
93395 + .name = "vmw_kms_readback",
93396 + .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
93397 + .param6 = 1,
93398 +};
93399 +
93400 +struct size_overflow_hash _001809_hash = {
93401 + .next = NULL,
93402 + .name = "acpi_system_write_alarm",
93403 + .file = "drivers/acpi/proc.c",
93404 + .param3 = 1,
93405 +};
93406 +
93407 +struct size_overflow_hash _001810_hash = {
93408 + .next = NULL,
93409 + .name = "kmalloc",
93410 + .file = "include/linux/slab_def.h",
93411 + .param1 = 1,
93412 +};
93413 +
93414 +struct size_overflow_hash _001811_hash = {
93415 + .next = NULL,
93416 + .name = "slabinfo_write",
93417 + .file = "mm/slab.c",
93418 + .param3 = 1,
93419 +};
93420 +
93421 +struct size_overflow_hash *size_overflow_hash[65536] = {
93422 + [56878] = &_000001_hash,
93423 + [11151] = &_000002_hash,
93424 + [17854] = &_000003_hash,
93425 + [4132] = &_000004_hash,
93426 + [39070] = &_000005_hash,
93427 + [35447] = &_000007_hash,
93428 + [47830] = &_000008_hash,
93429 + [65254] = &_000009_hash,
93430 + [17521] = &_000011_hash,
93431 + [41425] = &_000012_hash,
93432 + [5785] = &_000013_hash,
93433 + [19960] = &_000014_hash,
93434 + [26729] = &_000015_hash,
93435 + [7954] = &_000016_hash,
93436 + [22403] = &_000017_hash,
93437 + [23258] = &_000018_hash,
93438 + [55695] = &_000019_hash,
93439 + [38964] = &_000020_hash,
93440 + [64250] = &_000021_hash,
93441 + [31825] = &_000022_hash,
93442 + [47446] = &_000023_hash,
93443 + [61521] = &_000024_hash,
93444 + [64227] = &_000025_hash,
93445 + [53378] = &_000026_hash,
93446 + [8885] = &_000027_hash,
93447 + [62101] = &_000028_hash,
93448 + [18152] = &_000029_hash,
93449 + [37525] = &_000030_hash,
93450 + [25827] = &_000031_hash,
93451 + [1169] = &_000032_hash,
93452 + [11925] = &_000033_hash,
93453 + [20558] = &_000034_hash,
93454 + [44019] = &_000035_hash,
93455 + [21909] = &_000036_hash,
93456 + [63679] = &_000037_hash,
93457 + [39450] = &_000038_hash,
93458 + [25085] = &_000039_hash,
93459 + [17830] = &_000040_hash,
93460 + [14329] = &_000041_hash,
93461 + [31235] = &_000042_hash,
93462 + [48207] = &_000043_hash,
93463 + [34918] = &_000044_hash,
93464 + [46839] = &_000045_hash,
93465 + [57930] = &_000046_hash,
93466 + [41364] = &_000047_hash,
93467 + [17581] = &_000048_hash,
93468 + [45922] = &_000049_hash,
93469 + [49567] = &_000050_hash,
93470 + [18248] = &_000051_hash,
93471 + [25528] = &_000052_hash,
93472 + [61874] = &_000053_hash,
93473 + [22591] = &_000054_hash,
93474 + [48456] = &_000055_hash,
93475 + [8743] = &_000056_hash,
93476 + [39131] = &_000057_hash,
93477 + [48328] = &_000058_hash,
93478 + [47136] = &_000059_hash,
93479 + [6358] = &_000060_hash,
93480 + [12252] = &_000061_hash,
93481 + [49340] = &_000062_hash,
93482 + [45875] = &_000063_hash,
93483 + [52182] = &_000065_hash,
93484 + [31149] = &_000067_hash,
93485 + [20455] = &_000068_hash,
93486 + [19917] = &_000070_hash,
93487 + [64771] = &_000071_hash,
93488 + [25140] = &_000072_hash,
93489 + [34097] = &_000073_hash,
93490 + [58131] = &_000074_hash,
93491 + [65311] = &_000075_hash,
93492 + [60609] = &_000076_hash,
93493 + [1917] = &_000077_hash,
93494 + [15337] = &_000078_hash,
93495 + [4732] = &_000079_hash,
93496 + [38783] = &_000080_hash,
93497 + [37249] = &_000081_hash,
93498 + [9234] = &_000082_hash,
93499 + [33309] = &_000083_hash,
93500 + [22389] = &_000084_hash,
93501 + [56319] = &_000085_hash,
93502 + [21496] = &_000086_hash,
93503 + [8163] = &_000087_hash,
93504 + [58766] = &_000088_hash,
93505 + [21048] = &_000089_hash,
93506 + [51221] = &_000090_hash,
93507 + [21498] = &_000091_hash,
93508 + [42627] = &_000092_hash,
93509 + [53059] = &_000094_hash,
93510 + [52870] = &_000095_hash,
93511 + [1567] = &_000096_hash,
93512 + [38330] = &_000097_hash,
93513 + [30892] = &_000098_hash,
93514 + [16927] = &_000099_hash,
93515 + [16461] = &_000100_hash,
93516 + [5634] = &_000101_hash,
93517 + [16496] = &_000103_hash,
93518 + [40012] = &_000104_hash,
93519 + [46014] = &_000105_hash,
93520 + [39600] = &_000106_hash,
93521 + [7435] = &_000107_hash,
93522 + [13332] = &_000109_hash,
93523 + [36665] = &_000110_hash,
93524 + [12413] = &_000111_hash,
93525 + [27279] = &_000112_hash,
93526 + [44774] = &_000113_hash,
93527 + [14479] = &_000114_hash,
93528 + [32447] = &_000115_hash,
93529 + [15439] = &_000116_hash,
93530 + [17932] = &_000117_hash,
93531 + [26096] = &_000118_hash,
93532 + [50814] = &_000119_hash,
93533 + [22598] = &_000120_hash,
93534 + [48287] = &_000121_hash,
93535 + [15611] = &_000122_hash,
93536 + [13414] = &_000123_hash,
93537 + [40371] = &_000124_hash,
93538 + [284] = &_000125_hash,
93539 + [6293] = &_000127_hash,
93540 + [60587] = &_000128_hash,
93541 + [8181] = &_000129_hash,
93542 + [27451] = &_000130_hash,
93543 + [29259] = &_000131_hash,
93544 + [41172] = &_000132_hash,
93545 + [3315] = &_000133_hash,
93546 + [37550] = &_000134_hash,
93547 + [40395] = &_000135_hash,
93548 + [24124] = &_000136_hash,
93549 + [63535] = &_000137_hash,
93550 + [14981] = &_000138_hash,
93551 + [52008] = &_000139_hash,
93552 + [22091] = &_000140_hash,
93553 + [64800] = &_000141_hash,
93554 + [14919] = &_000142_hash,
93555 + [60340] = &_000143_hash,
93556 + [34205] = &_000145_hash,
93557 + [65246] = &_000146_hash,
93558 + [1299] = &_000147_hash,
93559 + [33165] = &_000148_hash,
93560 + [22394] = &_000149_hash,
93561 + [49562] = &_000150_hash,
93562 + [56881] = &_000151_hash,
93563 + [13870] = &_000152_hash,
93564 + [65074] = &_000153_hash,
93565 + [11553] = &_000154_hash,
93566 + [43222] = &_000155_hash,
93567 + [17984] = &_000156_hash,
93568 + [26811] = &_000157_hash,
93569 + [30848] = &_000158_hash,
93570 + [15627] = &_000159_hash,
93571 + [43101] = &_000160_hash,
93572 + [4082] = &_000161_hash,
93573 + [43692] = &_000162_hash,
93574 + [21622] = &_000163_hash,
93575 + [50734] = &_000164_hash,
93576 + [803] = &_000166_hash,
93577 + [64674] = &_000168_hash,
93578 + [57538] = &_000170_hash,
93579 + [42442] = &_000171_hash,
93580 + [23031] = &_000172_hash,
93581 + [40663] = &_000173_hash,
93582 + [51180] = &_000174_hash,
93583 + [24173] = &_000175_hash,
93584 + [9286] = &_000176_hash,
93585 + [49517] = &_000177_hash,
93586 + [34878] = &_000180_hash,
93587 + [22819] = &_000181_hash,
93588 + [64314] = &_000182_hash,
93589 + [20494] = &_000183_hash,
93590 + [9483] = &_000184_hash,
93591 + [26518] = &_000185_hash,
93592 + [44651] = &_000186_hash,
93593 + [1188] = &_000187_hash,
93594 + [36031] = &_000188_hash,
93595 + [33469] = &_000189_hash,
93596 + [19672] = &_000190_hash,
93597 + [3216] = &_000191_hash,
93598 + [25071] = &_000192_hash,
93599 + [11744] = &_000194_hash,
93600 + [2358] = &_000196_hash,
93601 + [10146] = &_000198_hash,
93602 + [58709] = &_000199_hash,
93603 + [64773] = &_000200_hash,
93604 + [6159] = &_000201_hash,
93605 + [28617] = &_000202_hash,
93606 + [61067] = &_000203_hash,
93607 + [12884] = &_000204_hash,
93608 + [37308] = &_000205_hash,
93609 + [59973] = &_000206_hash,
93610 + [35895] = &_000207_hash,
93611 + [24951] = &_000208_hash,
93612 + [3070] = &_000209_hash,
93613 + [61023] = &_000210_hash,
93614 + [45702] = &_000211_hash,
93615 + [5533] = &_000212_hash,
93616 + [29186] = &_000213_hash,
93617 + [26311] = &_000214_hash,
93618 + [40182] = &_000215_hash,
93619 + [50505] = &_000216_hash,
93620 + [59061] = &_000217_hash,
93621 + [27511] = &_000218_hash,
93622 + [63286] = &_000219_hash,
93623 + [6678] = &_000220_hash,
93624 + [23065] = &_000222_hash,
93625 + [18156] = &_000223_hash,
93626 + [53757] = &_000224_hash,
93627 + [53720] = &_000225_hash,
93628 + [50241] = &_000226_hash,
93629 + [22498] = &_000227_hash,
93630 + [10991] = &_000228_hash,
93631 + [40026] = &_000229_hash,
93632 + [19995] = &_000230_hash,
93633 + [30445] = &_000231_hash,
93634 + [57691] = &_000232_hash,
93635 + [23150] = &_000233_hash,
93636 + [9960] = &_000234_hash,
93637 + [8736] = &_000235_hash,
93638 + [23750] = &_000237_hash,
93639 + [18393] = &_000238_hash,
93640 + [28541] = &_000240_hash,
93641 + [59944] = &_000241_hash,
93642 + [35042] = &_000242_hash,
93643 + [63488] = &_000243_hash,
93644 + [27286] = &_000244_hash,
93645 + [46922] = &_000245_hash,
93646 + [11860] = &_000246_hash,
93647 + [52928] = &_000247_hash,
93648 + [46714] = &_000248_hash,
93649 + [57313] = &_000249_hash,
93650 + [61978] = &_000250_hash,
93651 + [61063] = &_000251_hash,
93652 + [22271] = &_000252_hash,
93653 + [4214] = &_000253_hash,
93654 + [46247] = &_000254_hash,
93655 + [33246] = &_000255_hash,
93656 + [58325] = &_000257_hash,
93657 + [47399] = &_000259_hash,
93658 + [34963] = &_000260_hash,
93659 + [21221] = &_000261_hash,
93660 + [32211] = &_000262_hash,
93661 + [20854] = &_000263_hash,
93662 + [49351] = &_000264_hash,
93663 + [52341] = &_000265_hash,
93664 + [53533] = &_000266_hash,
93665 + [52267] = &_000267_hash,
93666 + [46753] = &_000268_hash,
93667 + [2115] = &_000269_hash,
93668 + [44017] = &_000271_hash,
93669 + [13495] = &_000272_hash,
93670 + [12988] = &_000273_hash,
93671 + [55227] = &_000274_hash,
93672 + [47762] = &_000276_hash,
93673 + [17613] = &_000277_hash,
93674 + [52037] = &_000278_hash,
93675 + [5994] = &_000279_hash,
93676 + [46818] = &_000280_hash,
93677 + [13467] = &_000281_hash,
93678 + [61848] = &_000282_hash,
93679 + [43082] = &_000284_hash,
93680 + [55732] = &_000286_hash,
93681 + [2543] = &_000287_hash,
93682 + [51694] = &_000288_hash,
93683 + [18402] = &_000289_hash,
93684 + [38282] = &_000290_hash,
93685 + [5456] = &_000291_hash,
93686 + [58261] = &_000292_hash,
93687 + [24792] = &_000293_hash,
93688 + [6422] = &_000294_hash,
93689 + [63953] = &_000295_hash,
93690 + [27384] = &_000296_hash,
93691 + [47213] = &_000297_hash,
93692 + [23548] = &_000298_hash,
93693 + [47858] = &_000299_hash,
93694 + [52501] = &_000300_hash,
93695 + [12475] = &_000301_hash,
93696 + [52921] = &_000302_hash,
93697 + [19120] = &_000303_hash,
93698 + [14355] = &_000304_hash,
93699 + [30563] = &_000305_hash,
93700 + [14942] = &_000306_hash,
93701 + [30969] = &_000307_hash,
93702 + [57776] = &_000308_hash,
93703 + [21956] = &_000309_hash,
93704 + [44050] = &_000310_hash,
93705 + [2193] = &_000311_hash,
93706 + [44818] = &_000312_hash,
93707 + [50616] = &_000313_hash,
93708 + [49299] = &_000314_hash,
93709 + [2796] = &_000315_hash,
93710 + [4190] = &_000316_hash,
93711 + [11548] = &_000317_hash,
93712 + [53798] = &_000318_hash,
93713 + [60370] = &_000319_hash,
93714 + [35863] = &_000320_hash,
93715 + [54595] = &_000322_hash,
93716 + [2808] = &_000323_hash,
93717 + [24656] = &_000324_hash,
93718 + [895] = &_000325_hash,
93719 + [32809] = &_000326_hash,
93720 + [55621] = &_000327_hash,
93721 + [1733] = &_000328_hash,
93722 + [36069] = &_000330_hash,
93723 + [23714] = &_000331_hash,
93724 + [26020] = &_000332_hash,
93725 + [63875] = &_000333_hash,
93726 + [58608] = &_000334_hash,
93727 + [8919] = &_000335_hash,
93728 + [23906] = &_000336_hash,
93729 + [59497] = &_000337_hash,
93730 + [34782] = &_000338_hash,
93731 + [40998] = &_000339_hash,
93732 + [33328] = &_000340_hash,
93733 + [17866] = &_000341_hash,
93734 + [38741] = &_000342_hash,
93735 + [53939] = &_000343_hash,
93736 + [14658] = &_000344_hash,
93737 + [42465] = &_000345_hash,
93738 + [49600] = &_000346_hash,
93739 + [7391] = &_000347_hash,
93740 + [43616] = &_000348_hash,
93741 + [16775] = &_000349_hash,
93742 + [41393] = &_000350_hash,
93743 + [10532] = &_000351_hash,
93744 + [50366] = &_000352_hash,
93745 + [33324] = &_000353_hash,
93746 + [38200] = &_000354_hash,
93747 + [59315] = &_000355_hash,
93748 + [33916] = &_000356_hash,
93749 + [36593] = &_000357_hash,
93750 + [63079] = &_000358_hash,
93751 + [379] = &_000359_hash,
93752 + [34248] = &_000360_hash,
93753 + [27251] = &_000361_hash,
93754 + [29460] = &_000362_hash,
93755 + [7461] = &_000363_hash,
93756 + [9870] = &_000364_hash,
93757 + [44596] = &_000365_hash,
93758 + [45157] = &_000366_hash,
93759 + [55069] = &_000367_hash,
93760 + [29452] = &_000368_hash,
93761 + [54888] = &_000369_hash,
93762 + [31885] = &_000370_hash,
93763 + [20206] = &_000371_hash,
93764 + [59852] = &_000372_hash,
93765 + [20325] = &_000373_hash,
93766 + [18488] = &_000374_hash,
93767 + [22017] = &_000375_hash,
93768 + [57485] = &_000376_hash,
93769 + [49827] = &_000377_hash,
93770 + [37770] = &_000379_hash,
93771 + [52668] = &_000380_hash,
93772 + [13724] = &_000381_hash,
93773 + [59701] = &_000382_hash,
93774 + [11954] = &_000383_hash,
93775 + [9890] = &_000384_hash,
93776 + [17684] = &_000385_hash,
93777 + [18158] = &_000386_hash,
93778 + [61318] = &_000387_hash,
93779 + [2760] = &_000388_hash,
93780 + [38444] = &_000390_hash,
93781 + [55856] = &_000392_hash,
93782 + [34762] = &_000393_hash,
93783 + [48360] = &_000394_hash,
93784 + [40885] = &_000395_hash,
93785 + [36032] = &_000396_hash,
93786 + [52057] = &_000397_hash,
93787 + [12463] = &_000398_hash,
93788 + [30616] = &_000399_hash,
93789 + [38680] = &_000400_hash,
93790 + [41742] = &_000401_hash,
93791 + [50662] = &_000402_hash,
93792 + [48440] = &_000403_hash,
93793 + [34418] = &_000404_hash,
93794 + [64275] = &_000405_hash,
93795 + [12231] = &_000406_hash,
93796 + [53530] = &_000407_hash,
93797 + [54723] = &_000408_hash,
93798 + [19490] = &_000409_hash,
93799 + [11595] = &_000410_hash,
93800 + [15277] = &_000411_hash,
93801 + [4811] = &_000412_hash,
93802 + [42017] = &_000413_hash,
93803 + [17238] = &_000414_hash,
93804 + [55439] = &_000415_hash,
93805 + [45794] = &_000416_hash,
93806 + [60027] = &_000417_hash,
93807 + [3750] = &_000418_hash,
93808 + [11091] = &_000419_hash,
93809 + [32935] = &_000420_hash,
93810 + [22809] = &_000422_hash,
93811 + [60193] = &_000423_hash,
93812 + [14396] = &_000424_hash,
93813 + [18101] = &_000425_hash,
93814 + [46395] = &_000426_hash,
93815 + [24339] = &_000427_hash,
93816 + [26065] = &_000428_hash,
93817 + [43016] = &_000429_hash,
93818 + [41996] = &_000430_hash,
93819 + [7371] = &_000431_hash,
93820 + [32968] = &_000432_hash,
93821 + [53082] = &_000433_hash,
93822 + [38798] = &_000434_hash,
93823 + [12726] = &_000435_hash,
93824 + [55018] = &_000436_hash,
93825 + [26114] = &_000437_hash,
93826 + [31697] = &_000438_hash,
93827 + [21401] = &_000441_hash,
93828 + [33193] = &_000442_hash,
93829 + [52271] = &_000443_hash,
93830 + [20847] = &_000444_hash,
93831 + [30754] = &_000445_hash,
93832 + [54440] = &_000446_hash,
93833 + [22059] = &_000447_hash,
93834 + [47566] = &_000448_hash,
93835 + [22926] = &_000449_hash,
93836 + [20788] = &_000450_hash,
93837 + [18162] = &_000451_hash,
93838 + [65006] = &_000452_hash,
93839 + [11523] = &_000453_hash,
93840 + [29207] = &_000454_hash,
93841 + [18071] = &_000455_hash,
93842 + [7601] = &_000456_hash,
93843 + [12773] = &_000457_hash,
93844 + [61543] = &_000458_hash,
93845 + [5578] = &_000460_hash,
93846 + [49050] = &_000461_hash,
93847 + [51965] = &_000462_hash,
93848 + [6807] = &_000463_hash,
93849 + [22982] = &_000464_hash,
93850 + [36769] = &_000465_hash,
93851 + [53892] = &_000466_hash,
93852 + [2547] = &_000467_hash,
93853 + [53678] = &_000468_hash,
93854 + [61439] = &_000469_hash,
93855 + [31287] = &_000470_hash,
93856 + [6125] = &_000471_hash,
93857 + [57511] = &_000472_hash,
93858 + [13001] = &_000473_hash,
93859 + [62932] = &_000474_hash,
93860 + [62284] = &_000475_hash,
93861 + [9472] = &_000476_hash,
93862 + [26260] = &_000477_hash,
93863 + [63065] = &_000478_hash,
93864 + [18949] = &_000479_hash,
93865 + [29891] = &_000481_hash,
93866 + [41916] = &_000482_hash,
93867 + [40474] = &_000483_hash,
93868 + [63551] = &_000484_hash,
93869 + [36557] = &_000485_hash,
93870 + [2994] = &_000486_hash,
93871 + [5521] = &_000487_hash,
93872 + [51016] = &_000488_hash,
93873 + [7644] = &_000489_hash,
93874 + [55103] = &_000490_hash,
93875 + [11488] = &_000491_hash,
93876 + [7184] = &_000492_hash,
93877 + [36934] = &_000493_hash,
93878 + [54855] = &_000494_hash,
93879 + [63193] = &_000495_hash,
93880 + [12369] = &_000496_hash,
93881 + [15828] = &_000497_hash,
93882 + [61322] = &_000498_hash,
93883 + [5412] = &_000499_hash,
93884 + [28089] = &_000500_hash,
93885 + [64306] = &_000502_hash,
93886 + [24071] = &_000503_hash,
93887 + [50308] = &_000504_hash,
93888 + [38790] = &_000505_hash,
93889 + [9838] = &_000506_hash,
93890 + [18983] = &_000507_hash,
93891 + [9656] = &_000508_hash,
93892 + [18950] = &_000509_hash,
93893 + [59749] = &_000510_hash,
93894 + [20465] = &_000511_hash,
93895 + [4765] = &_000512_hash,
93896 + [16169] = &_000513_hash,
93897 + [6930] = &_000514_hash,
93898 + [16926] = &_000515_hash,
93899 + [35218] = &_000516_hash,
93900 + [19956] = &_000517_hash,
93901 + [55255] = &_000518_hash,
93902 + [861] = &_000519_hash,
93903 + [26574] = &_000520_hash,
93904 + [26794] = &_000521_hash,
93905 + [2133] = &_000522_hash,
93906 + [44616] = &_000523_hash,
93907 + [12840] = &_000524_hash,
93908 + [60426] = &_000525_hash,
93909 + [18133] = &_000526_hash,
93910 + [30479] = &_000527_hash,
93911 + [3219] = &_000528_hash,
93912 + [36488] = &_000529_hash,
93913 + [62043] = &_000530_hash,
93914 + [21714] = &_000532_hash,
93915 + [48007] = &_000533_hash,
93916 + [49969] = &_000534_hash,
93917 + [7701] = &_000535_hash,
93918 + [11521] = &_000536_hash,
93919 + [4269] = &_000537_hash,
93920 + [37627] = &_000539_hash,
93921 + [33555] = &_000540_hash,
93922 + [25900] = &_000541_hash,
93923 + [31709] = &_000542_hash,
93924 + [44626] = &_000544_hash,
93925 + [1679] = &_000545_hash,
93926 + [18349] = &_000546_hash,
93927 + [15338] = &_000547_hash,
93928 + [57935] = &_000548_hash,
93929 + [55850] = &_000549_hash,
93930 + [36063] = &_000550_hash,
93931 + [56674] = &_000551_hash,
93932 + [21379] = &_000552_hash,
93933 + [18507] = &_000553_hash,
93934 + [55719] = &_000554_hash,
93935 + [31210] = &_000555_hash,
93936 + [36207] = &_000556_hash,
93937 + [64180] = &_000557_hash,
93938 + [41770] = &_000558_hash,
93939 + [11600] = &_000559_hash,
93940 + [36638] = &_000560_hash,
93941 + [25576] = &_000561_hash,
93942 + [7000] = &_000562_hash,
93943 + [34187] = &_000563_hash,
93944 + [58533] = &_000564_hash,
93945 + [5083] = &_000565_hash,
93946 + [62614] = &_000566_hash,
93947 + [20085] = &_000567_hash,
93948 + [1135] = &_000568_hash,
93949 + [25613] = &_000569_hash,
93950 + [9541] = &_000570_hash,
93951 + [30577] = &_000571_hash,
93952 + [35722] = &_000572_hash,
93953 + [60407] = &_000573_hash,
93954 + [29465] = &_000574_hash,
93955 + [46891] = &_000575_hash,
93956 + [43633] = &_000576_hash,
93957 + [53743] = &_000577_hash,
93958 + [16196] = &_000578_hash,
93959 + [34425] = &_000580_hash,
93960 + [9646] = &_000581_hash,
93961 + [59756] = &_000583_hash,
93962 + [45524] = &_000584_hash,
93963 + [36702] = &_000585_hash,
93964 + [36747] = &_000586_hash,
93965 + [33643] = &_000588_hash,
93966 + [29158] = &_000589_hash,
93967 + [49662] = &_000590_hash,
93968 + [51062] = &_000591_hash,
93969 + [64755] = &_000592_hash,
93970 + [4829] = &_000594_hash,
93971 + [16413] = &_000595_hash,
93972 + [36125] = &_000596_hash,
93973 + [36293] = &_000597_hash,
93974 + [39712] = &_000598_hash,
93975 + [32160] = &_000599_hash,
93976 + [22962] = &_000600_hash,
93977 + [32001] = &_000601_hash,
93978 + [35828] = &_000602_hash,
93979 + [3106] = &_000603_hash,
93980 + [34039] = &_000604_hash,
93981 + [22393] = &_000605_hash,
93982 + [3560] = &_000606_hash,
93983 + [28195] = &_000607_hash,
93984 + [2062] = &_000608_hash,
93985 + [64001] = &_000609_hash,
93986 + [42407] = &_000610_hash,
93987 + [6253] = &_000611_hash,
93988 + [58640] = &_000612_hash,
93989 + [32195] = &_000613_hash,
93990 + [26197] = &_000614_hash,
93991 + [58003] = &_000615_hash,
93992 + [21662] = &_000616_hash,
93993 + [45750] = &_000617_hash,
93994 + [25798] = &_000618_hash,
93995 + [41052] = &_000619_hash,
93996 + [14096] = &_000620_hash,
93997 + [1439] = &_000621_hash,
93998 + [29074] = &_000622_hash,
93999 + [2376] = &_000623_hash,
94000 + [24068] = &_000625_hash,
94001 + [59519] = &_000627_hash,
94002 + [9893] = &_000628_hash,
94003 + [39979] = &_000630_hash,
94004 + [41540] = &_000631_hash,
94005 + [43200] = &_000633_hash,
94006 + [33494] = &_000634_hash,
94007 + [2028] = &_000635_hash,
94008 + [27206] = &_000636_hash,
94009 + [24302] = &_000637_hash,
94010 + [38112] = &_000638_hash,
94011 + [46538] = &_000639_hash,
94012 + [35228] = &_000641_hash,
94013 + [8339] = &_000642_hash,
94014 + [45349] = &_000643_hash,
94015 + [48404] = &_000644_hash,
94016 + [37865] = &_000645_hash,
94017 + [45763] = &_000646_hash,
94018 + [62347] = &_000647_hash,
94019 + [21644] = &_000648_hash,
94020 + [53135] = &_000649_hash,
94021 + [25095] = &_000650_hash,
94022 + [11697] = &_000651_hash,
94023 + [27003] = &_000652_hash,
94024 + [32464] = &_000653_hash,
94025 + [65339] = &_000654_hash,
94026 + [44248] = &_000655_hash,
94027 + [16] = &_000656_hash,
94028 + [29933] = &_000657_hash,
94029 + [34359] = &_000658_hash,
94030 + [3154] = &_000659_hash,
94031 + [59308] = &_000660_hash,
94032 + [61661] = &_000661_hash,
94033 + [23959] = &_000662_hash,
94034 + [6724] = &_000663_hash,
94035 + [54587] = &_000664_hash,
94036 + [28479] = &_000665_hash,
94037 + [56583] = &_000666_hash,
94038 + [64644] = &_000667_hash,
94039 + [23284] = &_000668_hash,
94040 + [61655] = &_000669_hash,
94041 + [20980] = &_000670_hash,
94042 + [19794] = &_000671_hash,
94043 + [30036] = &_000672_hash,
94044 + [25649] = &_000673_hash,
94045 + [47428] = &_000674_hash,
94046 + [47737] = &_000675_hash,
94047 + [8367] = &_000676_hash,
94048 + [2987] = &_000677_hash,
94049 + [50962] = &_000678_hash,
94050 + [10760] = &_000679_hash,
94051 + [31678] = &_000680_hash,
94052 + [48558] = &_000681_hash,
94053 + [2274] = &_000682_hash,
94054 + [831] = &_000683_hash,
94055 + [61833] = &_000684_hash,
94056 + [56864] = &_000685_hash,
94057 + [31040] = &_000686_hash,
94058 + [22196] = &_000687_hash,
94059 + [20076] = &_000688_hash,
94060 + [52821] = &_000689_hash,
94061 + [21896] = &_000690_hash,
94062 + [49367] = &_000691_hash,
94063 + [64731] = &_000692_hash,
94064 + [37110] = &_000693_hash,
94065 + [53694] = &_000694_hash,
94066 + [6175] = &_000695_hash,
94067 + [33048] = &_000696_hash,
94068 + [34746] = &_000697_hash,
94069 + [23777] = &_000698_hash,
94070 + [53828] = &_000699_hash,
94071 + [26539] = &_000700_hash,
94072 + [42628] = &_000701_hash,
94073 + [59115] = &_000702_hash,
94074 + [4456] = &_000703_hash,
94075 + [63619] = &_000704_hash,
94076 + [47329] = &_000705_hash,
94077 + [13534] = &_000706_hash,
94078 + [36955] = &_000707_hash,
94079 + [9841] = &_000708_hash,
94080 + [19308] = &_000709_hash,
94081 + [52439] = &_000710_hash,
94082 + [24680] = &_000711_hash,
94083 + [55652] = &_000712_hash,
94084 + [7842] = &_000713_hash,
94085 + [6500] = &_000714_hash,
94086 + [33485] = &_000715_hash,
94087 + [49920] = &_000716_hash,
94088 + [50750] = &_000717_hash,
94089 + [22318] = &_000718_hash,
94090 + [44599] = &_000719_hash,
94091 + [46403] = &_000720_hash,
94092 + [44534] = &_000721_hash,
94093 + [303] = &_000722_hash,
94094 + [22960] = &_000723_hash,
94095 + [10544] = &_000724_hash,
94096 + [8236] = &_000725_hash,
94097 + [21239] = &_000726_hash,
94098 + [24712] = &_000727_hash,
94099 + [37974] = &_000728_hash,
94100 + [62082] = &_000729_hash,
94101 + [57054] = &_000730_hash,
94102 + [53265] = &_000731_hash,
94103 + [52239] = &_000732_hash,
94104 + [14753] = &_000733_hash,
94105 + [60221] = &_000736_hash,
94106 + [27142] = &_000737_hash,
94107 + [14295] = &_000738_hash,
94108 + [25923] = &_000739_hash,
94109 + [29213] = &_000740_hash,
94110 + [31865] = &_000741_hash,
94111 + [4764] = &_000742_hash,
94112 + [10574] = &_000743_hash,
94113 + [55766] = &_000744_hash,
94114 + [22483] = &_000745_hash,
94115 + [61047] = &_000746_hash,
94116 + [41044] = &_000747_hash,
94117 + [58978] = &_000748_hash,
94118 + [47578] = &_000749_hash,
94119 + [7730] = &_000750_hash,
94120 + [15904] = &_000751_hash,
94121 + [25081] = &_000752_hash,
94122 + [45743] = &_000753_hash,
94123 + [58830] = &_000754_hash,
94124 + [59081] = &_000755_hash,
94125 + [47533] = &_000756_hash,
94126 + [11305] = &_000757_hash,
94127 + [29096] = &_000758_hash,
94128 + [19749] = &_000759_hash,
94129 + [56290] = &_000760_hash,
94130 + [44963] = &_000761_hash,
94131 + [30026] = &_000762_hash,
94132 + [27694] = &_000763_hash,
94133 + [8089] = &_000764_hash,
94134 + [38583] = &_000765_hash,
94135 + [1144] = &_000766_hash,
94136 + [20939] = &_000767_hash,
94137 + [22231] = &_000768_hash,
94138 + [17486] = &_000769_hash,
94139 + [51811] = &_000770_hash,
94140 + [62746] = &_000771_hash,
94141 + [19181] = &_000772_hash,
94142 + [52661] = &_000773_hash,
94143 + [51148] = &_000774_hash,
94144 + [49864] = &_000775_hash,
94145 + [37978] = &_000776_hash,
94146 + [6280] = &_000777_hash,
94147 + [12961] = &_000778_hash,
94148 + [60541] = &_000779_hash,
94149 + [37021] = &_000780_hash,
94150 + [26028] = &_000781_hash,
94151 + [41363] = &_000782_hash,
94152 + [42016] = &_000783_hash,
94153 + [58540] = &_000784_hash,
94154 + [2326] = &_000785_hash,
94155 + [60981] = &_000786_hash,
94156 + [13313] = &_000787_hash,
94157 + [44188] = &_000788_hash,
94158 + [34638] = &_000789_hash,
94159 + [20304] = &_000790_hash,
94160 + [60975] = &_000791_hash,
94161 + [12244] = &_000792_hash,
94162 + [16266] = &_000793_hash,
94163 + [3395] = &_000794_hash,
94164 + [63321] = &_000795_hash,
94165 + [20509] = &_000796_hash,
94166 + [57365] = &_000797_hash,
94167 + [47449] = &_000798_hash,
94168 + [56693] = &_000799_hash,
94169 + [33936] = &_000800_hash,
94170 + [52548] = &_000801_hash,
94171 + [18733] = &_000802_hash,
94172 + [15560] = &_000803_hash,
94173 + [13231] = &_000804_hash,
94174 + [64518] = &_000806_hash,
94175 + [54551] = &_000807_hash,
94176 + [54359] = &_000809_hash,
94177 + [46503] = &_000810_hash,
94178 + [22258] = &_000811_hash,
94179 + [39434] = &_000812_hash,
94180 + [52887] = &_000813_hash,
94181 + [3079] = &_000814_hash,
94182 + [18813] = &_000816_hash,
94183 + [47614] = &_000817_hash,
94184 + [38186] = &_000818_hash,
94185 + [57652] = &_000819_hash,
94186 + [10078] = &_000820_hash,
94187 + [17910] = &_000821_hash,
94188 + [13567] = &_000822_hash,
94189 + [21531] = &_000823_hash,
94190 + [46135] = &_000824_hash,
94191 + [10582] = &_000825_hash,
94192 + [4662] = &_000826_hash,
94193 + [17969] = &_000827_hash,
94194 + [43943] = &_000828_hash,
94195 + [46740] = &_000829_hash,
94196 + [26716] = &_000830_hash,
94197 + [58230] = &_000831_hash,
94198 + [252] = &_000832_hash,
94199 + [15704] = &_000833_hash,
94200 + [59765] = &_000834_hash,
94201 + [7322] = &_000835_hash,
94202 + [43950] = &_000836_hash,
94203 + [53093] = &_000837_hash,
94204 + [21646] = &_000838_hash,
94205 + [57063] = &_000839_hash,
94206 + [17132] = &_000840_hash,
94207 + [53922] = &_000842_hash,
94208 + [49155] = &_000843_hash,
94209 + [16356] = &_000844_hash,
94210 + [60037] = &_000845_hash,
94211 + [17299] = &_000846_hash,
94212 + [25678] = &_000847_hash,
94213 + [15494] = &_000848_hash,
94214 + [15159] = &_000849_hash,
94215 + [28442] = &_000850_hash,
94216 + [3514] = &_000851_hash,
94217 + [38151] = &_000852_hash,
94218 + [4173] = &_000853_hash,
94219 + [7258] = &_000854_hash,
94220 + [65109] = &_000855_hash,
94221 + [58827] = &_000856_hash,
94222 + [33575] = &_000857_hash,
94223 + [33078] = &_000858_hash,
94224 + [47234] = &_000859_hash,
94225 + [39193] = &_000860_hash,
94226 + [10950] = &_000861_hash,
94227 + [15613] = &_000862_hash,
94228 + [16046] = &_000863_hash,
94229 + [50172] = &_000864_hash,
94230 + [26107] = &_000865_hash,
94231 + [60543] = &_000866_hash,
94232 + [56337] = &_000867_hash,
94233 + [47626] = &_000868_hash,
94234 + [24409] = &_000869_hash,
94235 + [11732] = &_000870_hash,
94236 + [30010] = &_000871_hash,
94237 + [51480] = &_000872_hash,
94238 + [28518] = &_000873_hash,
94239 + [2061] = &_000874_hash,
94240 + [10885] = &_000875_hash,
94241 + [29517] = &_000876_hash,
94242 + [45913] = &_000877_hash,
94243 + [51774] = &_000878_hash,
94244 + [62298] = &_000879_hash,
94245 + [8892] = &_000880_hash,
94246 + [64891] = &_000881_hash,
94247 + [64537] = &_000882_hash,
94248 + [38103] = &_000883_hash,
94249 + [55518] = &_000884_hash,
94250 + [27419] = &_000885_hash,
94251 + [13869] = &_000886_hash,
94252 + [53150] = &_000887_hash,
94253 + [2884] = &_000888_hash,
94254 + [10362] = &_000889_hash,
94255 + [6961] = &_000890_hash,
94256 + [56975] = &_000891_hash,
94257 + [12508] = &_000892_hash,
94258 + [54597] = &_000893_hash,
94259 + [60499] = &_000894_hash,
94260 + [50109] = &_000895_hash,
94261 + [944] = &_000896_hash,
94262 + [29229] = &_000897_hash,
94263 + [37648] = &_000898_hash,
94264 + [1568] = &_000899_hash,
94265 + [61793] = &_000900_hash,
94266 + [53395] = &_000901_hash,
94267 + [5519] = &_000902_hash,
94268 + [28637] = &_000903_hash,
94269 + [53687] = &_000904_hash,
94270 + [6783] = &_000905_hash,
94271 + [43312] = &_000906_hash,
94272 + [2373] = &_000907_hash,
94273 + [33482] = &_000908_hash,
94274 + [24886] = &_000909_hash,
94275 + [48154] = &_000910_hash,
94276 + [12838] = &_000911_hash,
94277 + [47012] = &_000912_hash,
94278 + [23691] = &_000913_hash,
94279 + [37924] = &_000914_hash,
94280 + [47346] = &_000915_hash,
94281 + [5624] = &_000916_hash,
94282 + [16842] = &_000918_hash,
94283 + [60399] = &_000919_hash,
94284 + [2312] = &_000920_hash,
94285 + [59212] = &_000921_hash,
94286 + [11923] = &_000922_hash,
94287 + [10805] = &_000923_hash,
94288 + [36577] = &_000924_hash,
94289 + [60948] = &_000925_hash,
94290 + [21711] = &_000926_hash,
94291 + [54830] = &_000927_hash,
94292 + [1822] = &_000928_hash,
94293 + [44573] = &_000929_hash,
94294 + [23805] = &_000930_hash,
94295 + [46061] = &_000931_hash,
94296 + [33996] = &_000932_hash,
94297 + [40856] = &_000933_hash,
94298 + [16299] = &_000934_hash,
94299 + [63446] = &_000935_hash,
94300 + [31205] = &_000936_hash,
94301 + [33100] = &_000937_hash,
94302 + [40843] = &_000938_hash,
94303 + [23712] = &_000939_hash,
94304 + [36962] = &_000940_hash,
94305 + [9845] = &_000942_hash,
94306 + [13738] = &_000943_hash,
94307 + [58099] = &_000944_hash,
94308 + [31869] = &_000945_hash,
94309 + [63501] = &_000946_hash,
94310 + [58188] = &_000947_hash,
94311 + [51338] = &_000948_hash,
94312 + [54999] = &_000949_hash,
94313 + [2434] = &_000950_hash,
94314 + [34958] = &_000951_hash,
94315 + [41487] = &_000952_hash,
94316 + [11941] = &_000953_hash,
94317 + [56728] = &_000954_hash,
94318 + [48150] = &_000955_hash,
94319 + [13905] = &_000956_hash,
94320 + [9054] = &_000957_hash,
94321 + [10758] = &_000958_hash,
94322 + [48056] = &_000959_hash,
94323 + [24231] = &_000960_hash,
94324 + [43748] = &_000961_hash,
94325 + [24237] = &_000962_hash,
94326 + [14899] = &_000963_hash,
94327 + [38652] = &_000964_hash,
94328 + [65013] = &_000965_hash,
94329 + [16645] = &_000967_hash,
94330 + [55031] = &_000968_hash,
94331 + [23978] = &_000969_hash,
94332 + [24208] = &_000970_hash,
94333 + [18098] = &_000971_hash,
94334 + [2303] = &_000972_hash,
94335 + [3338] = &_000973_hash,
94336 + [39219] = &_000974_hash,
94337 + [18609] = &_000976_hash,
94338 + [64412] = &_000977_hash,
94339 + [16962] = &_000978_hash,
94340 + [26346] = &_000979_hash,
94341 + [39380] = &_000980_hash,
94342 + [33020] = &_000981_hash,
94343 + [22639] = &_000982_hash,
94344 + [6453] = &_000983_hash,
94345 + [58602] = &_000984_hash,
94346 + [50920] = &_000985_hash,
94347 + [56471] = &_000987_hash,
94348 + [15378] = &_000988_hash,
94349 + [3589] = &_000989_hash,
94350 + [12558] = &_000990_hash,
94351 + [3201] = &_000991_hash,
94352 + [28175] = &_000993_hash,
94353 + [43888] = &_000995_hash,
94354 + [56010] = &_000996_hash,
94355 + [32456] = &_000997_hash,
94356 + [29036] = &_000998_hash,
94357 + [32330] = &_000999_hash,
94358 + [25603] = &_001000_hash,
94359 + [17675] = &_001001_hash,
94360 + [36271] = &_001002_hash,
94361 + [49814] = &_001003_hash,
94362 + [5693] = &_001004_hash,
94363 + [51009] = &_001005_hash,
94364 + [62835] = &_001006_hash,
94365 + [27139] = &_001007_hash,
94366 + [45155] = &_001008_hash,
94367 + [17186] = &_001009_hash,
94368 + [46734] = &_001010_hash,
94369 + [61957] = &_001011_hash,
94370 + [51389] = &_001012_hash,
94371 + [23687] = &_001013_hash,
94372 + [46825] = &_001014_hash,
94373 + [52287] = &_001016_hash,
94374 + [31408] = &_001017_hash,
94375 + [5396] = &_001018_hash,
94376 + [62247] = &_001019_hash,
94377 + [7946] = &_001020_hash,
94378 + [58210] = &_001022_hash,
94379 + [15618] = &_001023_hash,
94380 + [61225] = &_001024_hash,
94381 + [13163] = &_001025_hash,
94382 + [36882] = &_001026_hash,
94383 + [8719] = &_001027_hash,
94384 + [8539] = &_001028_hash,
94385 + [27134] = &_001029_hash,
94386 + [53335] = &_001030_hash,
94387 + [30381] = &_001031_hash,
94388 + [32336] = &_001032_hash,
94389 + [32867] = &_001033_hash,
94390 + [1238] = &_001034_hash,
94391 + [8174] = &_001035_hash,
94392 + [6368] = &_001036_hash,
94393 + [29170] = &_001037_hash,
94394 + [9687] = &_001038_hash,
94395 + [61116] = &_001039_hash,
94396 + [31681] = &_001040_hash,
94397 + [22119] = &_001041_hash,
94398 + [59885] = &_001042_hash,
94399 + [47789] = &_001043_hash,
94400 + [5796] = &_001044_hash,
94401 + [43376] = &_001045_hash,
94402 + [36706] = &_001046_hash,
94403 + [47945] = &_001047_hash,
94404 + [33208] = &_001048_hash,
94405 + [55431] = &_001049_hash,
94406 + [25291] = &_001050_hash,
94407 + [58805] = &_001051_hash,
94408 + [23708] = &_001052_hash,
94409 + [29278] = &_001053_hash,
94410 + [1272] = &_001054_hash,
94411 + [10199] = &_001055_hash,
94412 + [34666] = &_001056_hash,
94413 + [49317] = &_001057_hash,
94414 + [18604] = &_001058_hash,
94415 + [42545] = &_001059_hash,
94416 + [33157] = &_001060_hash,
94417 + [53343] = &_001061_hash,
94418 + [64842] = &_001062_hash,
94419 + [61865] = &_001063_hash,
94420 + [54010] = &_001064_hash,
94421 + [64638] = &_001065_hash,
94422 + [20480] = &_001066_hash,
94423 + [23341] = &_001067_hash,
94424 + [10350] = &_001068_hash,
94425 + [30970] = &_001069_hash,
94426 + [62360] = &_001070_hash,
94427 + [52537] = &_001071_hash,
94428 + [51386] = &_001072_hash,
94429 + [48731] = &_001073_hash,
94430 + [58061] = &_001074_hash,
94431 + [40405] = &_001075_hash,
94432 + [57198] = &_001076_hash,
94433 + [19290] = &_001077_hash,
94434 + [60403] = &_001078_hash,
94435 + [2738] = &_001079_hash,
94436 + [59721] = &_001080_hash,
94437 + [24980] = &_001081_hash,
94438 + [55896] = &_001082_hash,
94439 + [57055] = &_001083_hash,
94440 + [46010] = &_001084_hash,
94441 + [712] = &_001085_hash,
94442 + [37747] = &_001086_hash,
94443 + [59996] = &_001087_hash,
94444 + [45219] = &_001088_hash,
94445 + [16917] = &_001089_hash,
94446 + [7415] = &_001090_hash,
94447 + [29576] = &_001091_hash,
94448 + [13584] = &_001092_hash,
94449 + [53364] = &_001093_hash,
94450 + [14813] = &_001094_hash,
94451 + [25543] = &_001095_hash,
94452 + [29240] = &_001096_hash,
94453 + [38748] = &_001097_hash,
94454 + [42270] = &_001098_hash,
94455 + [34848] = &_001099_hash,
94456 + [46226] = &_001100_hash,
94457 + [55526] = &_001101_hash,
94458 + [48271] = &_001102_hash,
94459 + [24658] = &_001104_hash,
94460 + [46964] = &_001105_hash,
94461 + [2637] = &_001106_hash,
94462 + [55601] = &_001107_hash,
94463 + [60275] = &_001108_hash,
94464 + [52645] = &_001109_hash,
94465 + [11712] = &_001110_hash,
94466 + [51364] = &_001111_hash,
94467 + [5106] = &_001112_hash,
94468 + [24710] = &_001113_hash,
94469 + [13101] = &_001114_hash,
94470 + [46963] = &_001115_hash,
94471 + [6779] = &_001116_hash,
94472 + [9237] = &_001117_hash,
94473 + [61524] = &_001118_hash,
94474 + [38247] = &_001119_hash,
94475 + [48715] = &_001120_hash,
94476 + [40797] = &_001121_hash,
94477 + [46780] = &_001122_hash,
94478 + [22071] = &_001123_hash,
94479 + [49735] = &_001125_hash,
94480 + [63925] = &_001126_hash,
94481 + [30902] = &_001127_hash,
94482 + [39828] = &_001128_hash,
94483 + [53089] = &_001129_hash,
94484 + [6394] = &_001130_hash,
94485 + [5116] = &_001131_hash,
94486 + [50702] = &_001132_hash,
94487 + [59565] = &_001133_hash,
94488 + [61042] = &_001134_hash,
94489 + [14533] = &_001135_hash,
94490 + [23807] = &_001136_hash,
94491 + [24296] = &_001137_hash,
94492 + [8808] = &_001138_hash,
94493 + [52383] = &_001139_hash,
94494 + [30487] = &_001140_hash,
94495 + [30125] = &_001141_hash,
94496 + [40665] = &_001142_hash,
94497 + [60809] = &_001143_hash,
94498 + [4842] = &_001144_hash,
94499 + [13955] = &_001145_hash,
94500 + [33237] = &_001146_hash,
94501 + [40673] = &_001147_hash,
94502 + [48026] = &_001148_hash,
94503 + [64033] = &_001149_hash,
94504 + [13879] = &_001150_hash,
94505 + [60114] = &_001151_hash,
94506 + [19472] = &_001152_hash,
94507 + [33552] = &_001153_hash,
94508 + [28575] = &_001154_hash,
94509 + [19696] = &_001155_hash,
94510 + [19742] = &_001156_hash,
94511 + [15286] = &_001157_hash,
94512 + [24629] = &_001158_hash,
94513 + [28382] = &_001159_hash,
94514 + [18962] = &_001160_hash,
94515 + [45796] = &_001161_hash,
94516 + [51632] = &_001162_hash,
94517 + [16907] = &_001163_hash,
94518 + [49336] = &_001164_hash,
94519 + [25316] = &_001165_hash,
94520 + [39978] = &_001166_hash,
94521 + [8091] = &_001167_hash,
94522 + [30680] = &_001168_hash,
94523 + [2066] = &_001169_hash,
94524 + [24271] = &_001170_hash,
94525 + [34934] = &_001171_hash,
94526 + [29208] = &_001172_hash,
94527 + [18591] = &_001173_hash,
94528 + [24373] = &_001174_hash,
94529 + [41485] = &_001175_hash,
94530 + [45487] = &_001176_hash,
94531 + [29299] = &_001177_hash,
94532 + [53920] = &_001178_hash,
94533 + [25407] = &_001179_hash,
94534 + [5525] = &_001180_hash,
94535 + [3531] = &_001181_hash,
94536 + [25143] = &_001182_hash,
94537 + [56046] = &_001183_hash,
94538 + [34693] = &_001184_hash,
94539 + [48644] = &_001185_hash,
94540 + [21226] = &_001186_hash,
94541 + [14051] = &_001187_hash,
94542 + [7715] = &_001188_hash,
94543 + [30413] = &_001189_hash,
94544 + [13681] = &_001190_hash,
94545 + [6554] = &_001191_hash,
94546 + [12228] = &_001192_hash,
94547 + [25497] = &_001193_hash,
94548 + [52228] = &_001194_hash,
94549 + [49069] = &_001195_hash,
94550 + [26961] = &_001196_hash,
94551 + [13768] = &_001197_hash,
94552 + [56185] = &_001198_hash,
94553 + [41838] = &_001199_hash,
94554 + [60119] = &_001200_hash,
94555 + [3112] = &_001201_hash,
94556 + [62001] = &_001202_hash,
94557 + [35888] = &_001203_hash,
94558 + [54718] = &_001206_hash,
94559 + [64177] = &_001207_hash,
94560 + [57222] = &_001208_hash,
94561 + [5260] = &_001209_hash,
94562 + [55517] = &_001210_hash,
94563 + [18186] = &_001211_hash,
94564 + [14257] = &_001212_hash,
94565 + [26846] = &_001213_hash,
94566 + [56097] = &_001214_hash,
94567 + [55151] = &_001215_hash,
94568 + [2999] = &_001216_hash,
94569 + [3602] = &_001217_hash,
94570 + [18460] = &_001218_hash,
94571 + [3507] = &_001219_hash,
94572 + [57847] = &_001220_hash,
94573 + [58077] = &_001221_hash,
94574 + [2659] = &_001222_hash,
94575 + [39846] = &_001223_hash,
94576 + [18629] = &_001224_hash,
94577 + [2723] = &_001225_hash,
94578 + [45230] = &_001226_hash,
94579 + [26941] = &_001227_hash,
94580 + [4344] = &_001228_hash,
94581 + [8487] = &_001229_hash,
94582 + [9901] = &_001230_hash,
94583 + [43061] = &_001231_hash,
94584 + [42551] = &_001232_hash,
94585 + [63272] = &_001233_hash,
94586 + [37771] = &_001234_hash,
94587 + [28261] = &_001235_hash,
94588 + [44694] = &_001236_hash,
94589 + [8573] = &_001237_hash,
94590 + [60174] = &_001238_hash,
94591 + [28040] = &_001239_hash,
94592 + [39423] = &_001240_hash,
94593 + [98] = &_001241_hash,
94594 + [62874] = &_001242_hash,
94595 + [38726] = &_001243_hash,
94596 + [55348] = &_001244_hash,
94597 + [10997] = &_001245_hash,
94598 + [88] = &_001246_hash,
94599 + [60639] = &_001247_hash,
94600 + [48159] = &_001248_hash,
94601 + [47899] = &_001249_hash,
94602 + [25367] = &_001250_hash,
94603 + [55681] = &_001251_hash,
94604 + [44716] = &_001252_hash,
94605 + [26161] = &_001253_hash,
94606 + [55347] = &_001254_hash,
94607 + [14518] = &_001255_hash,
94608 + [8887] = &_001256_hash,
94609 + [23009] = &_001257_hash,
94610 + [27962] = &_001258_hash,
94611 + [20004] = &_001259_hash,
94612 + [61750] = &_001260_hash,
94613 + [11661] = &_001261_hash,
94614 + [37118] = &_001262_hash,
94615 + [9370] = &_001263_hash,
94616 + [15099] = &_001264_hash,
94617 + [2404] = &_001265_hash,
94618 + [64074] = &_001266_hash,
94619 + [7538] = &_001267_hash,
94620 + [19736] = &_001268_hash,
94621 + [8199] = &_001269_hash,
94622 + [40711] = &_001270_hash,
94623 + [47859] = &_001271_hash,
94624 + [53925] = &_001272_hash,
94625 + [46888] = &_001273_hash,
94626 + [21783] = &_001274_hash,
94627 + [37305] = &_001275_hash,
94628 + [18414] = &_001276_hash,
94629 + [62423] = &_001277_hash,
94630 + [30371] = &_001278_hash,
94631 + [32617] = &_001279_hash,
94632 + [14530] = &_001281_hash,
94633 + [48623] = &_001282_hash,
94634 + [12845] = &_001283_hash,
94635 + [8895] = &_001284_hash,
94636 + [33661] = &_001285_hash,
94637 + [23178] = &_001286_hash,
94638 + [54706] = &_001287_hash,
94639 + [27133] = &_001288_hash,
94640 + [52745] = &_001289_hash,
94641 + [64420] = &_001290_hash,
94642 + [25617] = &_001291_hash,
94643 + [25414] = &_001292_hash,
94644 + [20445] = &_001293_hash,
94645 + [64006] = &_001294_hash,
94646 + [52646] = &_001295_hash,
94647 + [30281] = &_001296_hash,
94648 + [3761] = &_001297_hash,
94649 + [44345] = &_001298_hash,
94650 + [14713] = &_001299_hash,
94651 + [26043] = &_001300_hash,
94652 + [41679] = &_001301_hash,
94653 + [6267] = &_001302_hash,
94654 + [22247] = &_001304_hash,
94655 + [9440] = &_001305_hash,
94656 + [54676] = &_001306_hash,
94657 + [53982] = &_001308_hash,
94658 + [9467] = &_001309_hash,
94659 + [53419] = &_001310_hash,
94660 + [1424] = &_001311_hash,
94661 + [17561] = &_001312_hash,
94662 + [28161] = &_001313_hash,
94663 + [57262] = &_001314_hash,
94664 + [61071] = &_001315_hash,
94665 + [20067] = &_001316_hash,
94666 + [34321] = &_001317_hash,
94667 + [56199] = &_001318_hash,
94668 + [29070] = &_001319_hash,
94669 + [15698] = &_001320_hash,
94670 + [14173] = &_001321_hash,
94671 + [41224] = &_001322_hash,
94672 + [56438] = &_001323_hash,
94673 + [41894] = &_001324_hash,
94674 + [20885] = &_001325_hash,
94675 + [23275] = &_001326_hash,
94676 + [45043] = &_001327_hash,
94677 + [22143] = &_001328_hash,
94678 + [38029] = &_001329_hash,
94679 + [55343] = &_001330_hash,
94680 + [40624] = &_001331_hash,
94681 + [26476] = &_001332_hash,
94682 + [43128] = &_001333_hash,
94683 + [45115] = &_001334_hash,
94684 + [32958] = &_001335_hash,
94685 + [43091] = &_001336_hash,
94686 + [33299] = &_001337_hash,
94687 + [55021] = &_001338_hash,
94688 + [5509] = &_001339_hash,
94689 + [53012] = &_001340_hash,
94690 + [57849] = &_001341_hash,
94691 + [63282] = &_001342_hash,
94692 + [27883] = &_001343_hash,
94693 + [1670] = &_001344_hash,
94694 + [24095] = &_001345_hash,
94695 + [47810] = &_001346_hash,
94696 + [40759] = &_001347_hash,
94697 + [42139] = &_001348_hash,
94698 + [50484] = &_001349_hash,
94699 + [2305] = &_001350_hash,
94700 + [59832] = &_001351_hash,
94701 + [17662] = &_001352_hash,
94702 + [58943] = &_001353_hash,
94703 + [37417] = &_001356_hash,
94704 + [25127] = &_001357_hash,
94705 + [15006] = &_001358_hash,
94706 + [54292] = &_001359_hash,
94707 + [30642] = &_001360_hash,
94708 + [39939] = &_001361_hash,
94709 + [34818] = &_001362_hash,
94710 + [23378] = &_001363_hash,
94711 + [24090] = &_001364_hash,
94712 + [11111] = &_001365_hash,
94713 + [64141] = &_001366_hash,
94714 + [46457] = &_001367_hash,
94715 + [57927] = &_001368_hash,
94716 + [58877] = &_001371_hash,
94717 + [13880] = &_001372_hash,
94718 + [62888] = &_001373_hash,
94719 + [57962] = &_001374_hash,
94720 + [9117] = &_001375_hash,
94721 + [52012] = &_001376_hash,
94722 + [49246] = &_001377_hash,
94723 + [52701] = &_001378_hash,
94724 + [29857] = &_001379_hash,
94725 + [49420] = &_001380_hash,
94726 + [45897] = &_001381_hash,
94727 + [15141] = &_001382_hash,
94728 + [24177] = &_001383_hash,
94729 + [10325] = &_001384_hash,
94730 + [52861] = &_001385_hash,
94731 + [28922] = &_001386_hash,
94732 + [31089] = &_001387_hash,
94733 + [63084] = &_001388_hash,
94734 + [26245] = &_001389_hash,
94735 + [60000] = &_001390_hash,
94736 + [56935] = &_001391_hash,
94737 + [37569] = &_001392_hash,
94738 + [6446] = &_001394_hash,
94739 + [35883] = &_001395_hash,
94740 + [9123] = &_001396_hash,
94741 + [51457] = &_001397_hash,
94742 + [1787] = &_001398_hash,
94743 + [10135] = &_001399_hash,
94744 + [952] = &_001400_hash,
94745 + [53578] = &_001401_hash,
94746 + [9923] = &_001402_hash,
94747 + [45249] = &_001403_hash,
94748 + [52860] = &_001404_hash,
94749 + [29558] = &_001405_hash,
94750 + [40556] = &_001406_hash,
94751 + [53210] = &_001407_hash,
94752 + [2506] = &_001408_hash,
94753 + [48262] = &_001409_hash,
94754 + [46939] = &_001410_hash,
94755 + [17901] = &_001411_hash,
94756 + [27204] = &_001412_hash,
94757 + [52516] = &_001413_hash,
94758 + [55885] = &_001414_hash,
94759 + [6681] = &_001415_hash,
94760 + [42360] = &_001416_hash,
94761 + [20259] = &_001417_hash,
94762 + [8874] = &_001418_hash,
94763 + [53363] = &_001419_hash,
94764 + [17500] = &_001420_hash,
94765 + [63988] = &_001421_hash,
94766 + [26378] = &_001422_hash,
94767 + [7768] = &_001423_hash,
94768 + [12938] = &_001424_hash,
94769 + [6755] = &_001425_hash,
94770 + [43806] = &_001426_hash,
94771 + [15976] = &_001427_hash,
94772 + [2732] = &_001428_hash,
94773 + [2519] = &_001429_hash,
94774 + [14340] = &_001430_hash,
94775 + [34772] = &_001431_hash,
94776 + [36433] = &_001432_hash,
94777 + [16068] = &_001433_hash,
94778 + [22052] = &_001434_hash,
94779 + [8929] = &_001435_hash,
94780 + [63220] = &_001436_hash,
94781 + [18246] = &_001437_hash,
94782 + [37678] = &_001438_hash,
94783 + [4932] = &_001439_hash,
94784 + [46960] = &_001440_hash,
94785 + [16909] = &_001441_hash,
94786 + [44429] = &_001442_hash,
94787 + [59514] = &_001443_hash,
94788 + [62760] = &_001444_hash,
94789 + [41841] = &_001445_hash,
94790 + [25417] = &_001446_hash,
94791 + [63230] = &_001447_hash,
94792 + [39532] = &_001448_hash,
94793 + [24688] = &_001449_hash,
94794 + [18555] = &_001450_hash,
94795 + [54499] = &_001451_hash,
94796 + [10719] = &_001452_hash,
94797 + [1644] = &_001453_hash,
94798 + [15109] = &_001454_hash,
94799 + [15787] = &_001455_hash,
94800 + [57869] = &_001456_hash,
94801 + [54445] = &_001457_hash,
94802 + [19398] = &_001458_hash,
94803 + [9488] = &_001459_hash,
94804 + [12587] = &_001460_hash,
94805 + [17124] = &_001461_hash,
94806 + [53665] = &_001462_hash,
94807 + [40386] = &_001463_hash,
94808 + [39444] = &_001464_hash,
94809 + [28873] = &_001465_hash,
94810 + [11290] = &_001466_hash,
94811 + [51313] = &_001467_hash,
94812 + [23354] = &_001469_hash,
94813 + [49559] = &_001470_hash,
94814 + [49312] = &_001471_hash,
94815 + [36333] = &_001472_hash,
94816 + [59349] = &_001473_hash,
94817 + [60316] = &_001474_hash,
94818 + [2546] = &_001475_hash,
94819 + [57483] = &_001476_hash,
94820 + [14569] = &_001478_hash,
94821 + [61842] = &_001481_hash,
94822 + [32923] = &_001482_hash,
94823 + [57471] = &_001483_hash,
94824 + [83] = &_001484_hash,
94825 + [40242] = &_001485_hash,
94826 + [42578] = &_001486_hash,
94827 + [62037] = &_001487_hash,
94828 + [8131] = &_001488_hash,
94829 + [752] = &_001489_hash,
94830 + [56376] = &_001490_hash,
94831 + [22290] = &_001491_hash,
94832 + [46232] = &_001492_hash,
94833 + [35132] = &_001493_hash,
94834 + [23825] = &_001494_hash,
94835 + [43262] = &_001495_hash,
94836 + [8138] = &_001496_hash,
94837 + [31489] = &_001497_hash,
94838 + [57578] = &_001498_hash,
94839 + [28007] = &_001499_hash,
94840 + [28688] = &_001500_hash,
94841 + [19319] = &_001501_hash,
94842 + [12575] = &_001502_hash,
94843 + [62762] = &_001504_hash,
94844 + [47450] = &_001505_hash,
94845 + [1869] = &_001506_hash,
94846 + [51225] = &_001507_hash,
94847 + [19561] = &_001508_hash,
94848 + [64894] = &_001509_hash,
94849 + [6829] = &_001510_hash,
94850 + [30644] = &_001511_hash,
94851 + [63391] = &_001512_hash,
94852 + [11655] = &_001514_hash,
94853 + [28229] = &_001515_hash,
94854 + [22382] = &_001516_hash,
94855 + [22649] = &_001517_hash,
94856 + [42619] = &_001518_hash,
94857 + [19761] = &_001519_hash,
94858 + [56990] = &_001520_hash,
94859 + [19531] = &_001521_hash,
94860 + [26514] = &_001522_hash,
94861 + [56773] = &_001523_hash,
94862 + [15563] = &_001524_hash,
94863 + [26212] = &_001525_hash,
94864 + [29203] = &_001526_hash,
94865 + [32768] = &_001527_hash,
94866 + [15110] = &_001528_hash,
94867 + [3885] = &_001529_hash,
94868 + [13788] = &_001530_hash,
94869 + [27875] = &_001531_hash,
94870 + [54959] = &_001532_hash,
94871 + [20945] = &_001533_hash,
94872 + [59640] = &_001534_hash,
94873 + [4693] = &_001535_hash,
94874 + [13793] = &_001536_hash,
94875 + [25659] = &_001537_hash,
94876 + [18734] = &_001538_hash,
94877 + [17869] = &_001539_hash,
94878 + [26270] = &_001540_hash,
94879 + [18458] = &_001541_hash,
94880 + [58468] = &_001542_hash,
94881 + [61257] = &_001543_hash,
94882 + [39946] = &_001544_hash,
94883 + [52382] = &_001545_hash,
94884 + [18428] = &_001546_hash,
94885 + [31069] = &_001547_hash,
94886 + [61614] = &_001548_hash,
94887 + [60044] = &_001549_hash,
94888 + [36818] = &_001550_hash,
94889 + [54353] = &_001551_hash,
94890 + [55994] = &_001552_hash,
94891 + [65142] = &_001553_hash,
94892 + [1664] = &_001554_hash,
94893 + [32212] = &_001555_hash,
94894 + [63087] = &_001556_hash,
94895 + [29916] = &_001557_hash,
94896 + [54912] = &_001558_hash,
94897 + [10318] = &_001559_hash,
94898 + [44031] = &_001560_hash,
94899 + [50108] = &_001561_hash,
94900 + [57812] = &_001562_hash,
94901 + [63190] = &_001563_hash,
94902 + [48246] = &_001564_hash,
94903 + [3744] = &_001565_hash,
94904 + [56321] = &_001566_hash,
94905 + [42691] = &_001567_hash,
94906 + [62052] = &_001568_hash,
94907 + [21999] = &_001569_hash,
94908 + [13672] = &_001570_hash,
94909 + [20648] = &_001571_hash,
94910 + [42500] = &_001572_hash,
94911 + [22795] = &_001573_hash,
94912 + [19496] = &_001574_hash,
94913 + [35556] = &_001575_hash,
94914 + [57144] = &_001576_hash,
94915 + [1019] = &_001577_hash,
94916 + [28818] = &_001578_hash,
94917 + [52880] = &_001579_hash,
94918 + [6543] = &_001580_hash,
94919 + [18895] = &_001581_hash,
94920 + [857] = &_001582_hash,
94921 + [45966] = &_001583_hash,
94922 + [11785] = &_001584_hash,
94923 + [7736] = &_001585_hash,
94924 + [4308] = &_001586_hash,
94925 + [51095] = &_001587_hash,
94926 + [12101] = &_001588_hash,
94927 + [427] = &_001589_hash,
94928 + [4021] = &_001590_hash,
94929 + [54201] = &_001591_hash,
94930 + [5615] = &_001592_hash,
94931 + [16234] = &_001593_hash,
94932 + [51718] = &_001594_hash,
94933 + [42390] = &_001595_hash,
94934 + [55391] = &_001596_hash,
94935 + [28539] = &_001597_hash,
94936 + [943] = &_001598_hash,
94937 + [32683] = &_001599_hash,
94938 + [39182] = &_001600_hash,
94939 + [33198] = &_001601_hash,
94940 + [39446] = &_001602_hash,
94941 + [16394] = &_001603_hash,
94942 + [30791] = &_001604_hash,
94943 + [35530] = &_001605_hash,
94944 + [53193] = &_001607_hash,
94945 + [39401] = &_001608_hash,
94946 + [28624] = &_001609_hash,
94947 + [12066] = &_001610_hash,
94948 + [63492] = &_001611_hash,
94949 + [14897] = &_001612_hash,
94950 + [29641] = &_001613_hash,
94951 + [10165] = &_001614_hash,
94952 + [60046] = &_001615_hash,
94953 + [12429] = &_001616_hash,
94954 + [32788] = &_001617_hash,
94955 + [52698] = &_001618_hash,
94956 + [13130] = &_001620_hash,
94957 + [28643] = &_001621_hash,
94958 + [50666] = &_001622_hash,
94959 + [35126] = &_001623_hash,
94960 + [33593] = &_001624_hash,
94961 + [27547] = &_001625_hash,
94962 + [5484] = &_001626_hash,
94963 + [26642] = &_001627_hash,
94964 + [25586] = &_001628_hash,
94965 + [58757] = &_001629_hash,
94966 + [18701] = &_001630_hash,
94967 + [26271] = &_001631_hash,
94968 + [23829] = &_001632_hash,
94969 + [63659] = &_001634_hash,
94970 + [26603] = &_001635_hash,
94971 + [25704] = &_001636_hash,
94972 + [21149] = &_001637_hash,
94973 + [36900] = &_001638_hash,
94974 + [61577] = &_001640_hash,
94975 + [54095] = &_001641_hash,
94976 + [31650] = &_001642_hash,
94977 + [48970] = &_001643_hash,
94978 + [49357] = &_001644_hash,
94979 + [33835] = &_001645_hash,
94980 + [46861] = &_001646_hash,
94981 + [1428] = &_001647_hash,
94982 + [36247] = &_001648_hash,
94983 + [21600] = &_001649_hash,
94984 + [24747] = &_001650_hash,
94985 + [51012] = &_001651_hash,
94986 + [38974] = &_001653_hash,
94987 + [30549] = &_001655_hash,
94988 + [40146] = &_001656_hash,
94989 + [41756] = &_001657_hash,
94990 + [37010] = &_001658_hash,
94991 + [35072] = &_001660_hash,
94992 + [2114] = &_001661_hash,
94993 + [48424] = &_001662_hash,
94994 + [61522] = &_001663_hash,
94995 + [50633] = &_001664_hash,
94996 + [2283] = &_001665_hash,
94997 + [61763] = &_001666_hash,
94998 + [48195] = &_001667_hash,
94999 + [31000] = &_001668_hash,
95000 + [23856] = &_001669_hash,
95001 + [37421] = &_001670_hash,
95002 + [10019] = &_001672_hash,
95003 + [5148] = &_001673_hash,
95004 + [14363] = &_001674_hash,
95005 + [57354] = &_001675_hash,
95006 + [62460] = &_001676_hash,
95007 + [45174] = &_001677_hash,
95008 + [31054] = &_001678_hash,
95009 + [62195] = &_001679_hash,
95010 + [14976] = &_001680_hash,
95011 + [55676] = &_001681_hash,
95012 + [1025] = &_001682_hash,
95013 + [6921] = &_001683_hash,
95014 + [22158] = &_001684_hash,
95015 + [18050] = &_001685_hash,
95016 + [18612] = &_001686_hash,
95017 + [31107] = &_001687_hash,
95018 + [45212] = &_001688_hash,
95019 + [29599] = &_001689_hash,
95020 + [30827] = &_001690_hash,
95021 + [25086] = &_001691_hash,
95022 + [27146] = &_001692_hash,
95023 + [2467] = &_001693_hash,
95024 + [45786] = &_001694_hash,
95025 + [51909] = &_001695_hash,
95026 + [64604] = &_001696_hash,
95027 + [57819] = &_001697_hash,
95028 + [11001] = &_001698_hash,
95029 + [20326] = &_001699_hash,
95030 + [12682] = &_001700_hash,
95031 + [28932] = &_001701_hash,
95032 + [53491] = &_001702_hash,
95033 + [63894] = &_001703_hash,
95034 + [51191] = &_001704_hash,
95035 + [59759] = &_001705_hash,
95036 + [15691] = &_001706_hash,
95037 + [38786] = &_001707_hash,
95038 + [51546] = &_001708_hash,
95039 + [10121] = &_001709_hash,
95040 + [60786] = &_001710_hash,
95041 + [19952] = &_001712_hash,
95042 + [7271] = &_001715_hash,
95043 + [10729] = &_001716_hash,
95044 + [28883] = &_001717_hash,
95045 + [52042] = &_001718_hash,
95046 + [49606] = &_001719_hash,
95047 + [33243] = &_001720_hash,
95048 + [57341] = &_001721_hash,
95049 + [7978] = &_001722_hash,
95050 + [36330] = &_001723_hash,
95051 + [39035] = &_001724_hash,
95052 + [34498] = &_001725_hash,
95053 + [19789] = &_001726_hash,
95054 + [55685] = &_001727_hash,
95055 + [55419] = &_001728_hash,
95056 + [27798] = &_001729_hash,
95057 + [54599] = &_001730_hash,
95058 + [65522] = &_001731_hash,
95059 + [38111] = &_001732_hash,
95060 + [57077] = &_001733_hash,
95061 + [53053] = &_001734_hash,
95062 + [14190] = &_001735_hash,
95063 + [47037] = &_001736_hash,
95064 + [33296] = &_001737_hash,
95065 + [23803] = &_001738_hash,
95066 + [48773] = &_001739_hash,
95067 + [63014] = &_001740_hash,
95068 + [64392] = &_001741_hash,
95069 + [44203] = &_001742_hash,
95070 + [47717] = &_001743_hash,
95071 + [38399] = &_001744_hash,
95072 + [30385] = &_001745_hash,
95073 + [61693] = &_001746_hash,
95074 + [32049] = &_001747_hash,
95075 + [26133] = &_001748_hash,
95076 + [45038] = &_001749_hash,
95077 + [8582] = &_001751_hash,
95078 + [38182] = &_001753_hash,
95079 + [62457] = &_001754_hash,
95080 + [27937] = &_001755_hash,
95081 + [3795] = &_001756_hash,
95082 + [23228] = &_001757_hash,
95083 + [56511] = &_001758_hash,
95084 + [47807] = &_001759_hash,
95085 + [60528] = &_001760_hash,
95086 + [51858] = &_001761_hash,
95087 + [49183] = &_001762_hash,
95088 + [33807] = &_001763_hash,
95089 + [34791] = &_001764_hash,
95090 + [8150] = &_001765_hash,
95091 + [19691] = &_001767_hash,
95092 + [20519] = &_001770_hash,
95093 + [17144] = &_001771_hash,
95094 + [14000] = &_001772_hash,
95095 + [31148] = &_001773_hash,
95096 + [62594] = &_001774_hash,
95097 + [39210] = &_001775_hash,
95098 + [2077] = &_001776_hash,
95099 + [23497] = &_001777_hash,
95100 + [34512] = &_001778_hash,
95101 + [16268] = &_001780_hash,
95102 + [14562] = &_001781_hash,
95103 + [17606] = &_001783_hash,
95104 + [25654] = &_001784_hash,
95105 + [56078] = &_001785_hash,
95106 + [61088] = &_001786_hash,
95107 + [53442] = &_001787_hash,
95108 + [54456] = &_001788_hash,
95109 + [22038] = &_001789_hash,
95110 + [58394] = &_001790_hash,
95111 + [38953] = &_001791_hash,
95112 + [16109] = &_001792_hash,
95113 + [3812] = &_001793_hash,
95114 + [5084] = &_001794_hash,
95115 + [41893] = &_001795_hash,
95116 + [45486] = &_001796_hash,
95117 + [50226] = &_001797_hash,
95118 + [63694] = &_001798_hash,
95119 + [56763] = &_001799_hash,
95120 + [20905] = &_001800_hash,
95121 + [13080] = &_001801_hash,
95122 + [54700] = &_001802_hash,
95123 + [40947] = &_001803_hash,
95124 + [32645] = &_001804_hash,
95125 + [57462] = &_001805_hash,
95126 + [33853] = &_001806_hash,
95127 + [57940] = &_001807_hash,
95128 + [45583] = &_001808_hash,
95129 + [14237] = &_001809_hash,
95130 + [27495] = &_001810_hash,
95131 + [12489] = &_001811_hash,
95132 +};
95133 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
95134 new file mode 100644
95135 index 0000000..4154daf
95136 --- /dev/null
95137 +++ b/tools/gcc/size_overflow_plugin.c
95138 @@ -0,0 +1,1188 @@
95139 +/*
95140 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
95141 + * Licensed under the GPL v2, or (at your option) v3
95142 + *
95143 + * Homepage:
95144 + * http://www.grsecurity.net/~ephox/overflow_plugin/
95145 + *
95146 + * This plugin recomputes expressions of function arguments marked with the size_overflow attribute
95147 + * with double integer precision (DImode/TImode for 32/64-bit integer types).
95148 + * The recomputed argument is checked against INT_MAX; on overflow an event is logged and the triggering process is killed.
95149 + *
95150 + * Usage:
95151 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o size_overflow_plugin.so size_overflow_plugin.c
95152 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
95153 + */
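As a concrete illustration of the marking described in the comment above: the attribute registered by this plugin attaches to a function type and takes the 1-based position(s) of the argument(s) to recompute, as validated by handle_size_overflow_attribute() below. A minimal, hypothetical declaration follows; the function and parameter names are invented, and only the attribute name and position semantics come from the plugin itself.

/* hypothetical annotation: recompute and range-check argument 1 (the size) */
void *copy_user_buf(unsigned long size, const void *src) __attribute__((size_overflow(1)));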
95154 +
95155 +#include "gcc-plugin.h"
95156 +#include "config.h"
95157 +#include "system.h"
95158 +#include "coretypes.h"
95159 +#include "tree.h"
95160 +#include "tree-pass.h"
95161 +#include "intl.h"
95162 +#include "plugin-version.h"
95163 +#include "tm.h"
95164 +#include "toplev.h"
95165 +#include "function.h"
95166 +#include "tree-flow.h"
95167 +#include "plugin.h"
95168 +#include "gimple.h"
95169 +#include "c-common.h"
95170 +#include "diagnostic.h"
95171 +#include "cfgloop.h"
95172 +
95173 +struct size_overflow_hash {
95174 + struct size_overflow_hash *next;
95175 + const char *name;
95176 + const char *file;
95177 + unsigned short param1:1;
95178 + unsigned short param2:1;
95179 + unsigned short param3:1;
95180 + unsigned short param4:1;
95181 + unsigned short param5:1;
95182 + unsigned short param6:1;
95183 + unsigned short param7:1;
95184 + unsigned short param8:1;
95185 + unsigned short param9:1;
95186 +};
95187 +
95188 +#include "size_overflow_hash.h"
95189 +
95190 +#define __unused __attribute__((__unused__))
95191 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
95192 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
95193 +#define BEFORE_STMT true
95194 +#define AFTER_STMT false
95195 +#define CREATE_NEW_VAR NULL_TREE
95196 +
95197 +int plugin_is_GPL_compatible;
95198 +void debug_gimple_stmt(gimple gs);
95199 +
95200 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
95201 +static tree signed_size_overflow_type;
95202 +static tree unsigned_size_overflow_type;
95203 +static tree report_size_overflow_decl;
95204 +static tree const_char_ptr_type_node;
95205 +static unsigned int handle_function(void);
95206 +static bool file_match = true;
95207 +
95208 +static struct plugin_info size_overflow_plugin_info = {
95209 + .version = "20120521beta",
95210 + .help = "no-size_overflow\tturn off size overflow checking\n",
95211 +};
95212 +
95213 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
95214 +{
95215 + unsigned int arg_count = type_num_arguments(*node);
95216 +
95217 + for (; args; args = TREE_CHAIN(args)) {
95218 + tree position = TREE_VALUE(args);
95219 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
95220 + error("handle_size_overflow_attribute: overflow parameter outside range.");
95221 + *no_add_attrs = true;
95222 + }
95223 + }
95224 + return NULL_TREE;
95225 +}
95226 +
95227 +static struct attribute_spec no_size_overflow_attr = {
95228 + .name = "size_overflow",
95229 + .min_length = 1,
95230 + .max_length = -1,
95231 + .decl_required = false,
95232 + .type_required = true,
95233 + .function_type_required = true,
95234 + .handler = handle_size_overflow_attribute
95235 +};
95236 +
95237 +static void register_attributes(void __unused *event_data, void __unused *data)
95238 +{
95239 + register_attribute(&no_size_overflow_attr);
95240 +}
95241 +
95242 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
95243 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
95244 +{
95245 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
95246 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
95247 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
95248 +
95249 + const unsigned int m = 0x57559429;
95250 + const unsigned int n = 0x5052acdb;
95251 + const unsigned int *key4 = (const unsigned int *)key;
95252 + unsigned int h = len;
95253 + unsigned int k = len + seed + n;
95254 + unsigned long long p;
95255 +
95256 + while (len >= 8) {
95257 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
95258 + len -= 8;
95259 + }
95260 + if (len >= 4) {
95261 + cwmixb(key4[0]) key4 += 1;
95262 + len -= 4;
95263 + }
95264 + if (len)
95265 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
95266 + cwmixb(h ^ (k + n));
95267 + return k ^ h;
95268 +
95269 +#undef cwfold
95270 +#undef cwmixa
95271 +#undef cwmixb
95272 +}
95273 +
95274 +static inline unsigned int get_hash_num(const char *fndecl, const char *loc_file, unsigned int seed)
95275 +{
95276 + unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
95277 + unsigned int file = CrapWow(loc_file, strlen(loc_file), seed) & 0xffff;
95278 +
95279 + if (file_match)
95280 + return fn ^ file;
95281 + else
95282 + return fn;
95283 +}
95284 +
95285 +static inline tree get_original_function_decl(tree fndecl)
95286 +{
95287 + if (DECL_ABSTRACT_ORIGIN(fndecl))
95288 + return DECL_ABSTRACT_ORIGIN(fndecl);
95289 + return fndecl;
95290 +}
95291 +
95292 +static inline gimple get_def_stmt(tree node)
95293 +{
95294 + gcc_assert(TREE_CODE(node) == SSA_NAME);
95295 + return SSA_NAME_DEF_STMT(node);
95296 +}
95297 +
95298 +static struct size_overflow_hash *get_function_hash(tree fndecl, const char *loc_file)
95299 +{
95300 + unsigned int hash;
95301 + struct size_overflow_hash *entry;
95302 + const char *func_name = NAME(fndecl);
95303 +
95304 + hash = get_hash_num(NAME(fndecl), loc_file, 0);
95305 +
95306 + entry = size_overflow_hash[hash];
95307 + while (entry) {
95308 + if (!strcmp(entry->name, func_name) && (!file_match || !strcmp(entry->file, loc_file)))
95309 + return entry;
95310 + entry = entry->next;
95311 + }
95312 +
95313 + return NULL;
95314 +}
95315 +
95316 +static void check_arg_type(tree var)
95317 +{
95318 + tree type = TREE_TYPE(var);
95319 + enum tree_code code = TREE_CODE(type);
95320 +
95321 + gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
95322 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
95323 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
95324 +}
95325 +
95326 +static int find_arg_number(tree arg, tree func)
95327 +{
95328 + tree var;
95329 + bool match = false;
95330 + unsigned int argnum = 1;
95331 +
95332 + if (TREE_CODE(arg) == SSA_NAME)
95333 + arg = SSA_NAME_VAR(arg);
95334 +
95335 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
95336 + if (strcmp(NAME(arg), NAME(var))) {
95337 + argnum++;
95338 + continue;
95339 + }
95340 + check_arg_type(var);
95341 +
95342 + match = true;
95343 + if (!TYPE_UNSIGNED(TREE_TYPE(var)))
95344 + return 0;
95345 + break;
95346 + }
95347 + if (!match) {
95348 + warning(0, "find_arg_number: cannot find the %s argument in %s", NAME(arg), NAME(func));
95349 + return 0;
95350 + }
95351 + return argnum;
95352 +}
95353 +
95354 +static void print_missing_msg(tree func, const char *filename, unsigned int argnum)
95355 +{
95356 + unsigned int new_hash;
95357 + location_t loc = DECL_SOURCE_LOCATION(func);
95358 + const char *curfunc = NAME(func);
95359 +
95360 + new_hash = get_hash_num(curfunc, filename, 0);
95361 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+%s+", curfunc, curfunc, argnum, new_hash, filename);
95362 +}
95363 +
95364 +static void check_missing_attribute(tree arg)
95365 +{
95366 + tree type, func = get_original_function_decl(current_function_decl);
95367 + unsigned int argnum;
95368 + struct size_overflow_hash *hash;
95369 + const char *filename;
95370 +
95371 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
95372 +
95373 + type = TREE_TYPE(arg);
95374 + // skip function pointers
95375 + if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
95376 + return;
95377 +
95378 + if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
95379 + return;
95380 +
95381 + argnum = find_arg_number(arg, func);
95382 + if (argnum == 0)
95383 + return;
95384 +
95385 + filename = DECL_SOURCE_FILE(func);
95386 +
95387 + hash = get_function_hash(func, filename);
95388 + if (!hash) {
95389 + print_missing_msg(func, filename, argnum);
95390 + return;
95391 + }
95392 +
95393 +#define check_param(num) \
95394 + if (num == argnum && hash->param##num) \
95395 + return;
95396 + check_param(1);
95397 + check_param(2);
95398 + check_param(3);
95399 + check_param(4);
95400 + check_param(5);
95401 + check_param(6);
95402 + check_param(7);
95403 + check_param(8);
95404 + check_param(9);
95405 +#undef check_param
95406 +
95407 + print_missing_msg(func, filename, argnum);
95408 +}
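For readers unfamiliar with the token pasting above: each check_param(n) invocation in check_missing_attribute() expands, up to whitespace, to a test of the matching bitfield. For example, check_param(3) becomes:

if (3 == argnum && hash->param3)
	return;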
95409 +
95410 +static tree create_new_var(tree type)
95411 +{
95412 + tree new_var = create_tmp_var(type, "cicus");
95413 +
95414 + add_referenced_var(new_var);
95415 + mark_sym_for_renaming(new_var);
95416 + return new_var;
95417 +}
95418 +
95419 +static bool is_bool(tree node)
95420 +{
95421 + tree type;
95422 +
95423 + if (node == NULL_TREE)
95424 + return false;
95425 +
95426 + type = TREE_TYPE(node);
95427 + if (!INTEGRAL_TYPE_P(type))
95428 + return false;
95429 + if (TREE_CODE(type) == BOOLEAN_TYPE)
95430 + return true;
95431 + if (TYPE_PRECISION(type) == 1)
95432 + return true;
95433 + return false;
95434 +}
95435 +
95436 +static tree cast_a_tree(tree type, tree var)
95437 +{
95438 + gcc_assert(fold_convertible_p(type, var));
95439 +
95440 + return fold_convert(type, var);
95441 +}
95442 +
95443 +static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
95444 +{
95445 + gimple assign;
95446 +
95447 + if (new_var == CREATE_NEW_VAR)
95448 + new_var = create_new_var(type);
95449 +
95450 + assign = gimple_build_assign(new_var, cast_a_tree(type, var));
95451 + gimple_set_location(assign, loc);
95452 + gimple_set_lhs(assign, make_ssa_name(new_var, assign));
95453 +
95454 + return assign;
95455 +}
95456 +
95457 +static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
95458 +{
95459 + tree oldstmt_rhs1;
95460 + enum tree_code code;
95461 + gimple stmt;
95462 + gimple_stmt_iterator gsi;
95463 +
95464 + if (!*potentionally_overflowed)
95465 + return NULL_TREE;
95466 +
95467 + if (rhs1 == NULL_TREE) {
95468 + debug_gimple_stmt(oldstmt);
95469 + error("create_assign: rhs1 is NULL_TREE");
95470 + gcc_unreachable();
95471 + }
95472 +
95473 + oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
95474 + code = TREE_CODE(oldstmt_rhs1);
95475 + if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
95476 + check_missing_attribute(oldstmt_rhs1);
95477 +
95478 + stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
95479 + gsi = gsi_for_stmt(oldstmt);
95480 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
95481 + basic_block next_bb, cur_bb;
95482 + edge e;
95483 +
95484 + gcc_assert(before == false);
95485 + gcc_assert(stmt_can_throw_internal(oldstmt));
95486 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
95487 + gcc_assert(!gsi_end_p(gsi));
95488 +
95489 + cur_bb = gimple_bb(oldstmt);
95490 + next_bb = cur_bb->next_bb;
95491 + e = find_edge(cur_bb, next_bb);
95492 + gcc_assert(e != NULL);
95493 + gcc_assert(e->flags & EDGE_FALLTHRU);
95494 +
95495 + gsi = gsi_after_labels(next_bb);
95496 + gcc_assert(!gsi_end_p(gsi));
95497 + before = true;
95498 + }
95499 + if (before)
95500 + gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
95501 + else
95502 + gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
95503 + update_stmt(stmt);
95504 + pointer_set_insert(visited, oldstmt);
95505 + return gimple_get_lhs(stmt);
95506 +}
95507 +
95508 +static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
95509 +{
95510 + tree new_var, lhs = gimple_get_lhs(oldstmt);
95511 + gimple stmt;
95512 + gimple_stmt_iterator gsi;
95513 +
95514 + if (!*potentionally_overflowed)
95515 + return NULL_TREE;
95516 +
95517 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
95518 + rhs1 = gimple_assign_rhs1(oldstmt);
95519 + rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
95520 + }
95521 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
95522 + rhs2 = gimple_assign_rhs2(oldstmt);
95523 + rhs2 = create_assign(visited, potentionally_overflowed, oldstmt, rhs2, BEFORE_STMT);
95524 + }
95525 +
95526 + stmt = gimple_copy(oldstmt);
95527 + gimple_set_location(stmt, gimple_location(oldstmt));
95528 +
95529 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
95530 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
95531 +
95532 + if (is_bool(lhs))
95533 + new_var = SSA_NAME_VAR(lhs);
95534 + else
95535 + new_var = create_new_var(signed_size_overflow_type);
95536 + new_var = make_ssa_name(new_var, stmt);
95537 + gimple_set_lhs(stmt, new_var);
95538 +
95539 + if (rhs1 != NULL_TREE) {
95540 + if (!gimple_assign_cast_p(oldstmt))
95541 + rhs1 = cast_a_tree(signed_size_overflow_type, rhs1);
95542 + gimple_assign_set_rhs1(stmt, rhs1);
95543 + }
95544 +
95545 + if (rhs2 != NULL_TREE)
95546 + gimple_assign_set_rhs2(stmt, rhs2);
95547 +#if BUILDING_GCC_VERSION >= 4007
95548 + if (rhs3 != NULL_TREE)
95549 + gimple_assign_set_rhs3(stmt, rhs3);
95550 +#endif
95551 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
95552 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
95553 +
95554 + gsi = gsi_for_stmt(oldstmt);
95555 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
95556 + update_stmt(stmt);
95557 + pointer_set_insert(visited, oldstmt);
95558 + return gimple_get_lhs(stmt);
95559 +}
95560 +
95561 +static gimple overflow_create_phi_node(gimple oldstmt, tree var)
95562 +{
95563 + basic_block bb;
95564 + gimple phi;
95565 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
95566 +
95567 + bb = gsi_bb(gsi);
95568 +
95569 + phi = create_phi_node(var, bb);
95570 + gsi = gsi_last(phi_nodes(bb));
95571 + gsi_remove(&gsi, false);
95572 +
95573 + gsi = gsi_for_stmt(oldstmt);
95574 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
95575 + gimple_set_bb(phi, bb);
95576 + return phi;
95577 +}
95578 +
95579 +static tree signed_cast_constant(tree node)
95580 +{
95581 + gcc_assert(is_gimple_constant(node));
95582 +
95583 + return cast_a_tree(signed_size_overflow_type, node);
95584 +}
95585 +
95586 +static basic_block create_a_first_bb(void)
95587 +{
95588 + basic_block first_bb;
95589 +
95590 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
95591 + if (dom_info_available_p(CDI_DOMINATORS))
95592 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
95593 + return first_bb;
95594 +}
95595 +
95596 +static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
95597 +{
95598 + basic_block bb;
95599 + gimple newstmt, def_stmt;
95600 + gimple_stmt_iterator gsi;
95601 +
95602 + newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
95603 + if (TREE_CODE(arg) == SSA_NAME) {
95604 + def_stmt = get_def_stmt(arg);
95605 + if (gimple_code(def_stmt) != GIMPLE_NOP) {
95606 + gsi = gsi_for_stmt(def_stmt);
95607 + gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
95608 + return newstmt;
95609 + }
95610 + }
95611 +
95612 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
95613 + if (bb->index == 0)
95614 + bb = create_a_first_bb();
95615 + gsi = gsi_after_labels(bb);
95616 + gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
95617 + return newstmt;
95618 +}
95619 +
95620 +static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
95621 +{
95622 + gimple newstmt;
95623 + gimple_stmt_iterator gsi;
95624 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
95625 + gimple def_newstmt = get_def_stmt(new_rhs);
95626 +
95627 + gsi_insert = gsi_insert_after;
95628 + gsi = gsi_for_stmt(def_newstmt);
95629 +
95630 + switch (gimple_code(get_def_stmt(arg))) {
95631 + case GIMPLE_PHI:
95632 + newstmt = gimple_build_assign(new_var, new_rhs);
95633 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
95634 + gsi_insert = gsi_insert_before;
95635 + break;
95636 + case GIMPLE_ASM:
95637 + case GIMPLE_CALL:
95638 + newstmt = gimple_build_assign(new_var, new_rhs);
95639 + break;
95640 + case GIMPLE_ASSIGN:
95641 + newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
95642 + break;
95643 + default:
95644 + /* unknown gimple_code (handle_new_phi_arg) */
95645 + gcc_unreachable();
95646 + }
95647 +
95648 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
95649 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
95650 + update_stmt(newstmt);
95651 + return newstmt;
95652 +}
95653 +
95654 +static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
95655 +{
95656 + gimple newstmt;
95657 + tree new_rhs;
95658 +
95659 + new_rhs = expand(visited, potentionally_overflowed, arg);
95660 +
95661 + if (new_rhs == NULL_TREE)
95662 + return NULL_TREE;
95663 +
95664 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
95665 + return gimple_get_lhs(newstmt);
95666 +}
95667 +
95668 +static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
95669 +{
95670 + gimple phi;
95671 + tree new_var = create_new_var(signed_size_overflow_type);
95672 + unsigned int i, n = gimple_phi_num_args(oldstmt);
95673 +
95674 + pointer_set_insert(visited, oldstmt);
95675 + phi = overflow_create_phi_node(oldstmt, new_var);
95676 + for (i = 0; i < n; i++) {
95677 + tree arg, lhs;
95678 +
95679 + arg = gimple_phi_arg_def(oldstmt, i);
95680 + if (is_gimple_constant(arg))
95681 + arg = signed_cast_constant(arg);
95682 + lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
95683 + if (lhs == NULL_TREE)
95684 + lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
95685 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
95686 + }
95687 +
95688 + update_stmt(phi);
95689 + return gimple_phi_result(phi);
95690 +}
95691 +
95692 +static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
95693 +{
95694 + gimple def_stmt = get_def_stmt(var);
95695 + tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
95696 +
95697 + *potentionally_overflowed = true;
95698 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
95699 + if (new_rhs1 == NULL_TREE) {
95700 + if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
95701 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
95702 + else
95703 + return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
95704 + }
95705 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
95706 +}
95707 +
95708 +static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
95709 +{
95710 + gimple def_stmt = get_def_stmt(var);
95711 + tree rhs1 = gimple_assign_rhs1(def_stmt);
95712 +
95713 + if (is_gimple_constant(rhs1))
95714 + return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast_constant(rhs1), NULL_TREE, NULL_TREE);
95715 +
95716 + gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
95717 + switch (TREE_CODE(rhs1)) {
95718 + case SSA_NAME:
95719 + return handle_unary_rhs(visited, potentionally_overflowed, var);
95720 +
95721 + case ARRAY_REF:
95722 + case BIT_FIELD_REF:
95723 + case ADDR_EXPR:
95724 + case COMPONENT_REF:
95725 + case INDIRECT_REF:
95726 +#if BUILDING_GCC_VERSION >= 4006
95727 + case MEM_REF:
95728 +#endif
95729 + case PARM_DECL:
95730 + case TARGET_MEM_REF:
95731 + case VAR_DECL:
95732 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
95733 +
95734 + default:
95735 + debug_gimple_stmt(def_stmt);
95736 + debug_tree(rhs1);
95737 + gcc_unreachable();
95738 + }
95739 +}
95740 +
95741 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
95742 +{
95743 + gimple cond_stmt;
95744 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
95745 +
95746 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
95747 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
95748 + update_stmt(cond_stmt);
95749 +}
95750 +
95751 +static tree create_string_param(tree string)
95752 +{
95753 + tree i_type, a_type;
95754 + int length = TREE_STRING_LENGTH(string);
95755 +
95756 + gcc_assert(length > 0);
95757 +
95758 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
95759 + a_type = build_array_type(char_type_node, i_type);
95760 +
95761 + TREE_TYPE(string) = a_type;
95762 + TREE_CONSTANT(string) = 1;
95763 + TREE_READONLY(string) = 1;
95764 +
95765 + return build1(ADDR_EXPR, ptr_type_node, string);
95766 +}
95767 +
95768 +static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
95769 +{
95770 + gimple func_stmt, def_stmt;
95771 + tree current_func, loc_file, loc_line;
95772 + expanded_location xloc;
95773 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
95774 +
95775 + def_stmt = get_def_stmt(arg);
95776 + xloc = expand_location(gimple_location(def_stmt));
95777 +
95778 + if (!gimple_has_location(def_stmt)) {
95779 + xloc = expand_location(gimple_location(stmt));
95780 + if (!gimple_has_location(stmt))
95781 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
95782 + }
95783 +
95784 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
95785 +
95786 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
95787 + loc_file = create_string_param(loc_file);
95788 +
95789 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
95790 + current_func = create_string_param(current_func);
95791 +
95792 + // void report_size_overflow(const char *file, unsigned int line, const char *func)
95793 + func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
95794 +
95795 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
95796 +}
95797 +
95798 +static void __unused print_the_code_insertions(gimple stmt)
95799 +{
95800 + location_t loc = gimple_location(stmt);
95801 +
95802 + inform(loc, "Integer size_overflow check applied here.");
95803 +}
95804 +
95805 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
95806 +{
95807 + basic_block cond_bb, join_bb, bb_true;
95808 + edge e;
95809 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
95810 +
95811 + cond_bb = gimple_bb(stmt);
95812 + gsi_prev(&gsi);
95813 + if (gsi_end_p(gsi))
95814 + e = split_block_after_labels(cond_bb);
95815 + else
95816 + e = split_block(cond_bb, gsi_stmt(gsi));
95817 + cond_bb = e->src;
95818 + join_bb = e->dest;
95819 + e->flags = EDGE_FALSE_VALUE;
95820 + e->probability = REG_BR_PROB_BASE;
95821 +
95822 + bb_true = create_empty_bb(cond_bb);
95823 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
95824 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
95825 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
95826 +
95827 + if (dom_info_available_p(CDI_DOMINATORS)) {
95828 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
95829 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
95830 + }
95831 +
95832 + if (current_loops != NULL) {
95833 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
95834 + add_bb_to_loop(bb_true, cond_bb->loop_father);
95835 + }
95836 +
95837 + insert_cond(cond_bb, arg, cond_code, type_value);
95838 + insert_cond_result(bb_true, stmt, arg);
95839 +
95840 +// print_the_code_insertions(stmt);
95841 +}
95842 +
95843 +static tree get_type_for_check(tree rhs)
95844 +{
95845 + tree def_rhs;
95846 + gimple def_stmt = get_def_stmt(rhs);
95847 +
95848 + if (!gimple_assign_cast_p(def_stmt))
95849 + return TREE_TYPE(rhs);
95850 + def_rhs = gimple_assign_rhs1(def_stmt);
95851 + if (TREE_CODE(TREE_TYPE(def_rhs)) == INTEGER_TYPE)
95852 + return TREE_TYPE(def_rhs);
95853 + return TREE_TYPE(rhs);
95854 +}
95855 +
95856 +static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
95857 +{
95858 + gimple ucast_stmt;
95859 + gimple_stmt_iterator gsi;
95860 + location_t loc = gimple_location(stmt);
95861 +
95862 + ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
95863 + gsi = gsi_for_stmt(stmt);
95864 + gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
95865 + return ucast_stmt;
95866 +}
95867 +
95868 +static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
95869 +{
95870 + tree type_max, type_min, rhs_type;
95871 + gimple ucast_stmt;
95872 +
95873 + if (!*potentionally_overflowed)
95874 + return;
95875 +
95876 + rhs_type = get_type_for_check(rhs);
95877 +
95878 + if (TYPE_UNSIGNED(rhs_type)) {
95879 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
95880 + type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
95881 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
95882 + } else {
95883 + type_max = cast_a_tree(signed_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
95884 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
95885 +
95886 + type_min = cast_a_tree(signed_size_overflow_type, TYPE_MIN_VALUE(rhs_type));
95887 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
95888 + }
95889 +}
95890 +
95891 +static tree change_assign_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt, tree orig_rhs)
95892 +{
95893 + gimple assign;
95894 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
95895 + tree new_rhs, origtype = TREE_TYPE(orig_rhs);
95896 +
95897 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
95898 +
95899 + new_rhs = expand(visited, potentionally_overflowed, orig_rhs);
95900 + if (new_rhs == NULL_TREE)
95901 + return NULL_TREE;
95902 +
95903 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
95904 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
95905 + update_stmt(assign);
95906 + return gimple_get_lhs(assign);
95907 +}
95908 +
95909 +static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
95910 +{
95911 + tree new_rhs, cast_rhs;
95912 +
95913 + if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
95914 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
95915 +
95916 + new_rhs = change_assign_rhs(visited, potentionally_overflowed, def_stmt, rhs);
95917 + if (new_rhs != NULL_TREE) {
95918 + gimple_assign_set_rhs(def_stmt, new_rhs);
95919 + update_stmt(def_stmt);
95920 +
95921 + cast_rhs = gimple_assign_rhs1(get_def_stmt(new_rhs));
95922 +
95923 + check_size_overflow(def_stmt, cast_rhs, rhs, potentionally_overflowed);
95924 + }
95925 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
95926 +}
95927 +
95928 +static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
95929 +{
95930 + tree rhs1, rhs2;
95931 + gimple def_stmt = get_def_stmt(var);
95932 + tree new_rhs1 = NULL_TREE;
95933 + tree new_rhs2 = NULL_TREE;
95934 +
95935 + rhs1 = gimple_assign_rhs1(def_stmt);
95936 + rhs2 = gimple_assign_rhs2(def_stmt);
95937 +
95938 + /* no DImode/TImode division in the 32/64 bit kernel */
95939 + switch (gimple_assign_rhs_code(def_stmt)) {
95940 + case RDIV_EXPR:
95941 + case TRUNC_DIV_EXPR:
95942 + case CEIL_DIV_EXPR:
95943 + case FLOOR_DIV_EXPR:
95944 + case ROUND_DIV_EXPR:
95945 + case TRUNC_MOD_EXPR:
95946 + case CEIL_MOD_EXPR:
95947 + case FLOOR_MOD_EXPR:
95948 + case ROUND_MOD_EXPR:
95949 + case EXACT_DIV_EXPR:
95950 + case POINTER_PLUS_EXPR:
95951 + case BIT_AND_EXPR:
95952 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
95953 + default:
95954 + break;
95955 + }
95956 +
95957 + *potentionally_overflowed = true;
95958 +
95959 + if (TREE_CODE(rhs1) == SSA_NAME)
95960 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
95961 + if (TREE_CODE(rhs2) == SSA_NAME)
95962 + new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
95963 +
95964 + if (is_gimple_constant(rhs2))
95965 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, signed_cast_constant(rhs2), &gimple_assign_set_rhs1);
95966 +
95967 + if (is_gimple_constant(rhs1))
95968 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, signed_cast_constant(rhs1), new_rhs2, &gimple_assign_set_rhs2);
95969 +
95970 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
95971 +}
95972 +
95973 +#if BUILDING_GCC_VERSION >= 4007
95974 +static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
95975 +{
95976 + if (is_gimple_constant(rhs))
95977 + return signed_cast_constant(rhs);
95978 + if (TREE_CODE(rhs) != SSA_NAME)
95979 + return NULL_TREE;
95980 + return expand(visited, potentionally_overflowed, rhs);
95981 +}
95982 +
95983 +static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
95984 +{
95985 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
95986 + gimple def_stmt = get_def_stmt(var);
95987 +
95988 + *potentionally_overflowed = true;
95989 +
95990 + rhs1 = gimple_assign_rhs1(def_stmt);
95991 + rhs2 = gimple_assign_rhs2(def_stmt);
95992 + rhs3 = gimple_assign_rhs3(def_stmt);
95993 + new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
95994 + new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
95995 + new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
95996 +
95997 + if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
95998 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
95999 + error("handle_ternary_ops: unknown rhs");
96000 + gcc_unreachable();
96001 +}
96002 +#endif
96003 +
96004 +static void set_size_overflow_type(tree node)
96005 +{
96006 + switch (TYPE_MODE(TREE_TYPE(node))) {
96007 + case SImode:
96008 + signed_size_overflow_type = intDI_type_node;
96009 + unsigned_size_overflow_type = unsigned_intDI_type_node;
96010 + break;
96011 + case DImode:
96012 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
96013 + signed_size_overflow_type = intDI_type_node;
96014 + unsigned_size_overflow_type = unsigned_intDI_type_node;
96015 + } else {
96016 + signed_size_overflow_type = intTI_type_node;
96017 + unsigned_size_overflow_type = unsigned_intTI_type_node;
96018 + }
96019 + break;
96020 + default:
96021 + error("set_size_overflow_type: unsupported gcc configuration.");
96022 + gcc_unreachable();
96023 + }
96024 +}
96025 +
96026 +static tree expand_visited(gimple def_stmt)
96027 +{
96028 + gimple tmp;
96029 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
96030 +
96031 + gsi_next(&gsi);
96032 + tmp = gsi_stmt(gsi);
96033 + switch (gimple_code(tmp)) {
96034 + case GIMPLE_ASSIGN:
96035 + return gimple_get_lhs(tmp);
96036 + case GIMPLE_PHI:
96037 + return gimple_phi_result(tmp);
96038 + case GIMPLE_CALL:
96039 + return gimple_call_lhs(tmp);
96040 + default:
96041 + return NULL_TREE;
96042 + }
96043 +}
96044 +
96045 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
96046 +{
96047 + gimple def_stmt;
96048 + enum tree_code code = TREE_CODE(TREE_TYPE(var));
96049 +
96050 + if (is_gimple_constant(var))
96051 + return NULL_TREE;
96052 +
96053 + if (TREE_CODE(var) == ADDR_EXPR)
96054 + return NULL_TREE;
96055 +
96056 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
96057 + if (code != INTEGER_TYPE)
96058 + return NULL_TREE;
96059 +
96060 + if (SSA_NAME_IS_DEFAULT_DEF(var)) {
96061 + check_missing_attribute(var);
96062 + return NULL_TREE;
96063 + }
96064 +
96065 + def_stmt = get_def_stmt(var);
96066 +
96067 + if (!def_stmt)
96068 + return NULL_TREE;
96069 +
96070 + if (pointer_set_contains(visited, def_stmt))
96071 + return expand_visited(def_stmt);
96072 +
96073 + switch (gimple_code(def_stmt)) {
96074 + case GIMPLE_NOP:
96075 + check_missing_attribute(var);
96076 + return NULL_TREE;
96077 + case GIMPLE_PHI:
96078 + return build_new_phi(visited, potentionally_overflowed, def_stmt);
96079 + case GIMPLE_CALL:
96080 + case GIMPLE_ASM:
96081 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
96082 + case GIMPLE_ASSIGN:
96083 + switch (gimple_num_ops(def_stmt)) {
96084 + case 2:
96085 + return handle_unary_ops(visited, potentionally_overflowed, var);
96086 + case 3:
96087 + return handle_binary_ops(visited, potentionally_overflowed, var);
96088 +#if BUILDING_GCC_VERSION >= 4007
96089 + case 4:
96090 + return handle_ternary_ops(visited, potentionally_overflowed, var);
96091 +#endif
96092 + }
96093 + default:
96094 + debug_gimple_stmt(def_stmt);
96095 + error("expand: unknown gimple code");
96096 + gcc_unreachable();
96097 + }
96098 +}
96099 +
96100 +static void change_function_arg(gimple stmt, tree origarg, unsigned int argnum, tree newarg)
96101 +{
96102 + gimple assign;
96103 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
96104 + tree origtype = TREE_TYPE(origarg);
96105 +
96106 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
96107 +
96108 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
96109 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
96110 + update_stmt(assign);
96111 +
96112 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
96113 + update_stmt(stmt);
96114 +}
96115 +
96116 +static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl)
96117 +{
96118 + const char *origid;
96119 + tree arg, origarg;
96120 +
96121 + if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
96122 + gcc_assert(gimple_call_num_args(stmt) > argnum);
96123 + return gimple_call_arg(stmt, argnum);
96124 + }
96125 +
96126 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
96127 + while (origarg && argnum) {
96128 + argnum--;
96129 + origarg = TREE_CHAIN(origarg);
96130 + }
96131 +
96132 + gcc_assert(argnum == 0);
96133 +
96134 + gcc_assert(origarg != NULL_TREE);
96135 + origid = NAME(origarg);
96136 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
96137 + if (!strcmp(origid, NAME(arg)))
96138 + return arg;
96139 + }
96140 + return NULL_TREE;
96141 +}
96142 +
96143 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
96144 +{
96145 + struct pointer_set_t *visited;
96146 + tree arg, newarg, type_max;
96147 + gimple ucast_stmt;
96148 + bool potentionally_overflowed;
96149 +
96150 + arg = get_function_arg(argnum, stmt, fndecl);
96151 + if (arg == NULL_TREE)
96152 + return;
96153 +
96154 + if (is_gimple_constant(arg))
96155 + return;
96156 + if (TREE_CODE(arg) != SSA_NAME)
96157 + return;
96158 +
96159 + check_arg_type(arg);
96160 +
96161 + set_size_overflow_type(arg);
96162 +
96163 + visited = pointer_set_create();
96164 + potentionally_overflowed = false;
96165 + newarg = expand(visited, &potentionally_overflowed, arg);
96166 + pointer_set_destroy(visited);
96167 +
96168 + if (newarg == NULL_TREE || !potentionally_overflowed)
96169 + return;
96170 +
96171 + change_function_arg(stmt, arg, argnum, newarg);
96172 +
96173 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, newarg);
96174 +
96175 + type_max = build_int_cstu(unsigned_size_overflow_type, 0x7fffffff);
96176 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
96177 +}
96178 +
96179 +static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
96180 +{
96181 + tree p = TREE_VALUE(attr);
96182 + do {
96183 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
96184 + p = TREE_CHAIN(p);
96185 + } while (p);
96186 +}
96187 +
96188 +static void handle_function_by_hash(gimple stmt, tree fndecl)
96189 +{
96190 + tree orig_fndecl;
96191 + struct size_overflow_hash *hash;
96192 + const char *filename = DECL_SOURCE_FILE(fndecl);
96193 +
96194 + orig_fndecl = get_original_function_decl(fndecl);
96195 + hash = get_function_hash(orig_fndecl, filename);
96196 + if (!hash)
96197 + return;
96198 +
96199 +#define search_param(argnum) \
96200 + if (hash->param##argnum) \
96201 + handle_function_arg(stmt, fndecl, argnum - 1);
96202 +
96203 + search_param(1);
96204 + search_param(2);
96205 + search_param(3);
96206 + search_param(4);
96207 + search_param(5);
96208 + search_param(6);
96209 + search_param(7);
96210 + search_param(8);
96211 + search_param(9);
96212 +#undef search_param
96213 +}
96214 +
96215 +static unsigned int handle_function(void)
96216 +{
96217 + basic_block bb = ENTRY_BLOCK_PTR->next_bb;
96218 + int saved_last_basic_block = last_basic_block;
96219 +
96220 + do {
96221 + gimple_stmt_iterator gsi;
96222 + basic_block next = bb->next_bb;
96223 +
96224 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
96225 + tree fndecl, attr;
96226 + gimple stmt = gsi_stmt(gsi);
96227 +
96228 + if (!(is_gimple_call(stmt)))
96229 + continue;
96230 + fndecl = gimple_call_fndecl(stmt);
96231 + if (fndecl == NULL_TREE)
96232 + continue;
96233 + if (gimple_call_num_args(stmt) == 0)
96234 + continue;
96235 + attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
96236 + if (!attr || !TREE_VALUE(attr))
96237 + handle_function_by_hash(stmt, fndecl);
96238 + else
96239 + handle_function_by_attribute(stmt, attr, fndecl);
96240 + gsi = gsi_for_stmt(stmt);
96241 + }
96242 + bb = next;
96243 + } while (bb && bb->index <= saved_last_basic_block);
96244 + return 0;
96245 +}
96246 +
96247 +static struct gimple_opt_pass size_overflow_pass = {
96248 + .pass = {
96249 + .type = GIMPLE_PASS,
96250 + .name = "size_overflow",
96251 + .gate = NULL,
96252 + .execute = handle_function,
96253 + .sub = NULL,
96254 + .next = NULL,
96255 + .static_pass_number = 0,
96256 + .tv_id = TV_NONE,
96257 + .properties_required = PROP_cfg | PROP_referenced_vars,
96258 + .properties_provided = 0,
96259 + .properties_destroyed = 0,
96260 + .todo_flags_start = 0,
96261 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
96262 + }
96263 +};
96264 +
96265 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
96266 +{
96267 + tree fntype;
96268 +
96269 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
96270 +
96271 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
96272 + fntype = build_function_type_list(void_type_node,
96273 + const_char_ptr_type_node,
96274 + unsigned_type_node,
96275 + const_char_ptr_type_node,
96276 + NULL_TREE);
96277 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
96278 +
96279 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
96280 + TREE_PUBLIC(report_size_overflow_decl) = 1;
96281 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
96282 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
96283 +}
96284 +
96285 +extern struct gimple_opt_pass pass_dce;
96286 +
96287 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
96288 +{
96289 + int i;
96290 + const char * const plugin_name = plugin_info->base_name;
96291 + const int argc = plugin_info->argc;
96292 + const struct plugin_argument * const argv = plugin_info->argv;
96293 + bool enable = true;
96294 +
96295 + struct register_pass_info size_overflow_pass_info = {
96296 + .pass = &size_overflow_pass.pass,
96297 + .reference_pass_name = "ssa",
96298 + .ref_pass_instance_number = 1,
96299 + .pos_op = PASS_POS_INSERT_AFTER
96300 + };
96301 +
96302 + if (!plugin_default_version_check(version, &gcc_version)) {
96303 + error(G_("incompatible gcc/plugin versions"));
96304 + return 1;
96305 + }
96306 +
96307 + for (i = 0; i < argc; ++i) {
96308 + if (!strcmp(argv[i].key, "no-size-overflow")) {
96309 + enable = false;
96310 + continue;
96311 + } else if (!(strcmp(argv[i].key, "no-file-match"))) {
96312 + file_match = false;
96313 + continue;
96314 + }
96315 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
96316 + }
96317 +
96318 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
96319 + if (enable) {
96320 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
96321 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
96322 + }
96323 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
96324 +
96325 + return 0;
96326 +}
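Editor's note: the size_overflow pass above only emits calls to report_size_overflow(); the handler itself is supplied by the kernel side of this patch. A minimal sketch of the contract the plugin assumes, using the signature stated in the plugin's own comment (// void report_size_overflow(const char *file, unsigned int line, const char *func)); the body below is a hypothetical stand-in, not the grsecurity implementation:

/* hypothetical kernel-side stub matching the prototype the plugin calls;
 * the real handler elsewhere in this patch may log and terminate differently */
#include <linux/kernel.h>
#include <linux/bug.h>

void report_size_overflow(const char *file, unsigned int line, const char *func)
{
	/* report the overflowing call site, then stop the offending path */
	printk(KERN_ERR "size overflow detected in %s at %s:%u\n", func, file, line);
	BUG();
}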
96327 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
96328 new file mode 100644
96329 index 0000000..b87ec9d
96330 --- /dev/null
96331 +++ b/tools/gcc/stackleak_plugin.c
96332 @@ -0,0 +1,313 @@
96333 +/*
96334 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
96335 + * Licensed under the GPL v2
96336 + *
96337 + * Note: the choice of the license means that the compilation process is
96338 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
96339 + * but for the kernel it doesn't matter since it doesn't link against
96340 + * any of the gcc libraries
96341 + *
96342 + * gcc plugin to help implement various PaX features
96343 + *
96344 + * - track lowest stack pointer
96345 + *
96346 + * TODO:
96347 + * - initialize all local variables
96348 + *
96349 + * BUGS:
96350 + * - none known
96351 + */
96352 +#include "gcc-plugin.h"
96353 +#include "config.h"
96354 +#include "system.h"
96355 +#include "coretypes.h"
96356 +#include "tree.h"
96357 +#include "tree-pass.h"
96358 +#include "flags.h"
96359 +#include "intl.h"
96360 +#include "toplev.h"
96361 +#include "plugin.h"
96362 +//#include "expr.h" where are you...
96363 +#include "diagnostic.h"
96364 +#include "plugin-version.h"
96365 +#include "tm.h"
96366 +#include "function.h"
96367 +#include "basic-block.h"
96368 +#include "gimple.h"
96369 +#include "rtl.h"
96370 +#include "emit-rtl.h"
96371 +
96372 +extern void print_gimple_stmt(FILE *, gimple, int, int);
96373 +
96374 +int plugin_is_GPL_compatible;
96375 +
96376 +static int track_frame_size = -1;
96377 +static const char track_function[] = "pax_track_stack";
96378 +static const char check_function[] = "pax_check_alloca";
96379 +static bool init_locals;
96380 +
96381 +static struct plugin_info stackleak_plugin_info = {
96382 + .version = "201203140940",
96383 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
96384 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
96385 +};
96386 +
96387 +static bool gate_stackleak_track_stack(void);
96388 +static unsigned int execute_stackleak_tree_instrument(void);
96389 +static unsigned int execute_stackleak_final(void);
96390 +
96391 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
96392 + .pass = {
96393 + .type = GIMPLE_PASS,
96394 + .name = "stackleak_tree_instrument",
96395 + .gate = gate_stackleak_track_stack,
96396 + .execute = execute_stackleak_tree_instrument,
96397 + .sub = NULL,
96398 + .next = NULL,
96399 + .static_pass_number = 0,
96400 + .tv_id = TV_NONE,
96401 + .properties_required = PROP_gimple_leh | PROP_cfg,
96402 + .properties_provided = 0,
96403 + .properties_destroyed = 0,
96404 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
96405 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
96406 + }
96407 +};
96408 +
96409 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
96410 + .pass = {
96411 + .type = RTL_PASS,
96412 + .name = "stackleak_final",
96413 + .gate = gate_stackleak_track_stack,
96414 + .execute = execute_stackleak_final,
96415 + .sub = NULL,
96416 + .next = NULL,
96417 + .static_pass_number = 0,
96418 + .tv_id = TV_NONE,
96419 + .properties_required = 0,
96420 + .properties_provided = 0,
96421 + .properties_destroyed = 0,
96422 + .todo_flags_start = 0,
96423 + .todo_flags_finish = TODO_dump_func
96424 + }
96425 +};
96426 +
96427 +static bool gate_stackleak_track_stack(void)
96428 +{
96429 + return track_frame_size >= 0;
96430 +}
96431 +
96432 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
96433 +{
96434 + gimple check_alloca;
96435 + tree fntype, fndecl, alloca_size;
96436 +
96437 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
96438 + fndecl = build_fn_decl(check_function, fntype);
96439 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
96440 +
96441 + // insert call to void pax_check_alloca(unsigned long size)
96442 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
96443 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
96444 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
96445 +}
96446 +
96447 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
96448 +{
96449 + gimple track_stack;
96450 + tree fntype, fndecl;
96451 +
96452 + fntype = build_function_type_list(void_type_node, NULL_TREE);
96453 + fndecl = build_fn_decl(track_function, fntype);
96454 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
96455 +
96456 + // insert call to void pax_track_stack(void)
96457 + track_stack = gimple_build_call(fndecl, 0);
96458 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
96459 +}
96460 +
96461 +#if BUILDING_GCC_VERSION == 4005
96462 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
96463 +{
96464 + tree fndecl;
96465 +
96466 + if (!is_gimple_call(stmt))
96467 + return false;
96468 + fndecl = gimple_call_fndecl(stmt);
96469 + if (!fndecl)
96470 + return false;
96471 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
96472 + return false;
96473 +// print_node(stderr, "pax", fndecl, 4);
96474 + return DECL_FUNCTION_CODE(fndecl) == code;
96475 +}
96476 +#endif
96477 +
96478 +static bool is_alloca(gimple stmt)
96479 +{
96480 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
96481 + return true;
96482 +
96483 +#if BUILDING_GCC_VERSION >= 4007
96484 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
96485 + return true;
96486 +#endif
96487 +
96488 + return false;
96489 +}
96490 +
96491 +static unsigned int execute_stackleak_tree_instrument(void)
96492 +{
96493 + basic_block bb, entry_bb;
96494 + bool prologue_instrumented = false, is_leaf = true;
96495 +
96496 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
96497 +
96498 + // 1. loop through BBs and GIMPLE statements
96499 + FOR_EACH_BB(bb) {
96500 + gimple_stmt_iterator gsi;
96501 +
96502 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
96503 + gimple stmt;
96504 +
96505 + stmt = gsi_stmt(gsi);
96506 +
96507 + if (is_gimple_call(stmt))
96508 + is_leaf = false;
96509 +
96510 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
96511 + if (!is_alloca(stmt))
96512 + continue;
96513 +
96514 + // 2. insert stack overflow check before each __builtin_alloca call
96515 + stackleak_check_alloca(&gsi);
96516 +
96517 + // 3. insert track call after each __builtin_alloca call
96518 + stackleak_add_instrumentation(&gsi);
96519 + if (bb == entry_bb)
96520 + prologue_instrumented = true;
96521 + }
96522 + }
96523 +
96524 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
96525 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
96526 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
96527 +// case in point: native_save_fl on amd64, when optimized for size, would clobber rdx if it were instrumented here.
96528 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
96529 + return 0;
96530 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
96531 + return 0;
96532 +
96533 + // 4. insert track call at the beginning
96534 + if (!prologue_instrumented) {
96535 + gimple_stmt_iterator gsi;
96536 +
96537 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
96538 + if (dom_info_available_p(CDI_DOMINATORS))
96539 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
96540 + gsi = gsi_start_bb(bb);
96541 + stackleak_add_instrumentation(&gsi);
96542 + }
96543 +
96544 + return 0;
96545 +}
96546 +
96547 +static unsigned int execute_stackleak_final(void)
96548 +{
96549 + rtx insn;
96550 +
96551 + if (cfun->calls_alloca)
96552 + return 0;
96553 +
96554 + // keep calls only if function frame is big enough
96555 + if (get_frame_size() >= track_frame_size)
96556 + return 0;
96557 +
96558 + // 1. find pax_track_stack calls
96559 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
96560 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
96561 + rtx body;
96562 +
96563 + if (!CALL_P(insn))
96564 + continue;
96565 + body = PATTERN(insn);
96566 + if (GET_CODE(body) != CALL)
96567 + continue;
96568 + body = XEXP(body, 0);
96569 + if (GET_CODE(body) != MEM)
96570 + continue;
96571 + body = XEXP(body, 0);
96572 + if (GET_CODE(body) != SYMBOL_REF)
96573 + continue;
96574 + if (strcmp(XSTR(body, 0), track_function))
96575 + continue;
96576 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
96577 + // 2. delete call
96578 + insn = delete_insn_and_edges(insn);
96579 +#if BUILDING_GCC_VERSION >= 4007
96580 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
96581 + insn = delete_insn_and_edges(insn);
96582 +#endif
96583 + }
96584 +
96585 +// print_simple_rtl(stderr, get_insns());
96586 +// print_rtl(stderr, get_insns());
96587 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
96588 +
96589 + return 0;
96590 +}
96591 +
96592 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
96593 +{
96594 + const char * const plugin_name = plugin_info->base_name;
96595 + const int argc = plugin_info->argc;
96596 + const struct plugin_argument * const argv = plugin_info->argv;
96597 + int i;
96598 + struct register_pass_info stackleak_tree_instrument_pass_info = {
96599 + .pass = &stackleak_tree_instrument_pass.pass,
96600 +// .reference_pass_name = "tree_profile",
96601 + .reference_pass_name = "optimized",
96602 + .ref_pass_instance_number = 0,
96603 + .pos_op = PASS_POS_INSERT_BEFORE
96604 + };
96605 + struct register_pass_info stackleak_final_pass_info = {
96606 + .pass = &stackleak_final_rtl_opt_pass.pass,
96607 + .reference_pass_name = "final",
96608 + .ref_pass_instance_number = 0,
96609 + .pos_op = PASS_POS_INSERT_BEFORE
96610 + };
96611 +
96612 + if (!plugin_default_version_check(version, &gcc_version)) {
96613 + error(G_("incompatible gcc/plugin versions"));
96614 + return 1;
96615 + }
96616 +
96617 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
96618 +
96619 + for (i = 0; i < argc; ++i) {
96620 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
96621 + if (!argv[i].value) {
96622 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
96623 + continue;
96624 + }
96625 + track_frame_size = atoi(argv[i].value);
96626 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
96627 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
96628 + continue;
96629 + }
96630 + if (!strcmp(argv[i].key, "initialize-locals")) {
96631 + if (argv[i].value) {
96632 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
96633 + continue;
96634 + }
96635 + init_locals = true;
96636 + continue;
96637 + }
96638 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
96639 + }
96640 +
96641 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
96642 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
96643 +
96644 + return 0;
96645 +}
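Editor's note: as a rough illustration of what the stackleak tree pass effectively produces around a __builtin_alloca call, based on the pax_check_alloca()/pax_track_stack() prototypes noted in the plugin's comments; the example function and its body are hypothetical, not code from this patch:

/* prototypes provided by the kernel side of the patch (sketch only) */
void pax_check_alloca(unsigned long size);
void pax_track_stack(void);

void example(unsigned long n)
{
	char *buf;

	/* after instrumentation the pass effectively emits: */
	pax_check_alloca(n);		/* stack overflow check before the alloca */
	buf = __builtin_alloca(n);
	pax_track_stack();		/* lowest-stack-pointer tracking after it */
	buf[0] = 0;
}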
96646 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
96647 index 6789d78..4afd019 100644
96648 --- a/tools/perf/util/include/asm/alternative-asm.h
96649 +++ b/tools/perf/util/include/asm/alternative-asm.h
96650 @@ -5,4 +5,7 @@
96651
96652 #define altinstruction_entry #
96653
96654 + .macro pax_force_retaddr rip=0, reload=0
96655 + .endm
96656 +
96657 #endif
96658 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
96659 index af0f22f..9a7d479 100644
96660 --- a/usr/gen_init_cpio.c
96661 +++ b/usr/gen_init_cpio.c
96662 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
96663 int retval;
96664 int rc = -1;
96665 int namesize;
96666 - int i;
96667 + unsigned int i;
96668
96669 mode |= S_IFREG;
96670
96671 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
96672 *env_var = *expanded = '\0';
96673 strncat(env_var, start + 2, end - start - 2);
96674 strncat(expanded, new_location, start - new_location);
96675 - strncat(expanded, getenv(env_var), PATH_MAX);
96676 - strncat(expanded, end + 1, PATH_MAX);
96677 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
96678 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
96679 strncpy(new_location, expanded, PATH_MAX);
96680 + new_location[PATH_MAX] = 0;
96681 } else
96682 break;
96683 }
96684 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
96685 index 7858228..2919715 100644
96686 --- a/virt/kvm/kvm_main.c
96687 +++ b/virt/kvm/kvm_main.c
96688 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
96689
96690 static cpumask_var_t cpus_hardware_enabled;
96691 static int kvm_usage_count = 0;
96692 -static atomic_t hardware_enable_failed;
96693 +static atomic_unchecked_t hardware_enable_failed;
96694
96695 struct kmem_cache *kvm_vcpu_cache;
96696 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
96697 @@ -2318,7 +2318,7 @@ static void hardware_enable_nolock(void *junk)
96698
96699 if (r) {
96700 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
96701 - atomic_inc(&hardware_enable_failed);
96702 + atomic_inc_unchecked(&hardware_enable_failed);
96703 printk(KERN_INFO "kvm: enabling virtualization on "
96704 "CPU%d failed\n", cpu);
96705 }
96706 @@ -2372,10 +2372,10 @@ static int hardware_enable_all(void)
96707
96708 kvm_usage_count++;
96709 if (kvm_usage_count == 1) {
96710 - atomic_set(&hardware_enable_failed, 0);
96711 + atomic_set_unchecked(&hardware_enable_failed, 0);
96712 on_each_cpu(hardware_enable_nolock, NULL, 1);
96713
96714 - if (atomic_read(&hardware_enable_failed)) {
96715 + if (atomic_read_unchecked(&hardware_enable_failed)) {
96716 hardware_disable_all_nolock();
96717 r = -EBUSY;
96718 }
96719 @@ -2738,7 +2738,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
96720 kvm_arch_vcpu_put(vcpu);
96721 }
96722
96723 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
96724 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
96725 struct module *module)
96726 {
96727 int r;
96728 @@ -2801,7 +2801,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
96729 if (!vcpu_align)
96730 vcpu_align = __alignof__(struct kvm_vcpu);
96731 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
96732 - 0, NULL);
96733 + SLAB_USERCOPY, NULL);
96734 if (!kvm_vcpu_cache) {
96735 r = -ENOMEM;
96736 goto out_free_3;
96737 @@ -2811,9 +2811,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
96738 if (r)
96739 goto out_free;
96740
96741 - kvm_chardev_ops.owner = module;
96742 - kvm_vm_fops.owner = module;
96743 - kvm_vcpu_fops.owner = module;
96744 + pax_open_kernel();
96745 + *(void **)&kvm_chardev_ops.owner = module;
96746 + *(void **)&kvm_vm_fops.owner = module;
96747 + *(void **)&kvm_vcpu_fops.owner = module;
96748 + pax_close_kernel();
96749
96750 r = misc_register(&kvm_dev);
96751 if (r) {